Code example #1
class LogFilePlotFitter(traits.HasTraits):
    """This class allows the user to fit the data in log file plots with standard 
    functions or a custom function"""

    model = traits.Trait(
        "Gaussian", {
            "Linear": Model(fittingFunctions.linear),
            "Quadratic": Model(fittingFunctions.quadratic),
            "Gaussian": Model(fittingFunctions.gaussian),
            "lorentzian": Model(fittingFunctions.lorentzian),
            "parabola": Model(fittingFunctions.parabola),
            "exponential": Model(fittingFunctions.exponentialDecay),
            "sineWave": Model(fittingFunctions.sineWave),
            "sineWaveDecay1": Model(fittingFunctions.sineWaveDecay1),
            "sineWaveDecay2": Model(fittingFunctions.sineWaveDecay2),
            "sincSquared": Model(fittingFunctions.sincSquared),
            "sineSquared": Model(fittingFunctions.sineSquared),
            "sineSquaredDecay": Model(fittingFunctions.sineSquaredDecay),
            "custom": Model(custom)
        },
        desc="model selected for fitting the data"
    )  # mapped trait. so model --> string and model_ goes to Model object. see http://docs.enthought.com/traits/traits_user_manual/custom.html#mapped-traits
    parametersList = traits.List(
        Parameter, desc="list of parameters for fitting in chosen model")

    customCode = traits.Code(
        "def custom(x, param1, param2):\n\treturn param1*param2*x",
        desc="python code for a custom fitting function")
    customCodeCompileButton = traits.Button(
        "compile",
        desc="defines the above function and assigns it to the custom model for fitting"
    )
    fitButton = traits.Button(
        "fit",
        desc="runs fit on selected data set using selected parameters and model"
    )
    usePreviousFitButton = traits.Button(
        "use previous fit",
        desc="use the fitted values as the initial guess for the next fit")
    guessButton = traits.Button(
        "guess",
        desc="guess initial values from the data using the _guess function in the library; if none is defined the button is disabled"
    )
    saveFitButton = traits.Button(
        "save fit",
        desc="writes fit parameters values and tolerances to a file")
    cycleAndFitButton = traits.Button(
        "cycle fit",
        desc="fits using current initial parameters, saves the fit, copies calculated values to the initial guess and moves to the next dataset in the ordered dict"
    )
    dataSets = collections.OrderedDict()
    # dict mapping dataset name (for when we have multiple data sets) --> (xdata, ydata)
    # tuple of arrays, e.g. {"myData": (array([1,2,3]), array([1,2,3]))}
    dataSetNames = traits.List(traits.String)
    selectedDataSet = traits.Enum(values="dataSetNames")
    modelFitResult = None
    logFilePlotReference = None
    modelFitMessage = traits.String("not yet fitted")
    isFitted = traits.Bool(False)
    maxFitTime = traits.Float(
        10.0, desc="maximum time fitting can last before abort")
    statisticsButton = traits.Button("stats")
    statisticsString = traits.String("statistics not calculated")

    predefinedModelGroup = traitsui.VGroup(
        traitsui.Item("model", show_label=False),
        traitsui.Item("object.model_.definitionString",
                      style="readonly",
                      show_label=False,
                      visible_when="model!='custom'"))
    customFunctionGroup = traitsui.VGroup(
        traitsui.Item("customCode", show_label=False),
        traitsui.Item("customCodeCompileButton", show_label=False),
        visible_when="model=='custom'")
    modelGroup = traitsui.VGroup(predefinedModelGroup,
                                 customFunctionGroup,
                                 show_border=True)
    dataAndFittingGroup = traitsui.VGroup(
        traitsui.HGroup(
            traitsui.Item("selectedDataSet", label="dataset"),
            traitsui.Item("fitButton", show_label=False),
            traitsui.Item("usePreviousFitButton", show_label=False),
            traitsui.Item("guessButton",
                          show_label=False,
                          enabled_when="model_.guessFunction is not None")),
        traitsui.HGroup(traitsui.Item("cycleAndFitButton", show_label=False),
                        traitsui.Item("saveFitButton", show_label=False),
                        traitsui.Item("statisticsButton", show_label=False)),
        traitsui.Item("statisticsString", style="readonly"),
        traitsui.Item("modelFitMessage", style="readonly"),
        show_border=True)
    variablesGroup = traitsui.VGroup(
        traitsui.Item("parametersList",
                      editor=traitsui.ListEditor(style="custom"),
                      show_label=False,
                      resizable=True),
        show_border=True,
        label="parameters")

    traits_view = traitsui.View(traitsui.Group(modelGroup,
                                               dataAndFittingGroup,
                                               variablesGroup,
                                               layout="split"),
                                resizable=True)

    def __init__(self, **traitsDict):
        super(LogFilePlotFitter, self).__init__(**traitsDict)
        self._set_parametersList()

    def _set_parametersList(self):
        """sets the parameter list to the correct values given the current model"""
        self.parametersList = [
            Parameter(name=parameterName, parameter=parameterObject)
            for parameterName, parameterObject in self.model_.parameters.items()
        ]

    def _model_changed(self):
        """updates the model and hence changes the parameters appropriately"""
        self._set_parametersList()
        self._guessButton_fired()  # will only guess if there is a valid guessing function

    def _customCodeCompileButton_fired(self):
        """defines the function given in the user's code and rebuilds the custom model"""
        namespace = {}
        exec(self.customCode, namespace)  # the code is expected to define `custom`
        self.model_.__init__(namespace["custom"])
        self._set_parametersList()

    def setFitData(self, name, xData, yData):
        """updates the dataSets dictionary """
        self.dataSets[name] = (xData, yData)

    def cleanValidNames(self, uniqueValidNames):
        """removes any elements from the datasets dictionary that do not
        have a key in uniqueValidNames"""
        for dataSetName in list(self.dataSets.keys()):
            if dataSetName not in uniqueValidNames:
                del self.dataSets[dataSetName]

    def setValidNames(self):
        """sets the list of valid choices for datasets"""
        self.dataSetNames = list(self.dataSets.keys())

    def getParameters(self):
        """returns the lmfit Parameters object for the fit function"""
        params = lmfit.Parameters()
        for parameter in self.parametersList:
            params[parameter.name] = parameter.parameter
        return params

    def _setCalculatedValues(self, modelFitResult):
        """updates the calculated value of each parameter from the fit result"""
        parametersResult = modelFitResult.params
        for variable in self.parametersList:
            variable.calculatedValue = parametersResult[variable.name].value

    def _setCalculatedValuesErrors(self, modelFitResult):
        """updates the stdevError attribute of each parameter with the
        standard errors reported in the lmfit result"""
        parametersResult = modelFitResult.params
        for variable in self.parametersList:
            variable.stdevError = parametersResult[variable.name].stderr

    def fit(self):
        params = self.getParameters()
        x, y = self.dataSets[self.selectedDataSet]
        self.modelFitResult = self.model_.model.fit(y, x=x, params=params)
        #self.modelFitResult = self.model_.model.fit(y, x=x, params=params,iter_cb=self.getFitCallback(time.time()))#can also pass fit_kws= {"maxfev":1000}
        self._setCalculatedValues(
            self.modelFitResult)  # update fitting parameters' final values
        self._setCalculatedValuesErrors(self.modelFitResult)
        self.modelFitMessage = self.modelFitResult.message
        if not self.modelFitResult.success:
            logger.error("failed to fit in LogFilePlotFitter")
        self.isFitted = True
        if self.logFilePlotReference is not None:
            self.logFilePlotReference.plotFit()

    def getFitCallback(self, startTime):
        """returns the callback function that is called at every iteration of fit to check if it 
        has been running too long"""
        def fitCallback(params, iter, resid, *args, **kws):
            """check the time and compare to start time """
            if time.time() - startTime > self.maxFitTime:
                return True

        return fitCallback

    def _fitButton_fired(self):
        self.fit()

    def _usePreviousFitButton_fired(self):
        """update the initial guess value with the fitted values of the parameter """
        for parameter in self.parametersList:
            parameter.initialValue = parameter.calculatedValue

    def _guessButton_fired(self):
        """calls the model's guess function and updates initial fit values accordingly"""
        if self.model_.guessFunction is None:
            logger.error(
                "attempted to guess initial values but no guess function is defined. returning without changing initial values"
            )
            return
        logger.info("attempting to guess initial values using %s" %
                    self.model_.guessFunction.__name__)
        xs, ys = self.dataSets[self.selectedDataSet]
        guessDictionary = self.model_.guessFunction(xs, ys)
        logger.debug("guess results = %s" % guessDictionary)
        for parameterName, guessValue in guessDictionary.items():
            for parameter in self.parametersList:
                if parameter.name == parameterName:
                    parameter.initialValue = guessValue

    def _saveFitButton_fired(self):
        saveFolder, filename = os.path.split(self.logFilePlotReference.logFile)
        parametersResult = self.modelFitResult.params
        logFileName = os.path.split(saveFolder)[1]
        functionName = self.model_.function.__name__
        saveFileName = os.path.join(
            saveFolder, logFileName + "-" + functionName + "-fitSave.csv")

        #parse selected data set name to get column names
        #selectedDataSet is like "aaaa=1.31 bbbb=1.21"
        seriesColumnNames = [
            seriesString.split("=")[0]
            for seriesString in self.selectedDataSet.split(" ")
        ]

        if not os.path.exists(saveFileName):  # create column names
            with open(saveFileName, "a+", newline="") as csvFile:
                writer = csv.writer(csvFile)
                writer.writerow(
                    seriesColumnNames +
                    [variable.name for variable in self.parametersList] + [
                        variable.name + "-tolerance"
                        for variable in self.parametersList
                    ])
        with open(saveFileName, "a+", newline="") as csvFile:  # write save to file
            writer = csv.writer(csvFile)
            seriesValues = [
                seriesString.split("=")[1]
                for seriesString in self.selectedDataSet.split(" ")
            ]  #values of the legend keys so you know what fit was associated with
            writer.writerow(seriesValues + [
                parametersResult[variable.name].value
                for variable in self.parametersList
            ] + [
                parametersResult[variable.name].stderr
                for variable in self.parametersList
            ])

    def _cycleAndFitButton_fired(self):
        logger.info("cycle and fit button pressed")
        self._fitButton_fired()
        self._saveFitButton_fired()
        self._usePreviousFitButton_fired()
        dataSetNames = list(self.dataSets.keys())
        currentDataSetIndex = dataSetNames.index(self.selectedDataSet)
        self.selectedDataSet = dataSetNames[currentDataSetIndex + 1]

    def _statisticsButton_fired(self):
        from scipy.stats import pearsonr
        xs, ys = self.dataSets[self.selectedDataSet]
        mean = np.mean(ys)
        median = np.median(ys)
        std = np.std(ys)
        minimum = np.nanmin(ys)
        maximum = np.nanmax(ys)
        peakToPeak = maximum - minimum
        pearsonCorrelation = pearsonr(xs, ys)
        resultString = "mean=%G, median=%G, stdev=%G\nmin=%G, max=%G, pk-pk=%G\nPearson correlation=(%G,%G)\n(stdev/mean)=%G" % (
            mean, median, std, minimum, maximum, peakToPeak,
            pearsonCorrelation[0], pearsonCorrelation[1], std / mean)
        self.statisticsString = resultString
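
The fitter above is a thin GUI layer over lmfit's Model interface. As a rough, self-contained sketch of the underlying workflow (the Gaussian, the synthetic data, and all parameter values here are invented for illustration and are not taken from the class above):

import numpy as np
import lmfit

def gaussian(x, amplitude, center, sigma):
    # stand-in for fittingFunctions.gaussian
    return amplitude * np.exp(-(x - center) ** 2 / (2 * sigma ** 2))

model = lmfit.Model(gaussian)
# initial guesses play the role of Parameter.initialValue in the class above
params = model.make_params(amplitude=1.0, center=0.0, sigma=1.0)

rng = np.random.default_rng(0)
x = np.linspace(-5, 5, 200)
y = gaussian(x, 2.0, 0.5, 1.2) + rng.normal(scale=0.05, size=x.size)

result = model.fit(y, x=x, params=params)
print(result.success, result.params["center"].value, result.params["center"].stderr)
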
Code example #2
File: signal_tools.py Project: swang29/hyperspy
class SpikesRemoval(SpanSelectorInSignal1D):
    interpolator_kind = t.Enum(
        'Linear',
        'Spline',
        default='Linear',
        desc="the type of interpolation to use when\n"
             "replacing the signal where a spike has been replaced")
    threshold = t.Float(400, desc="the derivative magnitude threshold above\n"
                        "which to find spikes")
    click_to_show_instructions = t.Button()
    show_derivative_histogram = t.Button()
    spline_order = t.Range(1, 10, 3,
                           desc="the order of the spline used to\n"
                           "connect the reconstructed data")
    interpolator = None
    default_spike_width = t.Int(5,
                                desc="the width over which to do the interpolation\n"
                                     "when removing a spike (this can be adjusted\n"
                                     "for each spike by clicking and dragging on\n"
                                     "the display during spike replacement)")
    index = t.Int(0)
    add_noise = t.Bool(True,
                       desc="whether to add noise to the interpolated\n"
                            "portion of the spectrum. The noise properties\n"
                            "defined in the Signal metadata are used if\n"
                            "present, otherwise shot noise is used as a default")

    def __init__(self, signal, navigation_mask=None, signal_mask=None):
        super(SpikesRemoval, self).__init__(signal)
        self.interpolated_line = None
        self.coordinates = [coordinate for coordinate in
                            signal.axes_manager._am_indices_generator()
                            if (navigation_mask is None or not
                                navigation_mask[coordinate[::-1]])]
        self.signal = signal
        self.line = signal._plot.signal_plot.ax_lines[0]
        self.ax = signal._plot.signal_plot.ax
        signal._plot.auto_update_plot = False
        if len(self.coordinates) > 1:
            signal.axes_manager.indices = self.coordinates[0]
        self.index = 0
        self.argmax = None
        self.derivmax = None
        self.kind = "linear"
        self._temp_mask = np.zeros(self.signal().shape, dtype='bool')
        self.signal_mask = signal_mask
        self.navigation_mask = navigation_mask
        md = self.signal.metadata
        from hyperspy.signal import BaseSignal

        if "Signal.Noise_properties" in md:
            if "Signal.Noise_properties.variance" in md:
                self.noise_variance = md.Signal.Noise_properties.variance
                if isinstance(md.Signal.Noise_properties.variance, BaseSignal):
                    self.noise_type = "heteroscedastic"
                else:
                    self.noise_type = "white"
            else:
                self.noise_type = "shot noise"
        else:
            self.noise_type = "shot noise"

    def _threshold_changed(self, old, new):
        self.index = 0
        self.update_plot()

    def _click_to_show_instructions_fired(self):
        from pyface.message_dialog import information
        information(None, SPIKES_REMOVAL_INSTRUCTIONS,
                    title="Instructions")

    def _show_derivative_histogram_fired(self):
        self.signal._spikes_diagnosis(signal_mask=self.signal_mask,
                                      navigation_mask=self.navigation_mask)

    def detect_spike(self):
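        # A spike shows up as a large jump between neighbouring channels, so
        # threshold the first difference; masked channels and ranges already
        # visited are zeroed out so they cannot re-trigger the detector.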
        derivative = np.diff(self.signal())
        if self.signal_mask is not None:
            derivative[self.signal_mask[:-1]] = 0
        if self.argmax is not None:
            left, right = self.get_interpolation_range()
            self._temp_mask[left:right] = True
            derivative[self._temp_mask[:-1]] = 0
        if abs(derivative.max()) >= self.threshold:
            self.argmax = derivative.argmax()
            self.derivmax = abs(derivative.max())
            return True
        else:
            return False

    def _reset_line(self):
        if self.interpolated_line is not None:
            self.interpolated_line.close()
            self.interpolated_line = None
            self.reset_span_selector()

    def find(self, back=False):
        self._reset_line()
        ncoordinates = len(self.coordinates)
        spike = self.detect_spike()
        with self.signal.axes_manager.events.indices_changed.suppress():
            while not spike and (
                    (self.index < ncoordinates - 1 and back is False) or
                    (self.index > 0 and back is True)):
                if back is False:
                    self.index += 1
                else:
                    self.index -= 1
                spike = self.detect_spike()

        if spike is False:
            m = SimpleMessage()
            m.text = 'End of dataset reached'
            try:
                m.gui()
            except (NotImplementedError, ImportError):
                # This is only available for traitsui, ipywidgets has a
                # progress bar instead.
                pass
            except ValueError as error:
                _logger.warning(error)
            self.index = 0
            self._reset_line()
            return
        else:
            minimum = max(0, self.argmax - 50)
            maximum = min(len(self.signal()) - 1, self.argmax + 50)
            thresh_label = DerivativeTextParameters(
                text=r"$\mathsf{\delta}_\mathsf{max}=$",
                color="black")
            self.ax.legend([thresh_label], [repr(int(self.derivmax))],
                           handler_map={DerivativeTextParameters:
                                        DerivativeTextHandler()},
                           loc='best')
            self.ax.set_xlim(
                self.signal.axes_manager.signal_axes[0].index2value(
                    minimum),
                self.signal.axes_manager.signal_axes[0].index2value(
                    maximum))
            self.update_plot()
            self.create_interpolation_line()

    def update_plot(self):
        if self.interpolated_line is not None:
            self.interpolated_line.close()
            self.interpolated_line = None
        self.reset_span_selector()
        self.update_spectrum_line()
        if len(self.coordinates) > 1:
            self.signal._plot.pointer._on_navigate(self.signal.axes_manager)

    def update_spectrum_line(self):
        self.line.auto_update = True
        self.line.update()
        self.line.auto_update = False

    def _index_changed(self, old, new):
        self.signal.axes_manager.indices = self.coordinates[new]
        self.argmax = None
        self._temp_mask[:] = False

    def on_disabling_span_selector(self):
        if self.interpolated_line is not None:
            self.interpolated_line.close()
            self.interpolated_line = None

    def _spline_order_changed(self, old, new):
        self.kind = self.spline_order
        self.span_selector_changed()

    def _add_noise_changed(self, old, new):
        self.span_selector_changed()

    def _interpolator_kind_changed(self, old, new):
        if new == 'Linear':  # the Enum values are capitalized; scipy expects 'linear'
            self.kind = 'linear'
        else:
            self.kind = self.spline_order
        self.span_selector_changed()

    def _ss_left_value_changed(self, old, new):
        if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
            self.span_selector_changed()

    def _ss_right_value_changed(self, old, new):
        if not (np.isnan(self.ss_right_value) or np.isnan(self.ss_left_value)):
            self.span_selector_changed()

    def create_interpolation_line(self):
        self.interpolated_line = drawing.signal1d.Signal1DLine()
        self.interpolated_line.data_function = self.get_interpolated_spectrum
        self.interpolated_line.set_line_properties(
            color='blue',
            type='line')
        self.signal._plot.signal_plot.add_line(self.interpolated_line)
        self.interpolated_line.auto_update = False
        self.interpolated_line.autoscale = False
        self.interpolated_line.plot()

    def get_interpolation_range(self):
        axis = self.signal.axes_manager.signal_axes[0]
        if np.isnan(self.ss_left_value) or np.isnan(self.ss_right_value):
            left = self.argmax - self.default_spike_width
            right = self.argmax + self.default_spike_width
        else:
            left = axis.value2index(self.ss_left_value)
            right = axis.value2index(self.ss_right_value)

        # Clip to the axis dimensions
        nchannels = self.signal.axes_manager.signal_shape[0]
        left = left if left >= 0 else 0
        right = right if right < nchannels else nchannels - 1

        return left, right

    def get_interpolated_spectrum(self, axes_manager=None):
        data = self.signal().copy()
        axis = self.signal.axes_manager.signal_axes[0]
        left, right = self.get_interpolation_range()
        if self.kind == 'linear':
            pad = 1
        else:
            pad = self.spline_order
        ileft = left - pad
        iright = right + pad
        ileft = np.clip(ileft, 0, len(data))
        iright = np.clip(iright, 0, len(data))
        left = int(np.clip(left, 0, len(data)))
        right = int(np.clip(right, 0, len(data)))
        if ileft == 0:
            # Extrapolate to the left
            if right == iright:
                right -= 1
            data[:right] = data[right:iright].mean()

        elif iright == len(data):
            # Extrapolate to the right
            if left == ileft:
                left += 1
            data[left:] = data[ileft:left].mean()

        else:
            # Interpolate
            x = np.hstack((axis.axis[ileft:left], axis.axis[right:iright]))
            y = np.hstack((data[ileft:left], data[right:iright]))
            intp = sp.interpolate.interp1d(x, y, kind=self.kind)
            data[left:right] = intp(axis.axis[left:right])

        # Add noise
        if self.add_noise is True:
            if self.noise_type == "white":
                data[left:right] += np.random.normal(
                    scale=np.sqrt(self.noise_variance),
                    size=right - left)
            elif self.noise_type == "heteroscedastic":
                noise_variance = self.noise_variance(
                    axes_manager=self.signal.axes_manager)[left:right]
                noise = [np.random.normal(scale=np.sqrt(item))
                         for item in noise_variance]
                data[left:right] += noise
            else:
                data[left:right] = np.random.poisson(
                    np.clip(data[left:right], 0, np.inf))

        return data

    def span_selector_changed(self):
        if self.interpolated_line is None:
            return
        else:
            self.interpolated_line.update()

    def apply(self):
        if not self.interpolated_line:  # No spike selected
            return
        self.signal()[:] = self.get_interpolated_spectrum()
        self.signal.events.data_changed.trigger(obj=self.signal)
        self.update_spectrum_line()
        self.interpolated_line.close()
        self.interpolated_line = None
        self.reset_span_selector()
        self.find()
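
Stripped of the GUI and HyperSpy plumbing, the core of the tool above is a threshold test on the first difference followed by interpolation across the flagged channels. A minimal sketch of that idea in plain numpy/scipy (the signal, threshold, and window width are invented for illustration):

import numpy as np
from scipy.interpolate import interp1d

rng = np.random.default_rng(1)
data = rng.normal(loc=100.0, scale=1.0, size=512)
data[200] += 400.0  # inject a spike

threshold = 50.0  # plays the role of the `threshold` trait above
derivative = np.diff(data)
if abs(derivative.max()) >= threshold:
    argmax = derivative.argmax()
    width = 5  # plays the role of `default_spike_width`
    left, right = argmax - width, argmax + width
    pad = 1  # one extra channel per side, as in the 'linear' branch above
    x = np.hstack((np.arange(left - pad, left), np.arange(right, right + pad)))
    y = np.hstack((data[left - pad:left], data[right:right + pad]))
    data[left:right] = interp1d(x, y, kind="linear")(np.arange(left, right))
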
Code example #3
class HCFT(tr.HasStrictTraits):
    """High-Cycle Fatigue Tool"""
    # =========================================================================
    # Traits definitions
    # =========================================================================
    # Assigning the view
    traits_view = hcft_window

    # CSV import
    decimal = tr.Enum(',', '.')
    delimiter = tr.Str(';')
    file_path = tr.File
    open_file_button = tr.Button('Open file')
    columns_headers = tr.List
    npy_folder_path = tr.Str
    file_name = tr.Str

    # CSV processing
    take_time_from_time_column = tr.Bool(True)
    records_per_second = tr.Float(100)
    time_column = tr.Enum(values='columns_headers')
    skip_first_rows = tr.Range(low=1, high=10 ** 9, value=3, mode='spinner')
    add_columns_average = tr.Button
    columns_to_be_averaged = tr.List
    parse_csv_to_npy = tr.Button

    # Plotting
    x_axis = tr.Enum(values='columns_headers')
    y_axis = tr.Enum(values='columns_headers')
    x_axis_multiplier = tr.Enum(1, -1)
    y_axis_multiplier = tr.Enum(-1, 1)
    add_plot = tr.Button
    apply_filters = tr.Bool
    plot_settings_btn = tr.Button
    plot_settings = PlotSettings()
    plot_settings_active = tr.Bool
    normalize_cycles = tr.Bool
    smooth = tr.Bool
    plot_every_nth_point = tr.Range(low=1, high=1000000, mode='spinner')
    old_peak_force_before_cycles = tr.Float
    peak_force_before_cycles = tr.Float
    add_creep_plot = tr.Button(desc='Creep plot of X axis array')
    clear_plot = tr.Button

    force_column = tr.Enum(values='columns_headers')
    window_length = tr.Range(low=1, high=10 ** 9 - 1, value=31, mode='spinner')
    polynomial_order = tr.Range(low=1, high=10 ** 9, value=2, mode='spinner')
    activate_ascending_branch_smoothing = tr.Bool(False, label='Activate')

    generate_filtered_and_creep_npy = tr.Button
    force_max = tr.Float(100)
    force_min = tr.Float(40)
    min_cycle_force_range = tr.Float(50)
    cutting_method = tr.Enum('Define min cycle range (force difference)', 'Define Max, Min')

    log = tr.Str('')
    clear_log = tr.Button

    # =========================================================================
    # Assigning default values
    # =========================================================================
    figure = tr.Instance(mpl.figure.Figure)

    def _figure_default(self):
        figure = mpl.figure.Figure(facecolor='white')
        figure.set_tight_layout(True)
        return figure

    # =========================================================================
    # File management
    # =========================================================================
    def _open_file_button_fired(self):
        try:
            self.reset()

            extensions = ['*.csv', '*.txt']  # handle only one extension...
            wildcard = ';'.join(extensions)
            dialog = FileDialog(title='Select text file',
                                action='open', wildcard=wildcard,
                                default_path=self.file_path)
            result = dialog.open()

            # Proceed only if the user actually chose a file, to avoid raising an exception
            if result == OK:
                self.file_path = dialog.path
            else:
                return

            # Populate headers list which fills the x-axis and y-axis with values automatically
            self.columns_headers = get_headers(self.file_path, decimal=self.decimal, delimiter=self.delimiter)

            # Saving file name and path and creating NPY folder
            dir_path = os.path.dirname(self.file_path)
            self.npy_folder_path = os.path.join(dir_path, 'NPY')
            if not os.path.exists(self.npy_folder_path):
                os.makedirs(self.npy_folder_path)

            self.file_name = os.path.splitext(os.path.basename(self.file_path))[0]

            self.import_data_json()

        except Exception:
            self.log_exception()

    def _add_columns_average_fired(self):
        try:
            columns_average = ColumnsAverage()
            for name in self.columns_headers:
                columns_average.columns.append(Column(column_name=name))

            # kind='modal' pauses execution until the window is closed
            columns_average.configure_traits(kind='modal')

            columns_to_be_averaged_temp = []
            for i in columns_average.columns:
                if i.selected:
                    columns_to_be_averaged_temp.append(i.column_name)

            if columns_to_be_averaged_temp:  # If it's not empty
                self.columns_to_be_averaged.append(columns_to_be_averaged_temp)

                avg_file_suffix = self.get_suffix_for_columns_to_be_averaged(columns_to_be_averaged_temp)
                self.columns_headers.append(avg_file_suffix)
        except Exception:
            self.log_exception()

    def _parse_csv_to_npy_fired(self):
        # Run the parsing on a separate thread so the GUI doesn't freeze
        thread = Thread(target=self.parse_csv_to_npy_fired)
        thread.start()

    def parse_csv_to_npy_fired(self):
        try:
            self.print_custom('Parsing csv into npy files...')
            self.export_data_json()

            """ Exporting npy arrays of original columns """
            for i in range(len(self.columns_headers) - len(self.columns_to_be_averaged)):
                column_name = self.columns_headers[i]
                column_array = np.array(pd.read_csv(self.file_path, delimiter=self.delimiter, decimal=self.decimal,
                                                    skiprows=self.skip_first_rows, usecols=[i]))

                # TODO detect column name before loading completely to skip loading if the following condition applies
                if column_name == self.time_column and self.take_time_from_time_column is False:
                    column_array = np.arange(start=0.0, stop=len(column_array) / self.records_per_second,
                                             step=1.0 / self.records_per_second)

                np.save(self.get_npy_file_path(column_name), column_array)

            """ Exporting npy arrays of averaged columns """
            for columns_names in self.columns_to_be_averaged:
                temp_array = np.zeros((1))
                for column_name in columns_names:
                    temp_array = temp_array + np.load(self.get_npy_file_path(column_name)).flatten()
                avg = temp_array / len(columns_names)

                np.save(self.get_average_npy_file_path(columns_names), avg)

            self.print_custom('Finished parsing csv into npy files.')
        except Exception:
            self.log_exception()

    def get_npy_file_path(self, column_name):
        return os.path.join(self.npy_folder_path, self.file_name + '_' + column_name + '.npy')

    def get_filtered_npy_file_path(self, column_name):
        return os.path.join(self.npy_folder_path, self.file_name + '_' + column_name + '_filtered.npy')

    def get_max_npy_file_path(self, column_name):
        return os.path.join(self.npy_folder_path, self.file_name + '_' + column_name + '_max.npy')

    def get_min_npy_file_path(self, column_name):
        return os.path.join(self.npy_folder_path, self.file_name + '_' + column_name + '_min.npy')

    def get_average_npy_file_path(self, columns_names):
        avg_file_suffix = self.get_suffix_for_columns_to_be_averaged(columns_names)
        return os.path.join(self.npy_folder_path, self.file_name + '_' + avg_file_suffix + '.npy')

    def get_suffix_for_columns_to_be_averaged(self, columns_names):
        suffix_for_saved_file_name = 'avg_' + '_'.join(columns_names)
        return suffix_for_saved_file_name

    def export_data_json(self):
        # Output data MUST use exactly the same keys as the trait/variable names
        output_data = {'take_time_from_time_column': self.take_time_from_time_column,
                       'time_column': self.time_column,
                       'records_per_second': self.records_per_second,
                       'skip_first_rows': self.skip_first_rows,
                       'columns_headers': self.columns_headers,
                       'columns_to_be_averaged': self.columns_to_be_averaged,
                       'x_axis': self.x_axis,
                       'y_axis': self.y_axis,
                       'x_axis_multiplier': self.x_axis_multiplier,
                       'y_axis_multiplier': self.y_axis_multiplier,
                       'force_column': self.force_column,
                       'window_length': self.window_length,
                       'polynomial_order': self.polynomial_order,
                       'peak_force_before_cycles': self.peak_force_before_cycles,
                       'cutting_method': self.cutting_method,
                       'force_max': self.force_max,
                       'force_min': self.force_min,
                       'min_cycle_force_range': self.min_cycle_force_range}
        with open(self.get_json_file_path(), 'w') as outfile:
            json.dump(output_data, outfile, sort_keys=True, indent=4)
        self.print_custom('.json data file exported successfully.')

    def import_data_json(self):
        json_path = self.get_json_file_path()
        if not os.path.isfile(json_path):
            return
        # class_vars is a list of class variable names;
        # vars(self) and self.__dict__ don't include some trait variables like force_column = tr.Enum(values=..
        class_vars = [attr for attr in dir(self) if not attr.startswith("_") and not attr.startswith("__")]
        with open(json_path) as infile:
            data_in = json.load(infile)
        for key_data, value_data in data_in.items():
            for key_class in class_vars:
                if key_data == key_class:
                    # Equivalent to: self.key_class = value_data
                    setattr(self, key_class, value_data)
                    break
        self.print_custom('.json data file imported successfully.')

    def get_json_file_path(self):
        return os.path.join(self.npy_folder_path, self.file_name + '.json')

    def _generate_filtered_and_creep_npy_fired(self):
        # Run the generation on a separate thread so the GUI doesn't freeze
        thread = Thread(target=self.generate_filtered_and_creep_npy_fired)
        thread.start()

    def generate_filtered_and_creep_npy_fired(self):
        try:
            self.export_data_json()
            if not self.npy_files_exist(self.get_npy_file_path(self.force_column)):
                return
            self.print_custom('Generating filtered and creep files...')

            # 1- Export filtered force
            force = np.load(self.get_npy_file_path(self.force_column)).flatten()
            peak_force_before_cycles_index = np.where(np.abs(force) > abs(self.peak_force_before_cycles))[0][0]
            force_ascending = force[0:peak_force_before_cycles_index]
            force_rest = force[peak_force_before_cycles_index:]

            force_max_indices, force_min_indices = self.get_array_max_and_min_indices(force_rest)

            force_max_min_indices = np.concatenate((force_min_indices, force_max_indices))
            force_max_min_indices.sort()

            force_rest_filtered = force_rest[force_max_min_indices]
            force_filtered = np.concatenate((force_ascending, force_rest_filtered))
            np.save(self.get_filtered_npy_file_path(self.force_column), force_filtered)

            # 2- Export filtered displacements
            # Export displacements combining processed ascending branch and unprocessed min/max values
            self.export_filtered_displacements(force_max_min_indices, peak_force_before_cycles_index)

            # 3- Export creep for displacements
            # Cut unwanted max min values to get correct full cycles and remove false min/max values caused by noise
            self.export_displacements_creep(force_rest, force_max_indices, force_min_indices,
                                            peak_force_before_cycles_index)

            self.print_custom('Filtered and creep npy files are generated.')
        except Exception:
            self.log_exception()

    def export_filtered_displacements(self, force_max_min_indices, peak_force_before_cycles_index):
        for i in range(len(self.columns_headers)):
            if self.columns_headers[i] != self.force_column and self.columns_headers[i] != self.time_column:

                disp = np.load(self.get_npy_file_path(self.columns_headers[i])).flatten()
                disp_ascending = disp[0:peak_force_before_cycles_index]
                disp_rest = disp[peak_force_before_cycles_index:]

                if self.activate_ascending_branch_smoothing:
                    disp_ascending = savgol_filter(disp_ascending, window_length=self.window_length,
                                                   polyorder=self.polynomial_order)

                disp_rest_filtered = disp_rest[force_max_min_indices]
                filtered_disp = np.concatenate((disp_ascending, disp_rest_filtered))
                np.save(self.get_filtered_npy_file_path(self.columns_headers[i]), filtered_disp)

    def export_displacements_creep(self, force_rest, force_max_indices, force_min_indices,
                                   peak_force_before_cycles_index):
        if self.cutting_method == "Define Max, Min":
            force_max_indices_cut, force_min_indices_cut = self.cut_indices_of_min_max_range(force_rest,
                                                                                             force_max_indices,
                                                                                             force_min_indices,
                                                                                             self.force_max,
                                                                                             self.force_min)
        elif self.cutting_method == "Define min cycle range(force difference)":
            force_max_indices_cut, force_min_indices_cut = self.cut_indices_of_defined_range(force_rest,
                                                                                             force_max_indices,
                                                                                             force_min_indices,
                                                                                             self.min_cycle_force_range)
        self.print_custom("Cycles number= ", len(force_min_indices))
        self.print_custom("Cycles number after cutting fake cycles = ", len(force_min_indices_cut))

        for i in range(len(self.columns_headers)):
            if self.columns_headers[i] != self.time_column:
                array = np.load(self.get_npy_file_path(self.columns_headers[i])).flatten()
                array_rest = array[peak_force_before_cycles_index:]
                array_rest_maxima = array_rest[force_max_indices_cut]
                array_rest_minima = array_rest[force_min_indices_cut]
                np.save(self.get_max_npy_file_path(self.columns_headers[i]), array_rest_maxima)
                np.save(self.get_min_npy_file_path(self.columns_headers[i]), array_rest_minima)

    def get_array_max_and_min_indices(self, input_array):
        # Checking dominant sign
        positive_values_count = np.sum(np.array(input_array) >= 0)
        negative_values_count = input_array.size - positive_values_count

        # Getting max and min indices
        if positive_values_count > negative_values_count:
            force_max_indices = self.get_max_indices(input_array)
            force_min_indices = self.get_min_indices(input_array)
        else:
            force_max_indices = self.get_min_indices(input_array)
            force_min_indices = self.get_max_indices(input_array)

        return force_max_indices, force_min_indices

    def get_max_indices(self, a):
        # TODO try to vectorize this
        # This method doesn't qualify first and last elements as max
        max_indices = []
        i = 1
        while i < a.size - 1:
            previous_element = a[i - 1]

            # Skip repeated elements and record previous element value
            first_repeated_element = True
            while a[i] == a[i + 1] and i < a.size - 1:
                if first_repeated_element:
                    previous_element = a[i - 1]
                    first_repeated_element = False
                if i < a.size - 2:
                    i += 1
                else:
                    break

            # Append value if it's a local max
            if a[i] > a[i + 1] and a[i] > previous_element:
                max_indices.append(i)
            i += 1
        return np.array(max_indices)

    def get_min_indices(self, a):
        # TODO try to vectorize this
        # This method doesn't qualify first and last elements as min
        min_indices = []
        i = 1
        while i < a.size - 1:
            previous_element = a[i - 1]

            # Skip repeated elements and record previous element value
            first_repeated_element = True
            while a[i] == a[i + 1]:
                if first_repeated_element:
                    previous_element = a[i - 1]
                    first_repeated_element = False
                if i < a.size - 2:
                    i += 1
                else:
                    break

            # Append value if it's a local min
            if a[i] < a[i + 1] and a[i] < previous_element:
                min_indices.append(i)
            i += 1
        return np.array(min_indices)

    def cut_indices_of_min_max_range(self, array, max_indices, min_indices,
                                     range_upper_value, range_lower_value):
        # TODO try to vectorize this
        cut_max_indices = []
        cut_min_indices = []

        for max_index in max_indices:
            if abs(array[max_index]) > abs(range_upper_value):
                cut_max_indices.append(max_index)
        for min_index in min_indices:
            if abs(array[min_index]) < abs(range_lower_value):
                cut_min_indices.append(min_index)
        return cut_max_indices, cut_min_indices

    def cut_indices_of_defined_range(self, array, max_indices, min_indices, range_):
        # TODO try to vectorize this
        cut_max_indices = []
        cut_min_indices = []

        for max_index, min_index in zip(max_indices, min_indices):
            if abs(array[max_index] - array[min_index]) > range_:
                cut_max_indices.append(max_index)
                cut_min_indices.append(min_index)

        if max_indices.size > min_indices.size:
            cut_max_indices.append(max_indices[-1])
        elif min_indices.size > max_indices.size:
            cut_min_indices.append(min_indices[-1])

        return cut_max_indices, cut_min_indices

    def _activate_ascending_branch_smoothing_changed(self):
        # the static handler must match the trait name; '_activate_changed' never fired
        if not self.activate_ascending_branch_smoothing:
            self.old_peak_force_before_cycles = self.peak_force_before_cycles
            self.peak_force_before_cycles = 0
        else:
            self.peak_force_before_cycles = self.old_peak_force_before_cycles

    def _window_length_changed(self, new):
        if new <= self.polynomial_order:
            dialog = MessageDialog(
                title='Attention!',
                message='Window length must be bigger than polynomial order.')
            dialog.open()

        if new % 2 == 0 or new <= 0:
            dialog = MessageDialog(
                title='Attention!',
                message='Window length must be odd positive integer.')
            dialog.open()

    def _polynomial_order_changed(self, new):
        if new >= self.window_length:
            dialog = MessageDialog(
                title='Attention!',
                message='Polynomial order must be smaller than window length.')
            dialog.open()

    # =========================================================================
    # Plotting
    # =========================================================================
    data_changed = tr.Event

    def _plot_settings_btn_fired(self):
        try:
            self.plot_settings.configure_traits(kind='modal')
        except Exception:
            self.log_exception()

    def npy_files_exist(self, path):
        if os.path.exists(path):
            return True
        else:
            self.print_custom('Please parse csv file to generate npy files first!')
            return False

    def filtered_and_creep_npy_files_exist(self, path):
        if os.path.exists(path):
            return True
        else:
            self.print_custom('Please generate filtered and creep npy files first!')
            return False

    def _add_plot_fired(self):
        # Run the plotting on a separate thread so the GUI doesn't freeze
        thread = Thread(target=self.add_plot_fired)
        thread.start()

    def add_plot_fired(self):
        try:
            if self.apply_filters:
                if not self.filtered_and_creep_npy_files_exist(self.get_filtered_npy_file_path(self.x_axis)):
                    return
                # TODO link this _filtered to the path creation function
                x_axis_name = self.x_axis + '_filtered'
                y_axis_name = self.y_axis + '_filtered'
                self.print_custom('Loading npy files...')
                # when mmap_mode!=None, the array will be loaded as 'numpy.memmap'
                # object which doesn't load the array to memory until it's
                # indexed
                x_axis_array = np.load(self.get_filtered_npy_file_path(self.x_axis), mmap_mode='r')
                y_axis_array = np.load(self.get_filtered_npy_file_path(self.y_axis), mmap_mode='r')
            else:
                if not self.npy_files_exist(self.get_npy_file_path(self.x_axis)):
                    return

                x_axis_name = self.x_axis
                y_axis_name = self.y_axis
                self.print_custom('Loading npy files...')
                # when mmap_mode!=None, the array will be loaded as 'numpy.memmap'
                # object which doesn't load the array to memory until it's
                # indexed
                x_axis_array = np.load(self.get_npy_file_path(self.x_axis), mmap_mode='r')
                y_axis_array = np.load(self.get_npy_file_path(self.y_axis), mmap_mode='r')

            if self.plot_settings_active:
                print(self.plot_settings.num_of_first_rows_to_take)
                print(self.plot_settings.num_of_rows_to_skip_after_each_section)
                print(self.plot_settings.num_of_rows_in_each_section)
                print(np.size(x_axis_array))
                indices = self.get_indices_array(np.size(x_axis_array),
                                                 self.plot_settings.num_of_first_rows_to_take,
                                                 self.plot_settings.num_of_rows_to_skip_after_each_section,
                                                 self.plot_settings.num_of_rows_in_each_section)
                x_axis_array = self.x_axis_multiplier * x_axis_array[indices]
                y_axis_array = self.y_axis_multiplier * y_axis_array[indices]
            else:
                x_axis_array = self.x_axis_multiplier * x_axis_array
                y_axis_array = self.y_axis_multiplier * y_axis_array

            self.print_custom('Adding Plot...')
            mpl.rcParams['agg.path.chunksize'] = 10000

            ax = self.figure.add_subplot(1, 1, 1)

            ax.set_xlabel(x_axis_name)
            ax.set_ylabel(y_axis_name)
            ax.plot(x_axis_array, y_axis_array, linewidth=1.2,
                    color=np.random.rand(3),
                    label=self.file_name + ', ' + x_axis_name)

            ax.legend()
            self.data_changed = True
            self.print_custom('Finished adding plot.')

        except Exception:
            self.log_exception()

    def _add_creep_plot_fired(self):
        # Run the plotting on a separate thread so the GUI doesn't freeze
        thread = Thread(target=self.add_creep_plot_fired)
        thread.start()

    def add_creep_plot_fired(self):
        try:
            if not self.filtered_and_creep_npy_files_exist(self.get_max_npy_file_path(self.x_axis)):
                return

            self.print_custom('Loading npy files...')
            disp_max = self.x_axis_multiplier * np.load(self.get_max_npy_file_path(self.x_axis))
            disp_min = self.x_axis_multiplier * np.load(self.get_min_npy_file_path(self.x_axis))
            complete_cycles_number = disp_max.size

            self.print_custom('Adding creep-fatigue plot...')
            mpl.rcParams['agg.path.chunksize'] = 10000

            ax = self.figure.add_subplot(1, 1, 1)

            ax.set_xlabel('Cycles number')
            ax.set_ylabel(self.x_axis)

            if self.plot_every_nth_point > 1:
                disp_max = disp_max[0::self.plot_every_nth_point]
                disp_min = disp_min[0::self.plot_every_nth_point]

            if self.smooth:
                # Keeping the first item of the array and filtering the rest
                disp_max = np.concatenate((
                    np.array([disp_max[0]]),
                    savgol_filter(disp_max[1:], window_length=self.window_length, polyorder=self.polynomial_order)
                ))
                disp_min = np.concatenate((
                    np.array([disp_min[0]]),
                    savgol_filter(disp_min[1:], window_length=self.window_length, polyorder=self.polynomial_order)
                ))

            if self.normalize_cycles:
                ax.plot(np.linspace(0, 1., disp_max.size), disp_max,
                        linewidth=1.2, color=np.random.rand(3),
                        label='Max, ' + self.file_name + ', ' + self.x_axis)
                ax.plot(np.linspace(0, 1., disp_min.size), disp_min,
                        linewidth=1.2, color=np.random.rand(3),
                        label='Min, ' + self.file_name + ', ' + self.x_axis)
            else:
                ax.plot(np.linspace(0, complete_cycles_number, disp_max.size),
                        disp_max, linewidth=1.2, color=np.random.rand(3),
                        label='Max, ' + self.file_name + ', ' + self.x_axis)
                ax.plot(np.linspace(0, complete_cycles_number, disp_min.size),
                        disp_min, linewidth=1.2, color=np.random.rand(3),
                        label='Min, ' + self.file_name + ', ' + self.x_axis)

            ax.legend()
            self.data_changed = True
            self.print_custom('Finished adding creep-fatigue plot.')

        except Exception:
            self.log_exception()

    def get_indices_array(self,
                          array_size,
                          first_rows,
                          distance,
                          num_of_rows_after_each_distance):
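        # Keep the first `first_rows` indices, then alternately keep
        # `num_of_rows_after_each_distance` indices and skip `distance`
        # indices until array_size is reached (used to thin very large
        # arrays before plotting).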
        result_1 = np.arange(first_rows)
        result_2 = np.arange(start=first_rows, stop=array_size,
                             step=distance + num_of_rows_after_each_distance)
        result_2_updated = np.array([], dtype=np.int_)

        for result_2_value in result_2:
            stop = min(result_2_value + num_of_rows_after_each_distance,
                       array_size)  # clip the last slice to the array bounds
            data_slice = np.arange(result_2_value, stop)
            result_2_updated = np.concatenate((result_2_updated, data_slice))

        result = np.concatenate((result_1, result_2_updated))
        return result

    def _clear_plot_fired(self):
        self.figure.clear()
        self.data_changed = True

    # =========================================================================
    # Logging
    # =========================================================================
    def print_custom(self, *input_args):
        print(*input_args)
        message = ' '.join(str(e) for e in input_args)
        if self.log == '':
            self.log = message
        else:
            self.log = self.log + '\n' + message

    def log_exception(self):
        self.print_custom('SOMETHING WENT WRONG!')
        self.print_custom('--------- Error message: ---------')
        self.print_custom(traceback.format_exc())
        self.print_custom('----------------------------------')

    def _clear_log_fired(self):
        self.log = ''

    # =========================================================================
    # Other functions
    # =========================================================================
    def reset(self):
        self.columns_to_be_averaged = []
        self.log = ''
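
The TODOs in get_max_indices/get_min_indices above ask for a vectorized version. For arrays without plateaus of repeated values, the strict-local-extremum test reduces to one line of numpy; a sketch under that assumption (function name and data are illustrative):

import numpy as np

def local_max_indices(a):
    # strict local maxima; first and last elements never qualify, matching
    # the pure-Python version above (plateaus are deliberately not handled)
    a = np.asarray(a)
    return np.flatnonzero((a[1:-1] > a[:-2]) & (a[1:-1] > a[2:])) + 1

force = np.array([0.0, 1.0, 0.5, 2.0, 1.5, 3.0, 0.0])
print(local_max_indices(force))  # -> [1 3 5]
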
Code example #4
File: rc_traits.py Project: zihua/matplotlib
class PatchRC(traits.HasTraits):
    linewidth = traits.Float(1.0)
    facecolor = Color
    edgecolor = Color
    antialiased = flexible_true_trait
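
PatchRC is just a typed container of rc defaults; the value of HasTraits here is that assignments are validated when they happen. A tiny illustration with standard traits (PatchDefaults and its traits are stand-ins; Color and flexible_true_trait come from rc_traits.py and are not reproduced here):

from traits.api import HasTraits, Float, Bool, TraitError

class PatchDefaults(HasTraits):  # illustrative stand-in for PatchRC
    linewidth = Float(1.0)
    antialiased = Bool(True)

rc = PatchDefaults()
rc.linewidth = 2.5  # accepted and stored as a float
try:
    rc.linewidth = "thick"  # rejected at assignment time
except TraitError as err:
    print(err)
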
Code example #5
class config(HasTraits):
    uuid = traits.Str(desc="UUID")
    desc = traits.Str(desc="Workflow Description")
    # Directories
    working_dir = Directory(mandatory=True,
                            desc="Location of the Nipype working directory")
    sink_dir = Directory(os.path.abspath('.'),
                         mandatory=True,
                         desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False,
                          desc="Location to store crash files")
    json_sink = Directory(mandatory=False, desc="Location to store json_files")
    surf_dir = Directory(mandatory=True, desc="Freesurfer subjects directory")

    # Execution

    run_using_plugin = Bool(
        False,
        usedefault=True,
        desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS",
                         "MultiProc",
                         "SGE",
                         "Condor",
                         usedefault=True,
                         desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
                              usedefault=True,
                              desc='Plugin arguments.')
    test_mode = Bool(
        False,
        mandatory=False,
        usedefault=True,
        desc='Affects whether and where the workflow keeps its '
             'intermediary files. True to keep intermediary files.'
    )
    timeout = traits.Float(14.0)
    # Subjects

    #subjects= traits.List(traits.Str, mandatory=True, usedefault=True,
    #    desc="Subject id's. Note: These MUST match the subject id's in the \
    #                            Freesurfer directory. For simplicity, the subject id's should \
    #                            also match with the location of individual functional files.")

    datagrabber = traits.Instance(Data, ())
    # First Level

    subjectinfo = traits.Code()
    contrasts = traits.Code()
    interscan_interval = traits.Float()
    film_threshold = traits.Float()
    input_units = traits.Enum('scans', 'secs')
    is_sparse = traits.Bool(False)
    model_hrf = traits.Bool(True)
    stimuli_as_impulses = traits.Bool(True)
    use_temporal_deriv = traits.Bool(True)
    volumes_in_cluster = traits.Int(1)
    ta = traits.Float()
    tr = traits.Float()
    hpcutoff = traits.Float()
    scan_onset = traits.Int(0)
    scale_regressors = traits.Bool(True)
    #bases = traits.Dict({'dgamma':{'derivs': False}},use_default=True)
    bases = traits.Dict(
        {'dgamma': {
            'derivs': False
        }},
        traits.Enum('dgamma', 'gamma', 'none'),
        traits.Dict(traits.Enum('derivs', None), traits.Bool),
        desc=
        "name of basis function and options e.g., {'dgamma': {'derivs': True}}"
    )

    # preprocessing info
    preproc_config = traits.File(desc="preproc config file")
    use_compcor = traits.Bool(desc="use noise components from CompCor")
    #advanced_options
    use_advanced_options = Bool(False)
    advanced_options = traits.Code()
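
A minimal usage sketch for the config class above; the directory paths and the
plugin arguments are hypothetical placeholders:

c = config()
c.working_dir = '/scratch/nipype_work'       # hypothetical path
c.sink_dir = '/data/results'                 # hypothetical path
c.surf_dir = '/data/freesurfer_subjects'     # hypothetical path
c.run_using_plugin = True
c.plugin = 'MultiProc'
c.plugin_args = {'n_procs': 4}               # hypothetical plugin argument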
Code example #6
0
class MATS1D5DPCumPress(MATSEval):

    node_name = 'Pressure sensitive cumulative damage plasticity'

    E_N = tr.Float(30000,
                   label='E_N',
                   desc='Normal stiffness of the interface',
                   MAT=True,
                   enter_set=True,
                   auto_set=False)

    E_T = tr.Float(12900,
                   label='E_T',
                   desc='Shear modulus of the interface',
                   MAT=True,
                   enter_set=True,
                   auto_set=False)

    gamma = tr.Float(55.0,
                     label='gamma',
                     desc='Kinematic Hardening Modulus',
                     MAT=True,
                     enter_set=True,
                     auto_set=False)

    K = tr.Float(11,
                 label='K',
                 desc='Isotropic hardening modulus',
                 MAT=True,
                 enter_set=True,
                 auto_set=False)

    S = tr.Float(0.005,
                 label='S',
                 desc='Damage accumulation parameter',
                 MAT=True,
                 enter_set=True,
                 auto_set=False)

    r = tr.Float(1,
                 label='r',
                 desc='Damage accumulation parameter',
                 MAT=True,
                 enter_set=True,
                 auto_set=False)

    c = tr.Float(1,
                 label='c',
                 desc='Damage accumulation parameter',
                 MAT=True,
                 enter_set=True,
                 auto_set=False)
    m = tr.Float(0.3,
                 label='m',
                 desc='Lateral Pressure Coefficient',
                 MAT=True,
                 enter_set=True,
                 auto_set=False)

    tau_bar = tr.Float(4.2,
                       label='tau_bar',
                       desc='Reversibility limit',
                       MAT=True,
                       enter_set=True,
                       auto_set=False)

    state_var_shapes = dict(s_pi=(), alpha=(), z=(), omega=())

    D_rs = tr.Property(depends_on='E_N,E_T')

    @tr.cached_property
    def _get_D_rs(self):
        print('recalculating D_rs')
        return np.array([[self.E_T, 0], [0, self.E_N]], dtype=np.float_)

    def init(self, s_pi, alpha, z, omega):
        r'''
        Initialize the state variables.
        '''
        s_pi[...] = 0
        alpha[...] = 0
        z[...] = 0
        omega[...] = 0

    algorithmic = tr.Bool(True)

    def get_corr_pred(self, u_r, t_n, s_pi, alpha, z, omega):

        s = u_r[..., 0]
        w = u_r[..., 1]
        # For normal direction
        H_w_N = np.array(w <= 0.0, dtype=np.float_)
        E_alg_N = H_w_N * self.E_N
        sig_N = E_alg_N * w
        # For tangential
        # Y = 0.5 * self.E_T * (u_T - s_pi)**2
        tau_pi_trial = self.E_T * (s - s_pi)
        Z = self.K * z
        X = self.gamma * alpha
        f = np.fabs(tau_pi_trial - X) - Z - self.tau_bar + self.m * sig_N
        # Identify inelastic material points
        # @todo: consider the usage of np.where()
        I = f > 1e-6
        sig_T = (1 - omega) * self.E_T * (s - s_pi)
        # Return mapping
        delta_lambda_I = (f[I] / (self.E_T /
                                  (1 - omega[I]) + self.gamma + self.K))
        # Update all state variables
        s_pi[I] += (delta_lambda_I * np.sign(tau_pi_trial[I] - X[I]) /
                    (1 - omega[I]))
        Y = 0.5 * self.E_T * (s - s_pi)**2
        omega[I] += (delta_lambda_I * (1 - omega[I])**self.c *
                     (Y[I] / self.S)**self.r *
                     (self.tau_bar / (self.tau_bar - self.m * sig_N[I])))
        sig_T[I] = (1 - omega[I]) * self.E_T * (s[I] - s_pi[I])
        alpha[I] += delta_lambda_I * np.sign(tau_pi_trial[I] - X[I])
        z[I] += delta_lambda_I
        # Unloading stiffness

        E_alg_T = (1 - omega) * self.E_T

        # Consistent tangent operator
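        # The two `if False:` branches below preserve alternative derivations
        # of the algorithmic (consistent) tangent; they are disabled on purpose.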
        if False:
            E_alg_T = ((1 - omega) * self.E_T - (1 - omega) * self.E_T**2 /
                       (self.E_T + (self.gamma + self.K) * (1 - omega)) -
                       ((1 - omega)**self.c * (self.E_T**2) *
                        ((Y / self.S)**self.r) * np.sign(tau_pi_trial - X) *
                        (s - s_pi)) / ((self.E_T /
                                        (1 - omega)) + self.gamma + self.K))

        if False:
            #print('DONT COME HERE')
            E_alg_T = (
                (1 - omega) * self.E_T - ((self.E_T**2 * (1 - omega)) /
                                          (self.E_T + (self.gamma + self.K) *
                                           (1 - omega))) -
                ((1 - omega)**self.c * (Y / self.S)**self.r * self.E_T**2 *
                 (s - s_pi) * self.tau_bar /
                 (self.tau_bar - self.m * sig_N) * np.sign(tau_pi_trial - X)) /
                (self.E_T / (1 - omega) + self.gamma + self.K))

        sig = np.zeros_like(u_r)
        sig[..., 0] = sig_T
        sig[..., 1] = sig_N
        E_TN = np.einsum(
            'abEm->Emab',
            np.array([[E_alg_T, np.zeros_like(E_alg_T)],
                      [np.zeros_like(E_alg_N), E_alg_N]]))
        #         print(' sig_N =',  sig_N)
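        # Debug output left in the source: appends the sig_N values of every
        # call to a hard-coded text file.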
        abc = open('sigNfortau10000lp5000.txt', 'a+', newline='\n')
        for e in range(len(sig_N)):
            abc.write('%f ' % sig_N[e][0])
        abc.write('\n')
        abc.close()
        # print('s_pi=', s_pi)
        return sig, E_TN

    def _get_var_dict(self):
        var_dict = super(MATS1D5DPCumPress, self)._get_var_dict()
        var_dict.update(slip=self.get_slip,
                        s_el=self.get_s_el,
                        shear=self.get_shear,
                        omega=self.get_omega,
                        s_pi=self.get_s_pi,
                        alpha=self.get_alpha,
                        z=self.get_z)
        return var_dict

    def get_slip(self, u_r, tn1, **state):
        return self.get_eps(u_r, tn1)[..., 0]

    def get_shear(self, u_r, tn1, **state):
        return self.get_sig(u_r, tn1, **state)[..., 0]

    def get_omega(self, u_r, tn1, s_pi, alpha, z, omega):
        return omega

    def get_s_pi(self, u_r, tn1, s_pi, alpha, z, omega):
        return s_pi

    def get_alpha(self, u_r, tn1, s_pi, alpha, z, omega):
        return alpha

    def get_z(self, u_r, tn1, s_pi, alpha, z, omega):
        return z

    def get_s_el(self, u_r, tn1, **state):
        s = self.get_slip(u_r, tn1, **state)
        s_p = self.get_s_pi(u_r, tn1, **state)
        s_e = s - s_p
        return s_e


#     def get_sig_N(self, u_r, tn1, **state):
#         return self.get_sig(u_r, tn1, **state)[..., 1]

    tree_view = ui.View(ui.Item('E_N'), ui.Item('E_T'), ui.Item('gamma'),
                        ui.Item('K'), ui.Item('S'), ui.Item('r'), ui.Item('c'),
                        ui.Item('m'), ui.Item('tau_bar'),
                        ui.Item('D_rs', style='readonly'))

    traits_view = tree_view
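
A short sketch of driving the model above directly; the array shapes follow
the (element, integration point) convention of get_corr_pred, and the state
arrays are allocated from state_var_shapes (note the method also appends
sig_N to a hard-coded debug file on every call):

import numpy as np

mat = MATS1D5DPCumPress()                        # default material parameters
n_e, n_m = 2, 4                                  # elements x integration points
state = {name: np.zeros((n_e, n_m) + shape)
         for name, shape in mat.state_var_shapes.items()}
u_r = np.zeros((n_e, n_m, 2))
u_r[..., 0] = 1e-3                               # impose a small tangential slip
sig, E_TN = mat.get_corr_pred(u_r, 0.0, **state)
print(sig.shape, E_TN.shape)                     # (2, 4, 2) and (2, 4, 2, 2)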
Code example #7
0
class MATS1D5D(MATSEval):

    node_name = "damage bond model"

    E_T = tr.Float(100.0,
                   tooltip='Shear stiffness of the interface [MPa]',
                   MAT=True,
                   unit='MPa',
                   symbol='E_\mathrm{s}',
                   desc='Shear-modulus of the interface',
                   auto_set=True,
                   enter_set=True)

    E_N = tr.Float(100.0,
                   tooltip='Normal stiffness of the interface [MPa]',
                   MAT=True,
                   unit='MPa',
                   symbol='E_\mathrm{n}',
                   desc='Normal stiffness of the interface',
                   auto_set=False,
                   enter_set=True)

    state_var_shapes = dict(omega=(), kappa=())

    omega_fn_type = tr.Trait(
        'exp-slope',
        dict(li=LiDamageFn,
             exp_slope=ExpSlopeDamageFn,
             abaqus=AbaqusDamageFn,
             FRP=FRPDamageFn,
             multilinear=MultilinearDamageFn),
        MAT=True,
    )

    @on_trait_change('omega_fn_type')
    def _reset_omega_fn(self):
        self.omega_fn = self.omega_fn_type_()

    omega_fn = tr.Instance(IDamageFn, report=True)

    def _omega_fn_default(self):
        return self.omega_fn_type_()

    def omega(self, k):
        return self.omega_fn(k)

    def omega_derivative(self, k):
        return self.omega_fn.diff(k)

    def init(self, omega, kappa):
        r'''
        Initialize the state variables.
        '''
        omega[...] = 0
        kappa[...] = 0

    algorithmic = tr.Bool(True)

    def get_corr_pred(self, u_r, t_n, omega, kappa):

        s = u_r[..., 0]
        w = u_r[..., 1]
        # For normal
        H_w_N = np.array(w <= 0.0, dtype=np.float_)
        E_alg_N = H_w_N * self.E_N
        sig_N = E_alg_N * w
        kappa[...] = np.max(np.array([kappa, np.fabs(s)]), axis=0)
        omega[...] = self.omega(kappa)
        tau = (1 - omega) * self.E_T * s
        domega_ds = self.omega_derivative(kappa)

        if self.algorithmic:
            E_alg_T = ((1 - omega) - domega_ds * s) * self.E_T
        else:
            E_alg_T = (1 - omega) * self.E_T

        sig = np.zeros_like(u_r)
        sig[..., 0] = tau
        sig[..., 1] = sig_N
        E_TN = np.einsum(
            'ab...->...ab',
            np.array([[E_alg_T, np.zeros_like(E_alg_T)],
                      [np.zeros_like(E_alg_N), E_alg_N]]))
        return sig, E_TN

    traits_view = ui.View(ui.Item('E_T'), ui.Item('E_N'))
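
The damage model can be exercised the same way; a minimal sketch, assuming the
damage-function classes imported by the module are available so the default
'exp-slope' law can be constructed:

import numpy as np

mat = MATS1D5D(E_T=100.0, E_N=100.0)
n_e, n_m = 3, 2
omega = np.zeros((n_e, n_m))                     # damage
kappa = np.zeros((n_e, n_m))                     # maximum slip reached so far
u_r = np.zeros((n_e, n_m, 2))
u_r[..., 0] = 0.05                               # monotonic slip loading
sig, E_TN = mat.get_corr_pred(u_r, 0.0, omega, kappa)
# omega and kappa are updated in place and carry the damage history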
Code example #8
0
File: vecorized_fem.py Project: ABaktheer/confatmod
class TimeLoop(HasStrictTraits):

    tline = tr.Instance(TLine)
    '''Time line object specifying the start, end, time step and current time
    '''
    def _tline_default(self):
        return TLine(min=0.0, max=1.0, step=1.0)

    ts = tr.Instance(DOTSGrid)
    '''State object delivering the predictor and corrector
    '''

    bc_mngr = tr.Instance(BCondMngr, ())
    '''Boundary condition manager.
    '''

    bc_list = tr.List([])
    '''List of boundary conditions.
    '''

    def _bc_list_changed(self):
        self.bc_mngr.bcond_list = self.bc_list

    step_tolerance = tr.Float(1e-8)
    '''Time step tolerance.
    '''

    KMAX = tr.Int(300)
    '''Maximum number of iterations.
    '''

    tolerance = tr.Float(1e-3)
    '''Tolerance of the residuum norm. 
    '''

    t_n1 = tr.Float(0, input=True)
    '''Target time for the next increment.
    '''

    t_n = tr.Float(0, input=True)
    '''Time of the last equilibrium state. 
    '''

    d_t = tr.Float(0, input=True)
    '''Current time increment size.
    '''

    def eval(self):

        update_state = False
        self.bc_mngr.setup(None)
        K = SysMtxAssembly()
        self.bc_mngr.apply_essential(K)
        U_n = np.zeros((self.ts.mesh.n_dofs, ), dtype=np.float_)
        dU = np.copy(U_n)
        U_k = np.copy(U_n)
        F_ext = np.zeros_like(U_n)

        while (self.t_n1 - self.tline.max) <= self.step_tolerance:

            print('current time %f' % self.t_n1, end=' ')

            self.d_t = self.tline.step

            k = 0
            step_flag = 'predictor'

            while k < self.KMAX:

                K.reset_mtx()
                K_arr, F_int, n_F_int = self.ts.get_corr_pred(
                    U_k, dU, self.t_n, self.t_n1, update_state)
                if update_state:
                    update_state = False

                K.sys_mtx_arrays.append(K_arr)

                F_ext[:] = 0.0
                self.bc_mngr.apply(step_flag, None, K, F_ext, self.t_n,
                                   self.t_n1)
                R = F_ext - F_int
                K.apply_constraints(R)
                if n_F_int == 0.0:
                    n_F_int = 1.0
                norm = np.linalg.norm(R, ord=None)  # / n_F_int
                if norm < self.tolerance:  # convergence satisfied
                    print('converged in %d iterations' % (k + 1))
                    update_state = True
                    self.F_int_record.append(F_int)
                    self.U_record.append(np.copy(U_k))
                    break  # update_switch -> on
                dU = K.solve()
                U_k += dU
                k += 1
                step_flag = 'corrector'

            U_n = np.copy(U_k)
            self.t_n = self.t_n1
            self.record_response(U_k, self.t_n)
            self.t_n1 = self.t_n + self.d_t
            self.tline.val = min(self.t_n, self.tline.max)

        return U_n

    ug = tr.WeakRef
    write_dir = tr.Directory
    F_int_record = tr.List(tr.Array(np.float_))
    U_record = tr.List(tr.Array(np.float_))
    F_ext_record = tr.List(tr.Array(np.float_))
    t_record = tr.List(np.float_)
    record_dofs = tr.Array(np.int_)

    def record_response(self, U, t):
        n_c = self.ts.fets.n_nodal_dofs
        U_Ia = U.reshape(-1, n_c)
        U_Eia = U_Ia[self.ts.I_Ei]
        eps_Enab = np.einsum('Einabc,Eic->Enab', self.ts.B_Einabc, U_Eia)
        sig_Enab = np.einsum('abef,Emef->Emab', self.ts.mats.D_abcd, eps_Enab)
        U_vector_field = np.einsum('Ia,ab->Ib', U_Eia.reshape(-1, n_c),
                                   delta23_ab)
        self.ug.point_data.vectors = U_vector_field
        self.ug.point_data.vectors.name = 'displacement'
        eps_Encd = np.einsum('...ab,ac,bd->...cd', eps_Enab, delta23_ab,
                             delta23_ab)
        eps_Encd_tensor_field = eps_Encd.reshape(-1, 9)
        self.ug.point_data.tensors = eps_Encd_tensor_field
        self.ug.point_data.tensors.name = 'strain'
        fname = os.path.join(self.write_dir, 'step_%008.4f' % t)
        write_data(self.ug, fname.replace('.', '_'))
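
Hypothetical wiring of the time loop; the DOTSGrid, TLine and boundary
condition instances are assumed to be configured elsewhere, and eval() then
runs the Newton predictor/corrector iteration within each time step:

tloop = TimeLoop(ts=dots_grid,                   # a configured DOTSGrid
                 bc_list=[bc_fixed, bc_load],    # hypothetical BCond instances
                 tline=TLine(min=0.0, max=1.0, step=0.1))
U_final = tloop.eval()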
Code example #9
0
File: vdots_grid.py Project: simvisage/bmcs
class DOTSGrid(BMCSLeafNode):
    '''Domain time stepper on a grid mesh
    '''
    x_0 = tr.Tuple(0., 0., input=True)
    L_x = tr.Float(200, input=True, MESH=True)
    L_y = tr.Float(100, input=True, MESH=True)
    n_x = tr.Int(100, input=True, MESH=True)
    n_y = tr.Int(30, input=True, MESH=True)
    integ_factor = tr.Float(1.0, input=True, MESH=True)
    fets = tr.Instance(IFETSEval, input=True, MESH=True)

    D1_abcd = tr.Array(np.float_, input=True)
    '''Symmetric operator distributing the 
    derivatives of the shape functions into the 
    tensor field
    '''
    def _D1_abcd_default(self):
        delta = np.identity(2)
        # symmetrization operator
        D1_abcd = 0.5 * (np.einsum('ac,bd->abcd', delta, delta) +
                         np.einsum('ad,bc->abcd', delta, delta))
        return D1_abcd

    mesh = tr.Property(tr.Instance(FEGrid), depends_on='+input')

    @tr.cached_property
    def _get_mesh(self):
        return FEGrid(coord_min=self.x_0,
                      coord_max=(self.x_0[0] + self.L_x,
                                 self.x_0[1] + self.L_y),
                      shape=(self.n_x, self.n_y),
                      fets_eval=self.fets)

    cached_grid_values = tr.Property(tr.Tuple, depends_on='+input')

    @tr.cached_property
    def _get_cached_grid_values(self):
        x_Ia = self.mesh.X_Id
        n_I, n_a = x_Ia.shape
        dof_Ia = np.arange(n_I * n_a, dtype=np.int_).reshape(n_I, -1)
        I_Ei = self.mesh.I_Ei
        x_Eia = x_Ia[I_Ei, :]
        dof_Eia = dof_Ia[I_Ei]
        x_Ema = np.einsum('im,Eia->Ema', self.fets.N_im, x_Eia)
        J_Emar = np.einsum('imr,Eia->Emar', self.fets.dN_imr, x_Eia)
        J_Enar = np.einsum('inr,Eia->Enar', self.fets.dN_inr, x_Eia)
        det_J_Em = np.linalg.det(J_Emar)
        inv_J_Emar = np.linalg.inv(J_Emar)
        inv_J_Enar = np.linalg.inv(J_Enar)
        B_Eimabc = np.einsum('abcd,imr,Eidr->Eimabc', self.D1_abcd,
                             self.fets.dN_imr, inv_J_Emar)
        B_Einabc = np.einsum('abcd,inr,Eidr->Einabc', self.D1_abcd,
                             self.fets.dN_inr, inv_J_Enar)
        BB_Emicjdabef = np.einsum('Eimabc,Ejmefd, Em, m->Emicjdabef', B_Eimabc,
                                  B_Eimabc, det_J_Em, self.fets.w_m)
        return (BB_Emicjdabef, B_Eimabc, dof_Eia, x_Eia, dof_Ia, I_Ei,
                B_Einabc, det_J_Em)

    BB_Emicjdabef = tr.Property()
    '''Quadratic form of the kinematic mapping.
    '''

    def _get_BB_Emicjdabef(self):
        return self.cached_grid_values[0]

    B_Eimabc = tr.Property()
    '''Kinematic mapping between displacements and strains in every
    integration point.
    '''

    def _get_B_Eimabc(self):
        return self.cached_grid_values[1]

    B_Einabc = tr.Property()
    '''Kinematic mapping between displacement and strain in every
    visualization point
    '''

    def _get_B_Einabc(self):
        return self.cached_grid_values[6]

    dof_Eia = tr.Property()
    '''Mapping [element, node, direction] -> degree of freedom.
    '''

    def _get_dof_Eia(self):
        return self.cached_grid_values[2]

    x_Eia = tr.Property()
    '''Mapping [element, node, direction] -> value of coordinate.
    '''

    def _get_x_Eia(self):
        return self.cached_grid_values[3]

    dof_Ia = tr.Property()
    '''[global node, direction] -> degree of freedom
    '''

    def _get_dof_Ia(self):
        return self.cached_grid_values[4]

    I_Ei = tr.Property()
    '''[element, node] -> global node
    '''

    def _get_I_Ei(self):
        return self.cached_grid_values[5]

    det_J_Em = tr.Property()
    '''Jacobi determinant in every element and integration point.
    '''

    def _get_det_J_Em(self):
        return self.cached_grid_values[7]

    state_arrays = tr.Property(tr.Dict(tr.Str, tr.Array),
                               depends_on='fets, mats')
    '''Dictionary of state arrays.
    The entry names and shapes are defined by the material
    model.
    '''

    @tr.cached_property
    def _get_state_arrays(self):
        return {
            name: np.zeros((
                self.mesh.n_active_elems,
                self.fets.n_m,
            ) + mats_sa_shape,
                           dtype=np.float_)
            for name, mats_sa_shape in list(
                self.mats.state_array_shapes.items())
        }

    def get_corr_pred(self, U, dU, t_n, t_n1, update_state, algorithmic):
        '''Get the corrector and predictor for the given increment
        of the unknowns.
        '''
        n_c = self.fets.n_nodal_dofs
        U_Ia = U.reshape(-1, n_c)
        U_Eia = U_Ia[self.I_Ei]
        eps_Emab = np.einsum('Eimabc,Eic->Emab', self.B_Eimabc, U_Eia)
        dU_Ia = dU.reshape(-1, n_c)
        dU_Eia = dU_Ia[self.I_Ei]
        deps_Emab = np.einsum('Eimabc,Eic->Emab', self.B_Eimabc, dU_Eia)
        D_Emabef, sig_Emab = self.mats.get_corr_pred(eps_Emab, deps_Emab, t_n,
                                                     t_n1, update_state,
                                                     algorithmic,
                                                     **self.state_arrays)
        K_Eicjd = self.integ_factor * np.einsum('Emicjdabef,Emabef->Eicjd',
                                                self.BB_Emicjdabef, D_Emabef)
        n_E, n_i, n_c, n_j, n_d = K_Eicjd.shape
        K_E = K_Eicjd.reshape(-1, n_i * n_c, n_j * n_d)
        dof_E = self.dof_Eia.reshape(-1, n_i * n_c)
        K_subdomain = SysMtxArray(mtx_arr=K_E, dof_map_arr=dof_E)
        f_Eic = self.integ_factor * np.einsum(
            'm,Eimabc,Emab,Em->Eic', self.fets.w_m, self.B_Eimabc, sig_Emab,
            self.det_J_Em)
        f_Ei = f_Eic.reshape(-1, n_i * n_c)
        F_dof = np.bincount(dof_E.flatten(), weights=f_Ei.flatten())
        F_int = F_dof
        norm_F_int = np.linalg.norm(F_int)
        return K_subdomain, F_int, norm_F_int
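
A small sketch of the lazy, cached mesh construction; the FETS instance used
here (FETS2D4Q) is an assumption, any IFETSEval implementation works:

dots = DOTSGrid(L_x=200., L_y=100., n_x=4, n_y=2, fets=FETS2D4Q())
mesh = dots.mesh       # the FEGrid is built on first access and cached
dots.n_x = 8           # changing any 'input' trait invalidates the cache ...
mesh = dots.mesh       # ... so the mesh is rebuilt here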
Code example #10
0
File: configuration.py Project: mehravehs/cmp
class PipelineConfiguration(traits.HasTraits):

    # project settings
    project_dir = traits.Directory(
        exists=False, desc="data path to where the project is stored")

    # project metadata (for connectome file)
    project_metadata = traits.Dict(
        desc="project metadata to be stored in the connectome file")
    # DEPRECATED for versions > 1.0.2
    generator = traits.Str()

    # parcellation scheme
    parcellation_scheme = traits.Enum("NativeFreesurfer",
                                      ["Lausanne2008", "NativeFreesurfer"],
                                      desc="used parcellation scheme")

    # choose between 'L' (linear) and 'N' (non-linear) and 'B' (bbregister)
    registration_mode = traits.Enum(
        "Linear", ["Linear", "Nonlinear", "BBregister"],
        desc="registration mode: linear or non-linear or bbregister")

    diffusion_imaging_model = traits.Enum("DSI", ["DSI", "DTI", "QBALL"])

    # DSI
    nr_of_gradient_directions = traits.Str('515')
    nr_of_sampling_directions = traits.Str('181')
    odf_recon_param = traits.Str('-b0 1 -dsi -p 4 -sn 0')
    hardi_recon_param = traits.Str('-b0 1 -p 3 -sn 0')

    # DTI
    gradient_table_file = traits.File(exists=False)
    gradient_table = traits.Enum('siemens_64', [
        'custom', 'mgh_dti_006', 'mgh_dti_018', 'mgh_dti_030', 'mgh_dti_042',
        'mgh_dti_060', 'mgh_dti_072', 'mgh_dti_090', 'mgh_dti_120',
        'mgh_dti_144', 'siemens_06', 'siemens_12', 'siemens_20', 'siemens_256',
        'siemens_30', 'siemens_64'
    ])
    nr_of_b0 = traits.Str('1')
    max_b0_val = traits.Str('1000')
    dti_recon_param = traits.Str('')
    dtb_dtk2dir_param = traits.Str('')

    # tractography
    streamline_param = traits.Str('--angle 60  --seeds 32')

    # registration
    lin_reg_param = traits.Str('-usesqform -nosearch -dof 6 -cost mutualinfo')
    nlin_reg_bet_T2_param = traits.Str('-f 0.35 -g 0.15')
    nlin_reg_bet_b0_param = traits.Str('-f 0.2 -g 0.2')
    nlin_reg_fnirt_param = traits.Str(
        '--subsamp=8,4,2,2 --miter=5,5,5,5 --lambda=240,120,90,30 --splineorder=3 --applyinmask=0,0,1,1 --applyrefmask=0,0,1,1'
    )
    bb_reg_param = traits.Str('--init-header --dti')

    # dicom converter
    do_convert_diffusion = traits.Bool(True)
    do_convert_T1 = traits.Bool(True)
    do_convert_T2 = traits.Bool(False)
    do_convert_fMRI = traits.Bool(False)

    # rsfmri
    # choose between 'L' (linear) and 'B' (bbregister)
    rsfmri_registration_mode = traits.Enum(
        "Linear", ["Linear", "BBregister"],
        desc="registration mode: linear or bbregister")
    rsfmri_lin_reg_param = traits.Str(
        '-usesqform -nosearch -dof 6 -cost mutualinfo')
    rsfmri_bb_reg_param = traits.Str('--init-header --dti')
    do_save_mat = traits.Bool(True)

    # rsfmri PREPROCESSING STEPS
    rsfmri_slice_timing = traits.Enum("none", [
        "none", "bottom-top interleaved", "top-bottom interleaved",
        "bottom-top", "top-bottom"
    ],
                                      desc="time slicing mode")
    rsfmri_smoothing = traits.Str('0')
    rsfmri_discard = traits.Str('5')
    rsfmri_nuisance_global = traits.Bool(False)
    rsfmri_nuisance_WM = traits.Bool(True)
    rsfmri_nuisance_CSF = traits.Bool(True)
    rsfmri_nuisance_motion = traits.Bool(True)
    rsfmri_detrending = traits.Bool(True)
    rsfmri_lowpass = traits.Str('1')
    rsfmri_scrubbing_parameters = traits.Bool(True)
    rsfmri_scrubbing_apply = traits.Bool(True)
    rsfmri_scrubbing_FD = traits.Str('0.5')
    rsfmri_scrubbing_DVARS = traits.Str('5')

    # DEPRECATED:
    subject_raw_glob_diffusion = traits.Str("*.*")
    subject_raw_glob_T1 = traits.Str("*.*")
    subject_raw_glob_T2 = traits.Str("*.*")
    extract_diffusion_metadata = traits.Bool(False)

    # subject
    subject_name = traits.Str()
    subject_timepoint = traits.Str()
    subject_workingdir = traits.Directory()
    subject_logger = None
    subject_metadata = [
        KeyValue(key='description', value=''),
        KeyValue(key='', value=''),
        KeyValue(key='', value=''),
        KeyValue(key='', value=''),
        KeyValue(key='', value=''),
        KeyValue(key='', value=''),
    ]

    active_createfolder = traits.Bool(True)
    active_dicomconverter = traits.Bool(False)
    active_registration = traits.Bool(False)
    active_segmentation = traits.Bool(False)
    active_parcellation = traits.Bool(False)
    active_applyregistration = traits.Bool(False)
    active_reconstruction = traits.Bool(False)
    active_tractography = traits.Bool(False)
    active_fiberfilter = traits.Bool(False)
    active_connectome = traits.Bool(False)
    active_statistics = traits.Bool(False)
    active_cffconverter = traits.Bool(False)
    active_rsfmri_registration = traits.Bool(False)
    active_rsfmri_preprocessing = traits.Bool(False)
    active_rsfmri_connectionmatrix = traits.Bool(False)
    skip_completed_stages = traits.Bool(False)

    # metadata
    creator = traits.Str()
    email = traits.Str()
    publisher = traits.Str()
    created = traits.Date()
    modified = traits.Date()
    license = traits.Str()
    #    rights = traits.Str()
    reference = traits.Str()
    #    relation =  traits.Str()
    species = traits.Str('Homo sapiens')
    description = traits.Str()

    # segmentation
    recon_all_param = traits.Str('-all -no-isrunning')

    # parcellation
    custompar_nrroi = traits.Int()
    custompar_nodeinfo = traits.File()
    custompar_volumeparcell = traits.File()

    # fiber filtering
    apply_splinefilter = traits.Bool(
        True, desc='apply the spline filtering from diffusion toolkit')
    apply_fiberlength = traits.Bool(True, desc='apply cutoff to fiber lengths')
    fiber_cutoff_lower = traits.Float(
        20.0,
        desc='cut fibers that are shorter than the given length in mm')
    fiber_cutoff_upper = traits.Float(
        500.0,
        desc='cut fibers that are longer than the given length in mm')

    # measures
    connection_P0 = traits.Bool(False)
    connection_gfa = traits.Bool(False)
    connection_kurtosis = traits.Bool(False)
    connection_skewness = traits.Bool(False)
    connection_adc = traits.Bool(False)
    connection_fa = traits.Bool(False)

    # cff converter
    cff_fullnetworkpickle = traits.Bool(
        True,
        desc='stores the full network pickle generated by connectome creation')
    cff_cmatpickle = traits.Bool(True)
    cff_originalfibers = traits.Bool(True, desc='stores original fibers')
    cff_filteredfibers = traits.Bool(True, desc='stores filtered fibers')
    cff_finalfiberlabels = traits.Bool(
        True, desc='stores final fibers and their labelarrays')
    cff_fiberarr = traits.Bool(True)
    cff_rawdiffusion = traits.Bool(True)
    cff_scalars = traits.Bool(True)
    cff_rawT1 = traits.Bool(True)
    cff_rawT2 = traits.Bool(True)
    cff_roisegmentation = traits.Bool(
        True, desc='stores multi-resolution parcellation volumes')
    cff_surfaces = traits.Bool(True,
                               desc='stores individually generated surfaces')
    cff_surfacelabels = traits.Bool(
        True, desc='stores individually generated surfaces')

    # do you want to do manual white matter mask correction?
    wm_handling = traits.Enum(
        1, [1, 2, 3],
        desc="in which state the freesurfer step should be processed")

    # custom parcellation
    parcellation = traits.Dict(
        desc="provide the dictionary with your parcellation.")

    # start up fslview
    inspect_registration = traits.Bool(
        False, desc='start fslview to inspect the registration results')
    fsloutputtype = traits.Enum('NIFTI', ['NIFTI'])

    # connectome creation
    compute_curvature = traits.Bool(False)

    # email notification, needs a local smtp server
    # sudo apt-get install postfix
    emailnotify = traits.ListStr(
        [], desc='email addresses to send stage completion status messages to')

    freesurfer_home = traits.Directory(exists=False, desc="path to Freesurfer")
    fsl_home = traits.Directory(exists=False, desc="path to FSL")
    dtk_home = traits.Directory(exists=False, desc="path to diffusion toolkit")

    # This file stores descriptions of the inputs/outputs to each stage of the
    # CMP pipeline.  It can be queried using the PipelineStatus python object
    pipeline_status_file = traits.Str("cmp.status")

    # Pipeline status object
    pipeline_status = pipeline_status.PipelineStatus()

    def _get_lausanne_parcellation(self, parcel="NativeFreesurfer"):

        if parcel == "Lausanne2008":
            return {
                'scale33': {
                    'number_of_regions':
                    83,
                    # contains name, url, color, freesurfer_label, etc. used for connection matrix
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution83'),
                        'resolution83.graphml'),
                    # scalar node values on fsaverage? or atlas?
                    'surface_parcellation':
                    None,
                    # scalar node values in fsaverage volume?
                    'volume_parcellation':
                    None,
                    # the subdirectory name from where to copy parcellations, with hemispheric wildcard
                    'fs_label_subdir_name':
                    'regenerated_%s_36',
                    # should we subtract the cortical rois for the white matter mask?
                    'subtract_from_wm_mask':
                    1,
                },
                'scale60': {
                    'number_of_regions':
                    129,
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution150'),
                        'resolution150.graphml'),
                    'surface_parcellation':
                    None,
                    'volume_parcellation':
                    None,
                    'fs_label_subdir_name':
                    'regenerated_%s_60',
                    'subtract_from_wm_mask':
                    1,
                },
                'scale125': {
                    'number_of_regions':
                    234,
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution258'),
                        'resolution258.graphml'),
                    'surface_parcellation':
                    None,
                    'volume_parcellation':
                    None,
                    'fs_label_subdir_name':
                    'regenerated_%s_125',
                    'subtract_from_wm_mask':
                    1,
                },
                'scale250': {
                    'number_of_regions':
                    463,
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution500'),
                        'resolution500.graphml'),
                    'surface_parcellation':
                    None,
                    'volume_parcellation':
                    None,
                    'fs_label_subdir_name':
                    'regenerated_%s_250',
                    'subtract_from_wm_mask':
                    1,
                },
                'scale500': {
                    'number_of_regions':
                    1015,
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('resolution1015'),
                        'resolution1015.graphml'),
                    'surface_parcellation':
                    None,
                    'volume_parcellation':
                    None,
                    'fs_label_subdir_name':
                    'regenerated_%s_500',
                    'subtract_from_wm_mask':
                    1,
                },
            }
        else:
            return {
                'freesurferaparc': {
                    'number_of_regions':
                    83,
                    # contains name, url, color, freesurfer_label, etc. used for connection matrix
                    'node_information_graphml':
                    op.join(
                        self.get_lausanne_parcellation_path('freesurferaparc'),
                        'resolution83.graphml'),
                    # scalar node values on fsaverage? or atlas?
                    'surface_parcellation':
                    None,
                    # scalar node values in fsaverage volume?
                    'volume_parcellation':
                    None,
                }
            }

    def __init__(self, **kwargs):
        # NOTE: In python 2.6, object.__init__ no longer accepts input
        # arguments.  HasTraits does not define an __init__ and
        # therefore these args were being ignored.
        super(PipelineConfiguration, self).__init__(**kwargs)

        # the default parcellation provided
        self.parcellation = self._get_lausanne_parcellation(
            parcel="NativeFreesurfer")

        self.can_use_dipy = dipy_here

        # no email notify
        self.emailnotify = []

        # default gradient table for DTI
        self.gradient_table_file = self.get_cmp_gradient_table('siemens_64')

        # try to discover paths from environment variables
        try:
            self.freesurfer_home = op.join(os.environ['FREESURFER_HOME'])
            self.fsl_home = op.join(os.environ['FSLDIR'])
            self.dtk_home = os.environ['DTDIR']
            self.dtk_matrices = op.join(self.dtk_home, 'matrices')
        except KeyError:
            pass

        self.fsloutputtype = 'NIFTI'
        os.environ['FSLOUTPUTTYPE'] = self.fsloutputtype

    def consistency_check(self):
        """ Provides a checking facility for configuration objects """

        # project name not empty
        if not op.exists(self.project_dir):
            msg = 'Your project directory does not exist!'
            raise Exception(msg)

        # check metadata
        if self.creator == '':
            raise Exception('You need to enter creator metadata!')
        if self.publisher == '':
            raise Exception('You need to enter publisher metadata!')
        if self.email == '':
            raise Exception('You need to enter email of a contact person!')

        # check if software paths exists
        pas = {
            'configuration.freesurfer_home': self.freesurfer_home,
            'configuration.fsl_home': self.fsl_home,
            'configuration.dtk_home': self.dtk_home,
            'configuration.dtk_matrices': self.dtk_matrices
        }
        for k, p in pas.items():
            if not op.exists(p):
                msg = 'Required software path for %s does not exist: %s' % (k,
                                                                            p)
                raise Exception(msg)

        if self.subject_workingdir == '':
            msg = 'No working directory defined for subject'
            raise Exception(msg)
#        else:
#            wdir = self.get_subj_dir()
#            if not op.exists(wdir):
#                msg = 'Working directory %s does not exists for subject' % (wdir)
#                raise Exception(msg)
#            else:
#                wdiff = op.join(self.get_raw_diffusion())
#                print wdiff
#                if not op.exists(wdiff):
#                    msg = 'Diffusion MRI subdirectory %s does not exists for the subject' % wdiff
#                    raise Exception(msg)
#                wt1 = op.join(self.get_rawt1())
#                if not op.exists(wt1):
#                    msg = 'Structural MRI subdirectory %s T1 does not exist in RAWDATA' % wt1
#                    raise Exception(msg)

    def get_cmp_home(self):
        """ Return the cmp home path """
        return op.dirname(__file__)

    def get_rawdata(self):
        """ Return raw data path for the subject """
        return op.join(self.get_subj_dir(), 'RAWDATA')

    def get_log(self):
        """ Get subject log dir """
        return op.join(self.get_subj_dir(), 'LOG')

    def get_logname(self, suffix='.log'):
        """ Get a generic name for the log and pickle files """
        a = dt.datetime.now()
        return 'pipeline-%s-%02i%02i-%s-%s%s' % (
            a.date().isoformat(), a.time().hour, a.time().minute,
            self.subject_name, self.subject_timepoint, suffix)

    def get_logger(self):
        """ Get the logger instance created """
        if self.subject_logger is None:
            # setup logger for the subject
            self.subject_logger = \
                getLog(os.path.join(self.get_log(), self.get_logname()))
            return self.subject_logger
        else:
            return self.subject_logger

    def get_rawglob(self, modality):
        """ DEPRECATED: Get the file name endings for modality """

        if modality == 'diffusion':
            if not self.subject_raw_glob_diffusion == '':
                return self.subject_raw_glob_diffusion
            else:
                raise Exception('No raw_glob_diffusion defined for subject')

        elif modality == 'T1':
            if not self.subject_raw_glob_T1 == '':
                return self.subject_raw_glob_T1
            else:
                raise Exception('No raw_glob_T1 defined for subject')

        elif modality == 'T2':
            if not self.subject_raw_glob_T2 == '':
                return self.subject_raw_glob_T2
            else:
                raise Exception('No raw_glob_T2 defined for subject')

    def get_dicomfiles(self, modality):
        """ Get a list of dicom files for the requested modality. Tries to
        discover them automatically
        """
        from glob import glob

        if modality == 'diffusion':
            pat = self.get_raw_diffusion()
        elif modality == 'T1':
            pat = self.get_rawt1()
        elif modality == 'T2':
            pat = self.get_rawt2()
        elif modality == 'fMRI':
            pat = self.get_rawrsfmri()

        # discover files with *.* and *
        difiles = sorted(glob(op.join(pat, '*.*')) + glob(op.join(pat, '*')))

        # exclude potential .nii and .nii.gz files
        difiles = [
            e for e in difiles
            if not e.endswith('.nii') and not e.endswith('.nii.gz')
        ]

        # check if no files and throw exception
        if len(difiles) == 0:
            raise Exception('Could not find any DICOM files in folder %s' %
                            pat)

        return difiles

    def get_rawrsfmri(self):
        """ Get raw functional MRI path for subject """
        return op.join(self.get_rawdata(), 'fMRI')

    def get_rawt1(self):
        """ Get raw structural MRI T1 path for subject """
        return op.join(self.get_rawdata(), 'T1')

    def get_rawt2(self):
        """ Get raw structural MRI T2 path for subject """
        return op.join(self.get_rawdata(), 'T2')

    def get_subj_dir(self):
        return self.subject_workingdir

    def get_raw_diffusion(self):
        """ Get the raw diffusion path for subject """
        if self.diffusion_imaging_model == 'DSI':
            return op.join(self.get_subj_dir(), 'RAWDATA', 'DSI')
        elif self.diffusion_imaging_model == 'DTI':
            return op.join(self.get_subj_dir(), 'RAWDATA', 'DTI')
        elif self.diffusion_imaging_model == 'QBALL':
            return op.join(self.get_subj_dir(), 'RAWDATA', 'QBALL')

    def get_fs(self):
        """ Returns the subject root folder path for freesurfer files """
        return op.join(self.get_subj_dir(), 'FREESURFER')

    def get_stats(self):
        """ Return statistic output path """
        return op.join(self.get_subj_dir(), 'STATS')

    def get_cffdir(self):
        """ Returns path to store connectome file """
        return op.join(self.get_cmp(), 'cff')

    def get_nifti(self):
        """ Returns the subject root folder path for nifti files """
        return op.join(self.get_subj_dir(), 'NIFTI')

    def get_nifti_trafo(self):
        """ Returns the path to the subjects transformation / registration matrices """
        return op.join(self.get_nifti(), 'transformations')

    def get_nifti_bbregister(self):
        """ Returns the path to the subjects transformation / registration matrices, bbregister mode """
        return op.join(self.get_nifti(), 'bbregister')

    def get_diffusion_metadata(self):
        """ Diffusion metadata, i.e. where gradient_table.txt is stored """
        return op.join(self.get_nifti(), 'diffusion_metadata')

    def get_nifti_wm_correction(self):
        """ Returns the path to the subjects wm_correction path """
        return op.join(self.get_nifti(), 'wm_correction')

    def get_cmp(self):
        return op.join(self.get_subj_dir(), 'CMP')

    def get_cmp_rawdiff(self, ):
        return op.join(self.get_cmp(), 'raw_diffusion')

    def get_cmp_rawdiff_reconout(self):
        """ Returns the output path for diffusion reconstruction without prefix"""
        if self.diffusion_imaging_model == 'DSI':
            return op.join(self.get_cmp(), 'raw_diffusion', 'odf_0')
        elif self.diffusion_imaging_model == 'DTI':
            return op.join(self.get_cmp(), 'raw_diffusion', 'dti_0')
        elif self.diffusion_imaging_model == 'QBALL':
            return op.join(self.get_cmp(), 'raw_diffusion', 'qball_0')

    def get_cmp_rawdiff_resampled(self):
        return op.join(self.get_cmp_rawdiff(), '2x2x2')

    def get_cmp_fsout(self):
        return op.join(self.get_cmp(), 'fs_output')

    def get_cmp_fibers(self):
        return op.join(self.get_cmp(), 'fibers')

    def get_cmp_scalars(self):
        return op.join(self.get_cmp(), 'scalars')

    def get_cmp_matrices(self):
        return op.join(self.get_cmp_fibers(), 'matrices')

    def get_cmp_fmri(self):
        return op.join(self.get_cmp(), 'fMRI')

    def get_cmp_fmri_preproc(self):
        return op.join(self.get_cmp_fmri(), 'preprocessing')

    def get_cmp_fmri_matrices(self):
        return op.join(self.get_cmp_fmri(), 'matrices')

    def get_cmp_fmri_timeseries(self):
        return op.join(self.get_cmp_fmri(), 'timeseries')

    def get_cmp_tracto_mask(self):
        return op.join(self.get_cmp_fsout(), 'HR')

    def get_cmp_tracto_mask_tob0(self):
        return op.join(self.get_cmp_fsout(), 'HR__registered-TO-b0')

    def get_custom_gradient_table(self):
        """ Returns the absolute path to the custom gradient table
        with optional b-values in the 4th row """
        return self.gradient_table_file

    def get_cmp_gradient_table(self, name):
        """ Return default gradient tables shipped with CMP. These are mainly derived from
        Diffusion Toolkit """
        cmp_path = op.dirname(__file__)
        return op.join(cmp_path, 'data', 'diffusion', 'gradient_tables',
                       name + '.txt')

    def get_dtb_streamline_vecs_file(self, as_text=False):
        """ Returns the odf directions file used for DTB_streamline """
        cmp_path = op.dirname(__file__)
        if as_text:
            return op.join(cmp_path, 'data', 'diffusion', 'odf_directions',
                           '181_vecs.txt')
        else:
            return op.join(cmp_path, 'data', 'diffusion', 'odf_directions',
                           '181_vecs.dat')

    # XXX
    def get_cmp_scalarfields(self):
        """ Returns a list with tuples with the scalar field name and the
        absolute path to its nifti file """

        ret = []

        if self.diffusion_imaging_model == 'DSI':
            # add gfa per default
            ret.append(('gfa', op.join(self.get_cmp_scalars(),
                                       'dsi_gfa.nii.gz')))
            # XXX: add adc per default

        elif self.diffusion_imaging_model == 'DTI':
            # nothing to add yet for DTI
            pass

        return ret

    def get_dtk_dsi_matrix(self):
        """ Returns the DSI matrix from Diffusion Toolkit
        
        The parameters have to be set in the configuration object with keys:
        1. number of gradient directions : 'nr_of_gradient_directions'
        2. number of sampling directions : 'nr_of_sampling_directions'
        
        Example
        -------
        
        confobj.nr_of_gradient_directions = 515
        confobj.nr_of_sampling_directions = 181
        
        Returns matrix including absolute path to DSI_matrix_515x181.dat
        
        """

        grad = self.nr_of_gradient_directions
        samp = self.nr_of_sampling_directions
        fpath = op.join(self.dtk_matrices,
                        "DSI_matrix_%sx%s.dat" % (grad, samp))
        if not op.exists(fpath):
            msg = "DSI matrix does not exists: %s" % fpath
            raise Exception(msg)
        return fpath

    def get_lausanne_atlas(self, name=None):
        """ Return the absolute path to the lausanne parcellation atlas
        for the resolution name """

        cmp_path = op.dirname(__file__)

        provided_atlases = [
            'myatlas_36_rh.gcs', 'myatlasP1_16_rh.gcs', 'myatlasP17_28_rh.gcs',
            'myatlasP29_36_rh.gcs', 'myatlas_60_rh.gcs', 'myatlas_125_rh.gcs',
            'myatlas_250_rh.gcs', 'myatlas_36_lh.gcs', 'myatlasP1_16_lh.gcs',
            'myatlasP17_28_lh.gcs', 'myatlasP29_36_lh.gcs',
            'myatlas_60_lh.gcs', 'myatlas_125_lh.gcs', 'myatlas_250_lh.gcs'
        ]

        if name in provided_atlases:
            return op.join(cmp_path, 'data', 'colortable_and_gcs',
                           'my_atlas_gcs', name)
        else:
            msg = "Atlas %s does not exists" % name
            raise Exception(msg)

    def get_freeview_lut(self, name):
        """ Returns the Look-Up-Table as text file for a given parcellation scheme
        in  a dictionary """

        cmp_path = op.dirname(__file__)
        if name == "NativeFreesurfer":
            return {
                'freesurferaparc':
                op.join(cmp_path, 'data', 'parcellation', 'nativefreesurfer',
                        'freesurferaparc', 'FreeSurferColorLUT_adapted.txt')
            }
        else:
            return ""

    def get_lausanne_parcellation_path(self, parcellationname):

        cmp_path = op.dirname(__file__)

        if self.parcellation_scheme == "Lausanne2008":
            allowed_default_parcel = [
                'resolution83', 'resolution150', 'resolution258',
                'resolution500', 'resolution1015'
            ]
            if parcellationname in allowed_default_parcel:
                return op.join(cmp_path, 'data', 'parcellation',
                               'lausanne2008', parcellationname)
            else:
                msg = "Not a valid default parcellation name for the lausanne2008 parcellation scheme"
                raise Exception(msg)

        else:
            allowed_default_parcel = ['freesurferaparc']
            if parcellationname in allowed_default_parcel:
                return op.join(cmp_path, 'data', 'parcellation',
                               'nativefreesurfer', parcellationname)
            else:
                msg = "Not a valid default parcellation name for the NativeFreesurfer parcellation scheme"
                raise Exception(msg)

    def get_cmp_binary_path(self):
        """ Returns the path to the binary files for the current platform
        and architecture """

        if sys.platform == 'linux2':

            import platform as pf
            if '32' in pf.architecture()[0]:
                return op.join(op.dirname(__file__), "binary", "linux2",
                               "bit32")
            elif '64' in pf.architecture()[0]:
                return op.join(op.dirname(__file__), "binary", "linux2",
                               "bit64")
        else:
            raise Exception('No binary files compiled for your platform!')

    def get_pipeline_status_file(self):
        """Returns the absolute path of the pipeline status file"""
        return op.join(self.get_subj_dir(), self.pipeline_status_file)

    def init_pipeline_status(self):
        """Create the 'cmp.status'.  The 'cmp.status' file contains information
        about the inputs/outputs of each pipeline stage"""
        status_file = op.join(self.get_subj_dir(), self.pipeline_status_file)
        self.pipeline_status.Pipeline.name = "cmp"
        self.pipeline_status.SaveToFile(status_file)

    def update_pipeline_status(self):
        """Update the pipeline status on disk with the current status in memory"""
        status_file = op.join(self.get_subj_dir(), self.pipeline_status_file)
        self.pipeline_status.SaveToFile(status_file)
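
A hypothetical configuration session; all paths and metadata values below are
placeholders:

conf = PipelineConfiguration()
conf.project_dir = '/data/myproject'             # hypothetical path
conf.subject_name = 'sub01'
conf.subject_timepoint = 'tp1'
conf.subject_workingdir = '/data/myproject/sub01_tp1'
conf.creator = 'Jane Doe'
conf.publisher = 'Example Lab'
conf.email = 'jane@example.org'
conf.consistency_check()   # raises if required paths or metadata are missing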
Code example #11
0
class TStep(tr.HasStrictTraits):
    '''Manage the data and metadata of a time step within an iteration loop.
    '''
    title = tr.Str('<unnamed>')

    tloop_type = tr.Type(ITLoop)
    '''Type of time loop to be used with the model
    '''

    #=========================================================================
    # HISTORY
    #=========================================================================
    hist_type = tr.Type(Hist)

    hist = tr.Property(tr.Instance(IHist))
    r'''History representation of the model response.
    '''
    @tr.cached_property
    def _get_hist(self):
        return self.hist_type(tstep_source=self)

    debug = tr.Bool(False)

    t_n1 = tr.Float(0.0, auto_set=False, enter_set=True)
    '''Target value of the control variable.
    '''
    U_n = tr.Float(0.0, auto_set=False, enter_set=True)
    '''Current fundamental value of the primary variable.
    '''
    U_k = tr.Float(0.0, auto_set=False, enter_set=True)
    '''Current trial value of the primary variable.
    '''

    def init_state(self):
        '''Initialize state.
        '''
        self.U_n = 0.0
        self.t_n1 = 0.0
        self.U_k = 0.0

    def record_state(self):
        '''Provide the current state for history recording.
        '''
        pass

    _corr_pred = tr.Property(depends_on='U_k,t_n1')

    @tr.cached_property
    def _get__corr_pred(self):
        return self.get_corr_pred(self.U_k, self.t_n1)

    R = tr.Property

    def _get_R(self):
        R, _ = self._corr_pred
        return R

    dR = tr.Property

    def _get_dR(self):
        _, dR = self._corr_pred
        return dR

    R_norm = tr.Property

    def _get_R_norm(self):
        R = self.R
        return np.sqrt(R * R)

    def make_iter(self):
        d_U = self.R / self.dR
        self.U_k += d_U

    def make_incr(self):
        '''Update the control, primary and state variables.
        '''
        self.U_n = self.U_k
        # self.hist.record_timestep()

    sim = tr.Property()
    '''Launch a simulator - currently only one simulator is allowed
    for a model. Multiple might also make sense when different solvers
    are to be compared. The simulator pulls the time loop type
    from the model.
    '''

    @tr.cached_property
    def _get_sim(self):
        return Simulator(tstep=self)
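
Since make_iter applies the increment d_U = R / dR, a subclass only needs to
supply get_corr_pred returning the residual and its tangent. A toy sketch with
a hypothetical subclass that drives U_k towards the square root of the target
time:

class SqrtTStep(TStep):
    '''Hypothetical scalar step: find U_k with U_k**2 = t_n1.'''

    def get_corr_pred(self, U_k, t_n1):
        R = t_n1 - U_k ** 2        # residual
        dR = 2.0 * U_k             # tangent, so d_U = R / dR is a Newton step
        return R, dR

ts = SqrtTStep(U_k=1.0, t_n1=2.0)
while ts.R_norm > 1e-10:
    ts.make_iter()                 # U_k converges towards 1.41421...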
Code example #12
0
File: sz_material_model.py Project: simvisage/bmcs
class MaterialModel(BMCSLeafNode, RInputRecord, Vis2D):

    node_name = 'material model'

    f_c = tr.Float(-80.0,
                   MAT=True,
                   unit=r'$\mathrm{MPa}$',
                   symbol=r'f_\mathrm{c}',
                   auto_set=False,
                   enter_set=True,
                   desc='concrete strength')
    E_c = tr.Float(28000,
                   MAT=True,
                   unit=r'$\mathrm{MPa}$',
                   symbol=r'E_\mathrm{c}',
                   auto_set=False,
                   enter_set=True,
                   desc='concrete material stiffness')

    f_t = tr.Float(3.0, MAT=True)
    G_f = tr.Float(0.5, MAT=True)

    L_fps = tr.Float(50, MAT=True)
    L_c = tr.Property

    def _get_L_c(self):
        return self.E_c * self.G_f / self.f_t**2

    traits_view = ui.View(
        ui.Item('f_t'),
        ui.Item('f_c'),
        ui.Item('E_c'),
        ui.Item('G_f'),
        ui.Item('L'),
        ui.Item('L_c', style='readonly'),
    )

    tree_view = traits_view

    L = tr.Float(100, PARAM=True)
    co_law_data = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_co_law_data(self):
        return dict(f_t=float(self.f_t),
                    G_f=float(self.G_f),
                    f_c=self.f_c,
                    E_c=self.E_c,
                    L_c=self.L_c,
                    L=self.L)

    get_sig_eps = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_get_sig_eps(self):
        return sp.lambdify(eps, sig_eps.subs(self.co_law_data), 'numpy')

    get_d_sig_eps = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_get_d_sig_eps(self):
        return sp.lambdify(eps, d_sig_eps.subs(self.co_law_data), 'numpy')

    #=========================================================================
    # Sig w
    #=========================================================================
    get_sig_w = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_get_sig_w(self):
        return sp.lambdify(w, sig_w.subs(self.co_law_data), 'numpy')

    get_d_sig_w = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_get_d_sig_w(self):
        return sp.lambdify(w, d_sig_w.subs(self.co_law_data), 'numpy')

    #=========================================================================
    #
    #=========================================================================
    tau_1 = tr.Float(1.0, MAT=True)
    s_1 = tr.Float(0.000001, MAT=True)
    tau_2 = tr.Float(1.0, MAT=True)
    s_2 = tr.Float(0.02, MAT=True)
    tau_3 = tr.Float(0.0, MAT=True)
    s_3 = tr.Float(1.6, MAT=True)

    bond_law_data = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_bond_law_data(self):
        return dict(tau_1=self.tau_1,
                    s_1=self.s_1,
                    tau_2=self.tau_2,
                    s_2=self.s_2,
                    tau_3=self.tau_3,
                    s_3=self.s_3)

    get_tau_s_plus = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_get_tau_s_plus(self):
        return sp.lambdify(s, tau_s.subs(self.bond_law_data), 'numpy')

    get_d_tau_s_plus = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_get_d_tau_s_plus(self):
        return sp.lambdify(s, d_tau_s.subs(self.bond_law_data), 'numpy')

    def get_tau_s(self, s):
        signs = np.sign(s)
        return signs * self.get_tau_s_plus(signs * s)

    def get_d_tau_s(self, s):
        signs = np.sign(s)
        return signs * self.get_d_tau_s_plus(signs * s)

    #=========================================================================
    # Steel sig_eps
    #=========================================================================
    L_f = tr.Float(200.0, MAT=True)
    E_f = tr.Float(210000, MAT=True)
    f_s_t = tr.Float(500, MAT=True)

    steel_law_data = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_steel_law_data(self):
        return dict(L_f=float(self.L_f), E_f=float(self.E_f), f_s_t=self.f_s_t)

    get_sig_w_f = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_get_sig_w_f(self):
        return sp.lambdify(w, sig_w_f.subs(self.steel_law_data), 'numpy')

    get_d_sig_w_f = tr.Property(depends_on='+MAT')

    @tr.cached_property
    def _get_get_d_sig_w_f(self):
        return sp.lambdify(w, d_sig_w_f.subs(self.steel_law_data), 'numpy')

    #=========================================================================
    # Plotting
    #=========================================================================

    def plot_sig_eps(self, ax1, ax2):
        eps_min = (f_c / E_c * 2).subs(self.co_law_data)
        eps_max = (f_t / E_c * 2).subs(self.co_law_data)
        eps_data = np.linspace(float(eps_min), float(eps_max), 100)
        ax1.plot(eps_data, self.get_sig_eps(eps_data), color='black')
        ax1.set_xlabel(r'$\varepsilon\;\;\mathrm{[-]}$')
        ax1.set_ylabel(r'$\sigma\;\;\mathrm{[MPa]}$')
        ax1.set_title('Concrete law')
        ax2.plot(eps_data, self.get_d_sig_eps(eps_data), color='black')
        ax2.set_xlabel(r'$s\;\;\mathrm{[mm]}$')
        ax2.set_ylabel(r'$\mathrm{d}\sigma/\mathrm{d}w\;\;\mathrm{[MPa/mm]}$')
        ax2.set_title('tangential stiffness')

    def plot_sig_w(self, ax, vot=1.0):

        w_min_expr = (f_c / E_c * L * 2).subs(self.co_law_data)

        w_max_expr = (sp.solve(f_w + f_w.diff(w) * w,
                               w)[0]).subs(self.co_law_data)
        w_max = np.float_(w_max_expr) * 5
        w_min = np.float_(w_min_expr) * 0.5
        w_data = np.linspace(w_min, w_max, 100)
        ax.plot(w_data, self.get_sig_w(w_data), lw=2, color='red')
        ax.fill_between(w_data, self.get_sig_w(w_data), color='red', alpha=0.2)
        ax.set_xlabel(r'$w\;\;\mathrm{[mm]}$')
        ax.set_ylabel(r'$\sigma\;\;\mathrm{[MPa]}$')
        ax.set_title('crack opening law')

    def plot_sig_w_f(self, ax, vot=1.0):

        w_max_expr = (f_s_t / E_f * L_f * 2).subs(self.steel_law_data)
        w_min_expr = 0
        w_max = np.float_(w_max_expr)
        w_min = np.float_(w_min_expr)
        w_data = np.linspace(w_min, w_max, 50)
        ax.plot(w_data, self.get_sig_w_f(w_data), lw=2, color='darkred')
        ax.fill_between(w_data,
                        self.get_sig_w_f(w_data),
                        color='darkred',
                        alpha=0.2)
        ax.set_xlabel(r'$w\;\;\mathrm{[mm]}$')
        ax.set_ylabel(r'$\sigma\;\;\mathrm{[MPa]}$')
        ax.set_title('crack opening law')

    def plot_d_sig_w(self, ax2, vot=1.0):
        w_min_expr = (f_c / E_c * L * 2).subs(self.co_law_data)

        w_max_expr = (sp.solve(f_w + f_w.diff(w) * w,
                               w)[0]).subs(self.co_law_data)
        w_max = np.float_(w_max_expr) * 10
        w_min = np.float_(w_min_expr) * 10
        w_data = np.linspace(w_min, w_max, 100)
        ax2.plot(w_data, self.get_d_sig_w(w_data), color='orange')
        ax2.set_xlabel(r'$w\;\;\mathrm{[mm]}$')
        ax2.set_ylabel(r'$\mathrm{d}\sigma/\mathrm{d}w\;\;\mathrm{[MPa/mm]}$')

    def plot_tau_s(self, ax, vot=1.0):
        s_max = float(s_3.subs(self.bond_law_data))
        s_data = np.linspace(-s_max, s_max, 100)
        ax.plot(s_data, self.get_tau_s(s_data), lw=2, color='blue')
        ax.fill_between(s_data,
                        self.get_tau_s(s_data),
                        color='blue',
                        alpha=0.2)
        ax.set_xlabel(r'$s\;\;\mathrm{[mm]}$')
        ax.set_ylabel(r'$\tau\;\;\mathrm{[MPa]}$')
        ax.set_title('crack interface law')

    def plot_d_tau_s(self, ax2, vot=1.0):
        s_max = float(s_3.subs(self.bond_law_data))
        s_data = np.linspace(-s_max, s_max, 100)
        ax2.plot(s_data, self.get_d_tau_s(s_data), color='orange')
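
# A minimal usage sketch for the plotting methods above. The enclosing
# material-law class is defined before this excerpt; `MaterialLaw` is a
# hypothetical stand-in for its real name, and matplotlib is assumed.
import matplotlib.pyplot as plt

material = MaterialLaw()
fig, (ax1, ax2) = plt.subplots(1, 2)
material.plot_sig_eps(ax1, ax2)   # concrete law and its tangential stiffness
fig2, ax = plt.subplots()
material.plot_tau_s(ax)           # bond law over [-s_3, s_3]
plt.show()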
Code example #13
class TLoop(tr.HasTraits):

    tstep = tr.Instance(ITStep)
    sim = tr.DelegatesTo('tstep')
    tline = tr.Property

    def _get_tline(self):
        return self.sim.tline

    k_max = tr.Int(100, enter_set=True, auto_set=False)

    acc = tr.Float(1e-4, enter_set=True, auto_set=False)

    verbose = tr.Bool(False, enter_set=True, auto_set=False)

    paused = tr.Bool(False)

    restart = tr.Bool(True)

    user_wants_abort = tr.Property

    def _get_user_wants_abort(self):
        return self.restart or self.paused

    def init(self):
        if self.paused:
            self.paused = False
        if self.restart:
            self.tline.val = self.tline.min
            self.tstep.init_state()
            self.restart = False

    def eval(self):
        t_n1 = self.tline.val
        t_max = self.tline.max
        dt = self.tline.step

        if self.verbose:
            print('t:', end='')

        while t_n1 <= (t_max + 1e-8):
            if self.verbose:
                print('\t%5.2f' % t_n1, end='')
            k = 0
            self.tstep.t_n1 = t_n1
            while (k < self.k_max) and (not self.user_wants_abort):
                R_norm = self.tstep.R_norm
                if R_norm < self.acc:
                    if self.verbose:
                        print('(%g), ' % k, end='\n')
                    break
                self.tstep.make_iter()
                k += 1
            else:  # iteration loop ended without reaching convergence
                if self.user_wants_abort:  # user paused or restarted
                    break
                if k >= self.k_max:  # maximum number of iterations exceeded
                    # no success - abort the simulation
                    self.restart = True
                    print('')
                    raise StopIteration(
                        'Warning: '
                        'convergence not reached in %g iterations' % k)
                else:  # reduce the step size and retry the same target time
                    dt /= 2
                    continue

            # accept the time step and record the state in history
            self.tstep.make_incr(t_n1)
            # update the line - launches notifiers to subscribers
            self.tline.val = min(t_n1, self.tline.max)
            # set a new target time
            t_n1 += dt
            self.tstep.t_n1 = t_n1
        return
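
# A minimal driver sketch for TLoop. `my_tstep` is a hypothetical concrete
# ITStep implementation whose sim.tline provides min/max/step/val (names
# taken from the code above).
tloop = TLoop(tstep=my_tstep, k_max=50, acc=1e-6, verbose=True)
tloop.init()    # resets the time line and the step state on restart
tloop.eval()    # marches t from tline.min to tline.max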
Code example #14
class TStep(tr.HasStrictTraits):

    model = tr.Instance(IModel)
    sim = tr.DelegatesTo('model')

    t_n = tr.Float(0.0)
    t_n1 = tr.Float(0.0)

    def init_state(self):
        self.t_n = 0
        self.t_n1 = 0
        self.model.init_state()
        self.linalg_sys.register_constraints(0, 1)
        for var, state in self.model.S.items():
            state[...] = 0

    trial_state_changed = tr.Event

    R_dR_dU = tr.Property(depends_on='trial_state_changed, t_n1')

    @tr.cached_property
    def _get_R_dR_dU(self):
        R = self.R
        dR_dU = self.dR_dU
        return R, dR_dU

    R = tr.Property(depends_on='trial_state_changed, t_n1')

    @tr.cached_property
    def _get_R(self):
        R = self.model.F - self.t_n1
        return R

    R_norm = tr.Property(depends_on='trial_state_changed, t_n1')

    @tr.cached_property
    def _get_R_norm(self):
        R = self.R
        return np.sqrt(np.einsum('...i,...i', R, R))

    dR_dU = tr.Property(depends_on='trial_state_changed')

    @tr.cached_property
    def _get_dR_dU(self):
        return self.model.d_F_U

    linalg_sys = tr.Instance(LinAlgSys, ())

    def make_iter(self):
        R, dR_dU = self.R_dR_dU
        self.linalg_sys.A = dR_dU
        self.linalg_sys.b_0 = R
        self.linalg_sys.apply_constraints(self.step_flag, self.t_n, self.t_n1)

        d_U = self.linalg_sys.solve()
        #d_U = -R / dR_dU
        self.model.U_k += d_U
        self.trial_state_changed = True

    def make_incr(self, t_n1):
        self.model.U_n[...] = self.model.U_k
        self.t_n1 = t_n1
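
# A minimal Newton-loop sketch over TStep. `my_model` is a hypothetical
# IModel implementation (providing F, d_F_U, U_k, U_n, bc, S and
# init_state). Each make_iter() fires trial_state_changed, which
# invalidates the cached R, dR_dU and R_norm properties.
ts = TStep(model=my_model)
ts.init_state()
ts.t_n1 = 0.1                 # prescribe the control value for this step
while ts.R_norm > 1e-8:
    ts.make_iter()            # solve the linearized system, update U_k
ts.make_incr(ts.t_n1)         # accept the step: copy U_k into U_n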
Code example #15
class HCFF(tr.HasStrictTraits):
    '''High-Cycle Fatigue Filter
    '''

    #=========================================================================
    # Traits definitions
    #=========================================================================
    decimal = tr.Enum(',', '.')
    delimiter = tr.Str(';')
    records_per_second = tr.Float(100)
    take_time_from_first_column = tr.Bool
    file_csv = tr.File
    open_file_csv = tr.Button('Input file')
    skip_first_rows = tr.Int(3, auto_set=False, enter_set=True)
    columns_headers_list = tr.List([])
    x_axis = tr.Enum(values='columns_headers_list')
    y_axis = tr.Enum(values='columns_headers_list')
    x_axis_multiplier = tr.Enum(1, -1)
    y_axis_multiplier = tr.Enum(-1, 1)
    npy_folder_path = tr.Str
    file_name = tr.Str
    apply_filters = tr.Bool
    normalize_cycles = tr.Bool
    smooth = tr.Bool
    plot_every_nth_point = tr.Range(low=1, high=1000000, mode='spinner')
    force_name = tr.Str('Kraft')
    old_peak_force_before_cycles = tr.Float
    peak_force_before_cycles = tr.Float
    window_length = tr.Int(31)
    polynomial_order = tr.Int(2)
    activate = tr.Bool(False)
    plots_num = tr.Enum(1, 2, 3, 4, 6, 9)
    plot_list = tr.List()
    add_plot = tr.Button
    add_creep_plot = tr.Button(desc='Creep plot of X axis array')
    clear_plot = tr.Button
    parse_csv_to_npy = tr.Button
    generate_filtered_and_creep_npy = tr.Button
    add_columns_average = tr.Button
    force_max = tr.Float(100)
    force_min = tr.Float(40)
    min_cycle_force_range = tr.Float(50)
    cutting_method = tr.Enum('Define min cycle range(force difference)',
                             'Define Max, Min')
    columns_to_be_averaged = tr.List

    figure = tr.Instance(Figure)

    def _figure_default(self):
        figure = Figure(facecolor='white')
        figure.set_tight_layout(True)
        return figure

    #=========================================================================
    # File management
    #=========================================================================

    def _open_file_csv_fired(self):
        """ Handles the user clicking the 'Open...' button. """
        self.reset()
        extns = [
            '*.csv',
        ]  # seems to handle only one extension...
        wildcard = '|'.join(extns)

        dialog = FileDialog(title='Select text file',
                            action='open',
                            wildcard=wildcard,
                            default_path=self.file_csv)

        result = dialog.open()
        """ Test if the user opened a file to avoid throwing an exception if he 
        doesn't """
        if result == OK:
            self.file_csv = dialog.path
        else:
            return
        """ Filling x_axis and y_axis with values """
        headers_array = np.array(
            pd.read_csv(self.file_csv,
                        delimiter=self.delimiter,
                        decimal=self.decimal,
                        nrows=1,
                        header=None))[0]
        for i in range(len(headers_array)):
            headers_array[i] = self.get_valid_file_name(headers_array[i])
        self.columns_headers_list = list(headers_array)
        """ Saving file name and path and creating NPY folder """
        dir_path = os.path.dirname(self.file_csv)
        self.npy_folder_path = os.path.join(dir_path, 'NPY')
        if not os.path.exists(self.npy_folder_path):
            os.makedirs(self.npy_folder_path)

        self.file_name = os.path.splitext(os.path.basename(self.file_csv))[0]

    def _parse_csv_to_npy_fired(self):
        print('Parsing csv into npy files...')

        for i in range(
                len(self.columns_headers_list) -
                len(self.columns_to_be_averaged)):
            column_array = np.array(
                pd.read_csv(self.file_csv,
                            delimiter=self.delimiter,
                            decimal=self.decimal,
                            skiprows=self.skip_first_rows,
                            usecols=[i]))
            """ TODO! Create time array supposing it's column is the
            first one in the file and that we have 100 reads in 1 second """
            if i == 0 and self.take_time_from_first_column == False:
                column_array = np.arange(start=0.0,
                                         stop=len(column_array) /
                                         self.records_per_second,
                                         step=1.0 / self.records_per_second)

            np.save(
                os.path.join(
                    self.npy_folder_path, self.file_name + '_' +
                    self.columns_headers_list[i] + '.npy'), column_array)
        """ Exporting npy arrays of averaged columns """
        for columns_names in self.columns_to_be_averaged:
            temp = np.zeros((1))
            for column_name in columns_names:
                temp = temp + np.load(
                    os.path.join(self.npy_folder_path, self.file_name + '_' +
                                 column_name + '.npy')).flatten()
            avg = temp / len(columns_names)

            avg_file_suffix = self.get_suffix_for_columns_to_be_averaged(
                columns_names)
            np.save(
                os.path.join(self.npy_folder_path,
                             self.file_name + '_' + avg_file_suffix + '.npy'),
                avg)

        print('Finished parsing csv into npy files.')

    def get_suffix_for_columns_to_be_averaged(self, columns_names):
        suffix_for_saved_file_name = 'avg_' + '_'.join(columns_names)
        return suffix_for_saved_file_name

    def get_valid_file_name(self, original_file_name):
        valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
        new_valid_file_name = ''.join(c for c in original_file_name
                                      if c in valid_chars)
        return new_valid_file_name

    def _clear_plot_fired(self):
        self.figure.clear()
        self.plot_list = []
        self.data_changed = True

    def _add_columns_average_fired(self):
        columns_average = ColumnsAverage()
        for name in self.columns_headers_list:
            columns_average.columns.append(Column(column_name=name))

        # kind='modal' pauses execution until the window is closed
        columns_average.configure_traits(kind='modal')

        columns_to_be_averaged_temp = []
        for i in columns_average.columns:
            if i.selected:
                columns_to_be_averaged_temp.append(i.column_name)

        if columns_to_be_averaged_temp:  # If it's not empty
            self.columns_to_be_averaged.append(columns_to_be_averaged_temp)

            avg_file_suffix = self.get_suffix_for_columns_to_be_averaged(
                columns_to_be_averaged_temp)
            self.columns_headers_list.append(avg_file_suffix)

    def _generate_filtered_and_creep_npy_fired(self):

        if not self.npy_files_exist(
                os.path.join(self.npy_folder_path, self.file_name + '_' +
                             self.force_name + '.npy')):
            return

        # 1- Export filtered force
        force = np.load(
            os.path.join(self.npy_folder_path, self.file_name + '_' +
                         self.force_name + '.npy')).flatten()
        peak_force_before_cycles_index = np.where(
            abs((force)) > abs(self.peak_force_before_cycles))[0][0]
        force_ascending = force[0:peak_force_before_cycles_index]
        force_rest = force[peak_force_before_cycles_index:]

        force_max_indices, force_min_indices = self.get_array_max_and_min_indices(
            force_rest)

        force_max_min_indices = np.concatenate(
            (force_min_indices, force_max_indices))
        force_max_min_indices.sort()

        force_rest_filtered = force_rest[force_max_min_indices]
        force_filtered = np.concatenate((force_ascending, force_rest_filtered))
        np.save(
            os.path.join(
                self.npy_folder_path,
                self.file_name + '_' + self.force_name + '_filtered.npy'),
            force_filtered)

        # 2- Export filtered displacements
        # TODO I skipped time presuming it's the first column
        for i in range(1, len(self.columns_headers_list)):
            if self.columns_headers_list[i] != str(self.force_name):

                disp = np.load(
                    os.path.join(
                        self.npy_folder_path, self.file_name + '_' +
                        self.columns_headers_list[i] + '.npy')).flatten()
                disp_ascending = disp[0:peak_force_before_cycles_index]
                disp_rest = disp[peak_force_before_cycles_index:]

                if self.activate:
                    disp_ascending = savgol_filter(
                        disp_ascending,
                        window_length=self.window_length,
                        polyorder=self.polynomial_order)

                disp_rest_filtered = disp_rest[force_max_min_indices]
                filtered_disp = np.concatenate(
                    (disp_ascending, disp_rest_filtered))
                np.save(
                    os.path.join(
                        self.npy_folder_path, self.file_name + '_' +
                        self.columns_headers_list[i] + '_filtered.npy'),
                    filtered_disp)

        # 3- Export creep for displacements
        # Cutting unwanted max min values to get correct full cycles and remove
        # false min/max values caused by noise
        if self.cutting_method == "Define Max, Min":
            force_max_indices_cutted, force_min_indices_cutted = \
                self.cut_indices_of_min_max_range(force_rest,
                                                  force_max_indices,
                                                  force_min_indices,
                                                  self.force_max,
                                                  self.force_min)
        elif self.cutting_method == "Define min cycle range(force difference)":
            force_max_indices_cutted, force_min_indices_cutted = \
                self.cut_indices_of_defined_range(force_rest,
                                                  force_max_indices,
                                                  force_min_indices,
                                                  self.min_cycle_force_range)

        print("Cycles number= ", len(force_min_indices))
        print("Cycles number after cutting fake cycles because of noise= ",
              len(force_min_indices_cutted))

        # TODO time is skipped, presuming it's the first column
        for i in range(1, len(self.columns_headers_list)):
            array = np.load(
                os.path.join(
                    self.npy_folder_path, self.file_name + '_' +
                    self.columns_headers_list[i] + '.npy')).flatten()
            array_rest = array[peak_force_before_cycles_index:]
            array_rest_maxima = array_rest[force_max_indices_cutted]
            array_rest_minima = array_rest[force_min_indices_cutted]
            np.save(
                os.path.join(
                    self.npy_folder_path, self.file_name + '_' +
                    self.columns_headers_list[i] + '_max.npy'),
                array_rest_maxima)
            np.save(
                os.path.join(
                    self.npy_folder_path, self.file_name + '_' +
                    self.columns_headers_list[i] + '_min.npy'),
                array_rest_minima)

        print('Filtered and creep npy files are generated.')

    def cut_indices_of_min_max_range(self, array, max_indices, min_indices,
                                     range_upper_value, range_lower_value):
        cutted_max_indices = []
        cutted_min_indices = []

        for max_index in max_indices:
            if abs(array[max_index]) > abs(range_upper_value):
                cutted_max_indices.append(max_index)
        for min_index in min_indices:
            if abs(array[min_index]) < abs(range_lower_value):
                cutted_min_indices.append(min_index)
        return cutted_max_indices, cutted_min_indices

    def cut_indices_of_defined_range(self, array, max_indices, min_indices,
                                     range_):
        cutted_max_indices = []
        cutted_min_indices = []

        for max_index, min_index in zip(max_indices, min_indices):
            if abs(array[max_index] - array[min_index]) > range_:
                cutted_max_indices.append(max_index)
                cutted_min_indices.append(min_index)

        return cutted_max_indices, cutted_min_indices

    def get_array_max_and_min_indices(self, input_array):

        # Checking dominant sign
        positive_values_count = np.sum(np.array(input_array) >= 0)
        negative_values_count = input_array.size - positive_values_count

        # Getting max and min indices
        if (positive_values_count > negative_values_count):
            force_max_indices = argrelextrema(input_array, np.greater_equal)[0]
            force_min_indices = argrelextrema(input_array, np.less_equal)[0]
        else:
            force_max_indices = argrelextrema(input_array, np.less_equal)[0]
            force_min_indices = argrelextrema(input_array, np.greater_equal)[0]

        # Remove subsequent max/min indices (np.greater_equal will give 1,2 for
        # [4, 8, 8, 1])
        force_max_indices = self.remove_subsequent_max_values(
            force_max_indices)
        force_min_indices = self.remove_subsequent_min_values(
            force_min_indices)

        # If size is not equal remove the last element from the big one
        if force_max_indices.size > force_min_indices.size:
            force_max_indices = force_max_indices[:-1]
        elif force_max_indices.size < force_min_indices.size:
            force_min_indices = force_min_indices[:-1]

        return force_max_indices, force_min_indices

    def remove_subsequent_max_values(self, force_max_indices):
        to_delete_from_maxima = []
        for i in range(force_max_indices.size - 1):
            if force_max_indices[i + 1] - force_max_indices[i] == 1:
                to_delete_from_maxima.append(i)

        force_max_indices = np.delete(force_max_indices, to_delete_from_maxima)
        return force_max_indices

    def remove_subsequent_min_values(self, force_min_indices):
        to_delete_from_minima = []
        for i in range(force_min_indices.size - 1):
            if force_min_indices[i + 1] - force_min_indices[i] == 1:
                to_delete_from_minima.append(i)
        force_min_indices = np.delete(force_min_indices, to_delete_from_minima)
        return force_min_indices

    def _activate_changed(self):
        if not self.activate:
            self.old_peak_force_before_cycles = self.peak_force_before_cycles
            self.peak_force_before_cycles = 0
        else:
            self.peak_force_before_cycles = self.old_peak_force_before_cycles

    def _window_length_changed(self, new):

        if new <= self.polynomial_order:
            dialog = MessageDialog(
                title='Attention!',
                message='Window length must be bigger than polynomial order.')
            dialog.open()

        if new % 2 == 0 or new <= 0:
            dialog = MessageDialog(
                title='Attention!',
                message='Window length must be an odd, positive integer.')
            dialog.open()

    def _polynomial_order_changed(self, new):

        if new >= self.window_length:
            dialog = MessageDialog(
                title='Attention!',
                message='Polynomial order must be less than window length.')
            dialog.open()

    #=========================================================================
    # Plotting
    #=========================================================================

    plot_list_current_elements_num = tr.Int(0)

    def npy_files_exist(self, path):
        if os.path.exists(path):
            return True
        else:
            dialog = MessageDialog(
                title='Attention!',
                message='Please parse csv file to generate npy files first.')
            dialog.open()
            return False

    def filtered_and_creep_npy_files_exist(self, path):
        if os.path.exists(path):
            return True
        else:
            dialog = MessageDialog(
                title='Attention!',
                message='Please generate filtered and creep npy files first.')
            dialog.open()
            return False

    def max_plots_number_is_reached(self):
        if len(self.plot_list) >= self.plots_num:
            dialog = MessageDialog(title='Attention!',
                                   message='Max plots number is {}'.format(
                                       self.plots_num))
            dialog.open()
            return True
        else:
            return False

    def _plot_list_changed(self):
        if len(self.plot_list) > self.plot_list_current_elements_num:
            self.plot_list_current_elements_num = len(self.plot_list)

    data_changed = tr.Event

    def _add_plot_fired(self):

        if self.max_plots_number_is_reached():
            return

        if self.apply_filters:

            if not self.filtered_and_creep_npy_files_exist(
                    os.path.join(
                        self.npy_folder_path, self.file_name + '_' +
                        self.x_axis + '_filtered.npy')):
                return

            x_axis_name = self.x_axis + '_filtered'
            y_axis_name = self.y_axis + '_filtered'

            print('Loading npy files...')

            x_axis_array = self.x_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.x_axis
                                     + '_filtered.npy'))
            y_axis_array = self.y_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.y_axis
                                     + '_filtered.npy'))
        else:

            if not self.npy_files_exist(
                    os.path.join(self.npy_folder_path, self.file_name + '_' +
                                 self.x_axis + '.npy')):
                return

            x_axis_name = self.x_axis
            y_axis_name = self.y_axis

            print('Loading npy files...')

            x_axis_array = self.x_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.x_axis
                                     + '.npy'))
            y_axis_array = self.y_axis_multiplier * \
                np.load(os.path.join(self.npy_folder_path,
                                     self.file_name + '_' + self.y_axis
                                     + '.npy'))

        print('Adding Plot...')
        mpl.rcParams['agg.path.chunksize'] = 50000

        ax = self.apply_new_subplot()

        ax.set_xlabel(x_axis_name)
        ax.set_ylabel(y_axis_name)
        ax.plot(x_axis_array,
                y_axis_array,
                linewidth=1.2,
                color=np.random.rand(3, ),
                label=self.file_name + ', ' + x_axis_name)

        ax.legend()

        self.plot_list.append('{}, {}'.format(x_axis_name, y_axis_name))
        self.data_changed = True
        print('Finished adding plot!')

    def apply_new_subplot(self):
        figure = self.figure
        # map the requested number of plots to a (rows, columns) grid
        layouts = {1: (1, 1), 2: (1, 2), 3: (1, 3),
                   4: (2, 2), 6: (2, 3), 9: (3, 3)}
        n_rows, n_cols = layouts[self.plots_num]
        return figure.add_subplot(n_rows, n_cols, len(self.plot_list) + 1)

    def _add_creep_plot_fired(self):

        if not self.filtered_and_creep_npy_files_exist(
                os.path.join(self.npy_folder_path, self.file_name + '_' +
                             self.x_axis + '_max.npy')):
            return

        if self.max_plots_number_is_reached():
            return

        disp_max = self.x_axis_multiplier * \
            np.load(os.path.join(self.npy_folder_path,
                                 self.file_name + '_' + self.x_axis + '_max.npy'))
        disp_min = self.x_axis_multiplier * \
            np.load(os.path.join(self.npy_folder_path,
                                 self.file_name + '_' + self.x_axis + '_min.npy'))
        complete_cycles_number = disp_max.size

        print('Adding creep-fatigue plot...')
        mpl.rcParams['agg.path.chunksize'] = 50000

        ax = self.apply_new_subplot()

        ax.set_xlabel('Cycles number')
        ax.set_ylabel(self.x_axis)

        if self.plot_every_nth_point > 1:
            disp_max = disp_max[0::self.plot_every_nth_point]
            disp_min = disp_min[0::self.plot_every_nth_point]

        if self.smooth:
            # Keeping the first item of the array and filtering the rest
            disp_max = np.concatenate(
                (np.array([disp_max[0]]),
                 savgol_filter(disp_max[1:],
                               window_length=self.window_length,
                               polyorder=self.polynomial_order)))
            disp_min = np.concatenate(
                (np.array([disp_min[0]]),
                 savgol_filter(disp_min[1:],
                               window_length=self.window_length,
                               polyorder=self.polynomial_order)))

        if self.normalize_cycles:
            ax.plot(np.linspace(0, 1., disp_max.size),
                    disp_max,
                    linewidth=1.2,
                    color='red',
                    label='Max' + ', ' + self.file_name + ', ' + self.x_axis)
            ax.plot(np.linspace(0, 1., disp_min.size),
                    disp_min,
                    linewidth=1.2,
                    color='green',
                    label='Min' + ', ' + self.file_name + ', ' + self.x_axis)
        else:
            ax.plot(np.linspace(0, complete_cycles_number, disp_max.size),
                    disp_max,
                    linewidth=1.2,
                    color='red',
                    label='Max' + ', ' + self.file_name + ', ' + self.x_axis)
            ax.plot(np.linspace(0, complete_cycles_number, disp_min.size),
                    disp_min,
                    linewidth=1.2,
                    color='green',
                    label='Min' + ', ' + self.file_name + ', ' + self.x_axis)

        ax.legend()

        self.plot_list.append('Creep-fatigue: {}, {}'.format(
            self.x_axis, self.y_axis))
        self.data_changed = True

        print('Finished adding creep-fatigue plot!')

    def reset(self):
        self.delimiter = ';'
        self.skip_first_rows = 3
        self.columns_headers_list = []
        self.npy_folder_path = ''
        self.file_name = ''
        self.apply_filters = False
        self.force_name = 'Kraft'
        self.plot_list = []
        self.columns_to_be_averaged = []

    #=========================================================================
    # Configuration of the view
    #=========================================================================

    traits_view = ui.View(ui.HSplit(
        ui.VSplit(
            ui.HGroup(ui.UItem('open_file_csv'),
                      ui.UItem('file_csv', style='readonly', width=0.1),
                      label='Input data'),
            ui.Item('add_columns_average', show_label=False),
            ui.VGroup(
                ui.VGroup(ui.Item(
                    'records_per_second',
                    enabled_when='take_time_from_first_column == False'),
                          ui.Item('take_time_from_first_column'),
                          label='Time calculation',
                          show_border=True),
                ui.VGroup(ui.Item('skip_first_rows'),
                          ui.Item('decimal'),
                          ui.Item('delimiter'),
                          ui.Item('parse_csv_to_npy', show_label=False),
                          label='Processing csv file',
                          show_border=True),
                ui.VGroup(ui.HGroup(ui.Item('plots_num'),
                                    ui.Item('clear_plot')),
                          ui.HGroup(ui.Item('x_axis'),
                                    ui.Item('x_axis_multiplier')),
                          ui.HGroup(ui.Item('y_axis'),
                                    ui.Item('y_axis_multiplier')),
                          ui.VGroup(ui.HGroup(
                              ui.Item('add_plot', show_label=False),
                              ui.Item('apply_filters')),
                                    show_border=True,
                                    label='Plotting X axis with Y axis'),
                          ui.VGroup(ui.HGroup(
                              ui.Item('add_creep_plot', show_label=False),
                              ui.VGroup(ui.Item('normalize_cycles'),
                                        ui.Item('smooth'),
                                        ui.Item('plot_every_nth_point'))),
                                    show_border=True,
                                    label='Plotting Creep-fatigue of x-axis'),
                          ui.Item('plot_list'),
                          show_border=True,
                          label='Plotting'))),
        ui.VGroup(
            ui.Item('force_name'),
            ui.VGroup(ui.VGroup(
                ui.Item('window_length'),
                ui.Item('polynomial_order'),
                enabled_when='activate == True or smooth == True'),
                      show_border=True,
                      label='Smoothing parameters (Savitzky-Golay filter):'),
            ui.VGroup(ui.VGroup(
                ui.Item('activate'),
                ui.Item('peak_force_before_cycles',
                        enabled_when='activate == True')),
                      show_border=True,
                      label='Smooth ascending branch for all displacements:'),
            ui.VGroup(
                ui.Item('cutting_method'),
                ui.VGroup(ui.Item('force_max'),
                          ui.Item('force_min'),
                          label='Max, Min:',
                          show_border=True,
                          enabled_when='cutting_method == "Define Max, Min"'),
                ui.VGroup(
                    ui.Item('min_cycle_force_range'),
                    label='Min cycle force range:',
                    show_border=True,
                    enabled_when=
                    'cutting_method == "Define min cycle range(force difference)"'
                ),
                show_border=True,
                label='Cut fake cycles for creep:'),
            ui.Item('generate_filtered_and_creep_npy', show_label=False),
            show_border=True,
            label='Filters'),
        ui.UItem('figure',
                 editor=MPLFigureEditor(),
                 resizable=True,
                 springy=True,
                 width=0.8,
                 label='2d plots')),
                          title='HCFF Filter',
                          resizable=True,
                          width=0.85,
                          height=0.7)
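
# A minimal launch sketch, assuming HCFF is run as a standalone TraitsUI
# application (the traits_view above suggests this; the surrounding project
# may provide its own launcher).
if __name__ == '__main__':
    hcff = HCFF()
    hcff.configure_traits()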
Code example #16
class CameraImage(traits.HasTraits):

    #Traits view definitions:
    traits_view = traitsui.View(traitsui.Group(
        traitsui.HGroup(traitsui.Item('pixelsX', label="Pixels X"),
                        traitsui.Item('pixelsY', label="Pixels Y"))),
                                buttons=["OK", "Cancel"])

    pixelsX = traits.CInt(768)
    pixelsY = traits.CInt(512)

    xs = traits.Array
    ys = traits.Array
    zs = traits.Array

    minZ = traits.Float
    maxZ = traits.Float

    scale = traits.Float(10089.33)
    offset = traits.Float(5000.)

    ODCorrectionBool = traits.Bool(
        False,
        desc=
        "if true will correct the image to account for the maximum OD parameter"
    )
    ODSaturationValue = traits.Float(
        3.0, desc="the value of the saturated optical density")

    model_changed = traits.Event

    fitList = traits.List(fits.Fit)  #list of possible fits

    def __init__(self, *args, **kwargs):
        super(CameraImage, self).__init__(*args, **kwargs)
        self.fitList = [
            fits.GaussianFit(endX=self.pixelsX, endY=self.pixelsY),
            fits.RotatedGaussianFit(endX=self.pixelsX, endY=self.pixelsY),
            fits.ParabolaFit(endX=self.pixelsX, endY=self.pixelsY),
            fits.GaussianAndParabolaFit(endX=self.pixelsX, endY=self.pixelsY),
            fits.FermiGasFit(endX=self.pixelsX, endY=self.pixelsY),
            fits.ClippedExponentialIntegral(endX=self.pixelsX,
                                            endY=self.pixelsY)
        ]

    def _xs_default(self):
        return scipy.linspace(0.0, self.pixelsX - 1, self.pixelsX)

    def _ys_default(self):
        return scipy.linspace(0.0, self.pixelsY - 1, self.pixelsY)

    def _zs_default(self):
        return scipy.zeros((self.pixelsY, self.pixelsX))

    def getImageData(self, imageFile):
        logger.debug("pulling image data")
        # The xs and ys used for the image plot range need to be the
        # edges of the cells.
        self.imageFile = imageFile
        self.xs = scipy.linspace(0.0, self.pixelsX - 1, self.pixelsX)
        self.ys = scipy.linspace(0.0, self.pixelsY - 1, self.pixelsY)
        if not os.path.exists(imageFile):
            # if no file is defined, the image is flat zeros of camera size
            logger.error("image file not found. filling with zeros")
            self.zs = scipy.zeros((self.pixelsY, self.pixelsX))
            self.minZ = 0.0
            self.maxZ = 1.0
            self.model_changed = True
        else:
            try:
                self.rawImage = scipy.misc.imread(imageFile)
                self.zs = (self.rawImage - self.offset) / self.scale
                if self.ODCorrectionBool:
                    logger.info("Correcting for OD saturation")
                    self.zs = scipy.log(
                        (1.0 - scipy.exp(-self.ODSaturationValue)) /
                        (scipy.exp(-self.zs) -
                         scipy.exp(-self.ODSaturationValue)))
                    # we should account for the fact that if the ODSaturationValue
                    # is wrong, or there is noise, we can get complex numbers!
                    self.zs[scipy.imag(self.zs) > 0] = scipy.nan
                    self.zs = self.zs.astype(float)
                self.minZ = scipy.nanmin(self.zs)
                self.maxZ = scipy.nanmax(self.zs)
                self.model_changed = True
                for fit in self.fitList:
                    fit.xs = self.xs
                    fit.ys = self.ys
                    fit.zs = self.zs

            except Exception as e:
                logger.error("error in setting data %s" % e.message)
                logger.debug(
                    "Sometimes we get an error  unsupported operand type(s) for -: 'instance' and 'float'. "
                )
                logger.debug(
                    "checking for what could cause this . Tell Tim if you see this error message!!!!"
                )
                logger.debug("type(self.rawImage) ->  %s" %
                             type(self.rawImage))
                logger.debug("type(self.offset) ->  %s" % type(self.offset))

    def _scale_changed(self):
        """update zs data when scale or offset changed """
        logger.info("model scale changed")
        self.getImageData(self.imageFile)

    def _offset_changed(self):
        """update zs data when scale or offset changed """
        self.getImageData(self.imageFile)
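
# A small standalone sketch of the OD saturation correction applied in
# getImageData above: for a measured optical density zs and a saturation
# value OD_sat, the corrected density is
#     zs_corr = log((1 - exp(-OD_sat)) / (exp(-zs) - exp(-OD_sat)))
# Pixels where exp(-zs) <= exp(-OD_sat) have no real solution, hence the
# NaN masking in the class. numpy is used here in place of the old scipy
# aliases.
import numpy

zs = numpy.array([0.5, 1.0, 2.0, 2.9])
OD_sat = 3.0
zs_corr = numpy.log((1.0 - numpy.exp(-OD_sat)) /
                    (numpy.exp(-zs) - numpy.exp(-OD_sat)))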
Code example #17
class MATS2DMplDamageEEQ(MATS2DEval):

    #-------------------------------------------------------------------------
    # Material parameters
    #-------------------------------------------------------------------------
    E = tr.Float(34e+3,
                 label="E",
                 desc="Young's Modulus",
                 auto_set=False,
                 input=True)

    nu = tr.Float(0.2,
                  label='nu',
                  desc="Poisson's ratio",
                  auto_set=False,
                  input=True)

    epsilon_0 = Float(59e-6,
                      label="epsilon_0",
                      desc="strain at the onset of damage",
                      enter_set=True,
                      auto_set=False)

    epsilon_f = Float(250e-6,
                      label="epsilon_f",
                      desc="strain controlling the slope of the softening branch",
                      enter_set=True,
                      auto_set=False)

    c_T = Float(0.00,
                label="c_T",
                desc="ratio of tangential to normal strain contribution",
                enter_set=True,
                auto_set=False)

    state_var_shapes = tr.Property(tr.Dict(), depends_on='n_mp')
    '''Dictionary of state variable entries with their array shapes.
    '''
    @cached_property
    def _get_state_var_shapes(self):
        return dict(kappa=(self.n_mp, ), omega=(self.n_mp, ))

    def _get_lame_params(self):
        la = self.E * self.nu / ((1. + self.nu) * (1. - 2. * self.nu))
        # second Lame parameter (shear modulus)
        mu = self.E / (2. + 2. * self.nu)
        return la, mu

    D_abef = tr.Property(tr.Array, depends_on='+input')

    @tr.cached_property
    def _get_D_abef(self):
        la, mu = self._get_lame_params()
        delta = np.identity(2)
        delta = np.identity(2)
        D_abef = (np.einsum(',ij,kl->ijkl', la, delta, delta) +
                  np.einsum(',ik,jl->ijkl', mu, delta, delta) +
                  np.einsum(',il,jk->ijkl', mu, delta, delta))

        return D_abef

    #-------------------------------------------------------------------------
    # MICROPLANE-Kinematic constraints
    #-------------------------------------------------------------------------

    # get the dyadic product of the microplane normals
    _MPNN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPNN(self):
        # dyadic product of the microplane normals

        MPNN_nij = einsum('ni,nj->nij', self._MPN, self._MPN)
        return MPNN_nij

    # get the third order tangential tensor (operator) for each microplane
    _MPTT = Property(depends_on='n_mp')

    @cached_property
    def _get__MPTT(self):
        # Third order tangential tensor for each microplane
        delta = identity(2)
        MPTT_nijr = 0.5 * (
            einsum('ni,jr -> nijr', self._MPN, delta) +
            einsum('nj,ir -> njir', self._MPN, delta) -
            2.0 * einsum('ni,nj,nr -> nijr', self._MPN, self._MPN, self._MPN))
        return MPTT_nijr

    def _get_e_na(self, eps_ab):
        r'''
        Projection of apparent strain onto the individual microplanes
        '''
        e_ni = einsum('nb,...ba->...na', self._MPN, eps_ab)
        return e_ni

    def _get_e_N_n(self, e_na):
        r'''
        Get the normal strain array for each microplane
        '''
        e_N_n = einsum('...na, na->...n', e_na, self._MPN)
        return e_N_n

    def _get_e_equiv_n(self, e_na):
        r'''
        Returns a list of the microplane equivalent strains
        based on the list of microplane strain vectors
        '''
        # magnitude of the normal strain vector for each microplane
        e_N_n = self._get_e_N_n(e_na)
        # positive part of the normal strain magnitude for each microplane
        e_N_pos_n = (np.abs(e_N_n) + e_N_n) / 2.0
        # normal strain vector for each microplane
        e_N_na = einsum('...n,ni -> ...ni', e_N_n, self._MPN)
        # tangent strain ratio
        c_T = self.c_T
        # tangential strain vector for each microplane
        e_T_na = e_na - e_N_na
        # squared tangential strain vector for each microplane
        e_TT_n = einsum('...ni,...ni -> ...n', e_T_na, e_T_na)
        # equivalent strain for each microplane
        e_equiv_n = sqrt(e_N_pos_n * e_N_pos_n + c_T * e_TT_n)
        return e_equiv_n

    def update_state_variables(self, eps_ab, kappa_n, omega_n):
        e_na = self._get_e_na(eps_ab)
        eps_eq_n = self._get_e_equiv_n(e_na)
        f_trial_n = eps_eq_n - self.epsilon_0
        I = np.where(f_trial_n > 0)
        k_n = np.max(np.array([kappa_n[I], eps_eq_n[I]]), axis=0)
        kappa_n[I] = k_n
        omega_n[I] = self._get_omega(k_n)

    def _get_omega(self, kappa_Emn):
        '''
        Return new value of damage parameter
        @param kappa_Emn: maximum equivalent strain reached so far
        '''
        omega_Emn = np.zeros_like(kappa_Emn)
        epsilon_0 = self.epsilon_0
        epsilon_f = self.epsilon_f
        kappa_idx = np.where(kappa_Emn >= epsilon_0)
        omega_Emn[kappa_idx] = (1.0 -
                                (epsilon_0 / kappa_Emn[kappa_idx] *
                                 np.exp(-1.0 *
                                        (kappa_Emn[kappa_idx] - epsilon_0) /
                                        (epsilon_f - epsilon_0))))
        return omega_Emn

    def _get_phi_Emab(self, kappa_Emn):
        # Returns the 2nd order damage tensor 'phi_mtx'
        # scalar integrity factor for each microplane
        phi_Emn = np.sqrt(1.0 - self._get_omega(kappa_Emn))
        # weighted integration over all microplanes
        phi_Emab = einsum('...n,n,nab->...ab', phi_Emn, self._MPW, self._MPNN)
        return phi_Emab

    def _get_beta_Emabcd(self, phi_Emab):
        '''
        Returns the 4th order damage tensor 'beta4' using sum-type symmetrization
        (cf. [Jir99], Eq.(21))
        '''
        delta = identity(2)
        beta_Emijkl = 0.25 * (einsum('...ik,jl->...ijkl', phi_Emab, delta) +
                              einsum('...il,jk->...ijkl', phi_Emab, delta) +
                              einsum('...jk,il->...ijkl', phi_Emab, delta) +
                              einsum('...jl,ik->...ijkl', phi_Emab, delta))

        return beta_Emijkl

    #-------------------------------------------------------------------------
    # Evaluation - get the corrector and predictor
    #-------------------------------------------------------------------------

    def get_corr_pred(self, eps_ab, tn1, kappa, omega):

        self.update_state_variables(eps_ab, kappa, omega)

        #----------------------------------------------------------------------
        # if the regularization using the crack-band concept is on calculate the
        # effective element length in the direction of principle strains
        #----------------------------------------------------------------------
        # if self.regularization:
        #    h = self.get_regularizing_length(sctx, eps_app_eng)
        #    self.phi_fn.h = h

        #------------------------------------------------------------------
        # Damage tensor (2th order):
        #------------------------------------------------------------------
        phi_ab = self._get_phi_Emab(kappa)
        #------------------------------------------------------------------
        # Damage tensor (4th order) using product- or sum-type symmetrization:
        #------------------------------------------------------------------
        beta_abcd = self._get_beta_Emabcd(phi_ab)
        #------------------------------------------------------------------
        # Damaged stiffness tensor calculated based on the damage tensor beta4:
        #------------------------------------------------------------------
        D_ijab = einsum('...ijab, abef, ...cdef -> ...ijcd', beta_abcd,
                        self.D_abef, beta_abcd)

        sig_ab = einsum('...abef,...ef -> ...ab', D_ijab, eps_ab)

        return sig_ab, D_ijab

    '''Number of microplanes - currently fixed for the 2D version
    '''
    n_mp = Constant(22)

    _alpha_list = Property(depends_on='n_mp')

    @cached_property
    def _get__alpha_list(self):
        return array(
            [np.pi / self.n_mp * (i - 0.5) for i in range(1, self.n_mp + 1)])

    #-----------------------------------------------
    # get the normal vectors of the microplanes
    #-----------------------------------------------

    _MPN = Property(depends_on='n_mp')

    @cached_property
    def _get__MPN(self):
        # microplane normals:

        # equidistant directions over the unit circle (endpoint=False avoids
        # duplicating the 0 / 2*pi direction)
        alpha_list = np.linspace(0, 2 * np.pi, self.n_mp, endpoint=False)

        MPN = np.array([[np.cos(alpha), np.sin(alpha)]
                        for alpha in alpha_list])

        return MPN

    #-------------------------------------
    # get the weights of the microplanes
    #-------------------------------------
    _MPW = Property(depends_on='n_mp')

    @cached_property
    def _get__MPW(self):
        # equal weights for the equidistant microplane directions;
        # they sum up to 2 (integration factor over the unit circle)
        MPW = np.ones(self.n_mp) / self.n_mp * 2

        return MPW
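
# A minimal evaluation sketch, assuming the state arrays are allocated as
# zeros with the shapes announced in state_var_shapes (as the time stepper
# of the surrounding framework would do).
mats = MATS2DMplDamageEEQ()
kappa = np.zeros(mats.state_var_shapes['kappa'])
omega = np.zeros(mats.state_var_shapes['omega'])
eps_ab = np.array([[2.0e-4, 0.0],
                   [0.0, 0.0]])    # uniaxial tensile strain state
sig_ab, D_abcd = mats.get_corr_pred(eps_ab, 0.0, kappa, omega)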
Code example #18
class SpectrumCalibration(SpanSelectorInSpectrum):
    left_value = t.Float(label='New left value')
    right_value = t.Float(label='New right value')
    offset = t.Float()
    scale = t.Float()
    units = t.Unicode()
    view = tu.View(
        tu.Group(
            'left_value',
            'right_value',
            tu.Item('ss_left_value',
                    label='Left',
                    style='readonly'),
            tu.Item('ss_right_value',
                    label='Right',
                    style='readonly'),
            tu.Item(name='offset',
                    style='readonly'),
            tu.Item(name='scale',
                    style='readonly'),
            'units',),
        handler=CalibrationHandler,
        buttons=[OKButton, OurApplyButton, CancelButton],
        kind='live',
        title='Calibration parameters')

    def __init__(self, signal):
        super(SpectrumCalibration, self).__init__(signal)
        if signal.axes_manager.signal_dimension != 1:
            raise SignalDimensionError(
                signal.axes_manager.signal_dimension, 1)
        self.units = self.axis.units
        self.last_calibration_stored = True

    def _left_value_changed(self, old, new):
        if self.span_selector is not None and \
                self.span_selector.range is None:
            messages.information(
                'Please select a range in the spectrum figure '
                'by dragging the mouse over it')
            return
        else:
            self._update_calibration()

    def _right_value_changed(self, old, new):
        if self.span_selector.range is None:
            messages.information(
                'Please select a range in the spectrum figure '
                'by dragging the mouse over it')
            return
        else:
            self._update_calibration()

    def _update_calibration(self, *args, **kwargs):
        if self.left_value == self.right_value:
            return
        lc = self.axis.value2index(self.ss_left_value)
        rc = self.axis.value2index(self.ss_right_value)
        self.offset, self.scale = self.axis.calibrate(
            (self.left_value, self.right_value), (lc, rc),
            modify_calibration=False)
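
# A hypothetical usage sketch: `s` stands for a one-dimensional signal object
# exposing the axes_manager/axis interface used above (in HyperSpy this is a
# spectrum); the dialog's handler applies the computed offset and scale.
calibration = SpectrumCalibration(s)
calibration.edit_traits()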
Code example #19
class HardwareAction(traits.HasTraits):
    """Parent class for all hardware actions. User must make a subclass of this for each
    hardware action and overwrite init, close and callback methods where necessary. Other
    functions can use the parent class implementation directly"""
    callbackTime = traits.Float()
    variables = traits.List()
    variablesReference = {
    }  # leave empty. This will be set to the experiment control variables before the call back is executed. This is all taken care of by the snake
    hardwareActionName = traits.Str()
    examineVariablesButton = traits.Button()
    enabled = traits.Bool(True)
    callbackTimeVariableDependent = False  # if true the callbackTime argument is a variable to be parsed in the snake
    callbackTimeString = None  # gets populated if callbackTimeInSequence is a string
    snakeReference = None  # reference to the snake object so that we can call update functions for e.g. examineVariablesDict pane

    def __init__(self, callbackTimeInSequence, **traitsDict):
        super(HardwareAction, self).__init__(**traitsDict)
        if type(callbackTimeInSequence) is float:
            self.callbackTime = callbackTimeInSequence  # time in the sequence when the callback should be performed, passed during construction
        elif type(
                callbackTimeInSequence
        ) is str:  # here we check if callback time is a timing edge or a variable
            self.callbackTimeVariableDependent = True
            self.callbackTimeString = callbackTimeInSequence
            logger.info(
                "CallbackTime string detected. Attempting to parse string as timing edge or variable"
            )
        else:
            self.callbackTime = callbackTimeInSequence  # time in the sequence when the callback should be performed, passed during construction
        self.awaitingCallback = True  # goes to False after it has been called back for the final time in a sequence (usually once)
        self.callbackCounter = 0  # number of times called back this sequence
        self.initialised = False  # set to true if init run. set to false if close run
        logger.info("HardwareAction Super class __init__ completed")

    def _variables_default(self):
        """uses the variable mappings dictionary defined in the subclass """
        return self.variableMappings.keys()

    def setVariablesDictionary(self, variables):
        """sets the variables reference to the latest variables dictionary. simply sets the variables reference attribute """
        self.variablesReference = variables

    def mapVariables(self):
        """returns a dictionary of python variable names used in the callback function
        with their correct values for this run. Raises an error if a variable is missing.
        Could potentially implement default values here"""
        logger.debug("variables in %s: %s" %
                     (self.hardwareActionName, self.variablesReference))
        try:
            return {
                self.variableMappings[key]: self.variablesReference[key]
                for key in self.variableMappings.iterkeys()
            }
        except KeyError as e:
            raise e  # defaults handling TODO

    def parseCallbackTime(self):
        """if callbackTime is a string we interpret it as a timing edge name or variable name"""
        if self.callbackTimeString in self.snakeReference.timingEdges:
            self.callbackTime = self.snakeReference.timingEdges[
                self.callbackTimeString]
        elif self.callbackTimeString in self.snakeReference.variables:
            self.callbackTime = self.snakeReference.variables[
                self.callbackTimeString]
        else:
            raise KeyError(
                "callbackTime %s was not found in either the timing edges or variables dictionary. Check Spelling? Could not initialise %s object"
                % (self.callbackTimeString, self.hardwareActionName))

    #####USER SHOULD OVERWRITE THE BELOW FUNCTIONS IN SUBCLASS AS REQUIRED
    def init(self):
        """only called once when the user presses the start button. This should perform
        any hardware specific initialisation, e.g. opening sockets / decads connections.
        Return string is printed to main log terminal"""
        self.initialised = True
        logger.warning(
            "Using default init as no init method has been defined in Hardware Action Subclass"
        )
        return "%s init successful" % self.hardwareActionName

    def close(self):
        """called to close the hardware down when the user stops the Snake or exits. Should
        safely close the hardware. It should be able to restart again when the
        init function is called (e.g. the user then presses start)"""
        logger.warning(
            "Using default close as no close method has been defined in Hardware Action Subclass"
        )
        return "%s closed" % self.hardwareActionName

    def callback(self):
        """This is the function that is called every sequence at the callbackTime. 
        IT SHOULD NOT HANG as this is a blocking function call. You need to handle
        threading yourself if the callback function would take a long time to return.
        Return value should be a string to be printed in terminal"""
        logger.debug("beginning %s callback" % self.hardwareActionName)
        if not self.initialised:
            return "%s not initialised with init function. Cannot be called back until initialised. Doing nothing" % self.hardwareActionName
        try:  #YOUR CALLBACK CODE SHOULD GO IN THIS TRY BLOCK!
            self.finalVariables = self.mapVariables()
            raise NotImplementedError(
                "the callback function needs to be implemented in your subclass"
            )
            return "callback on %s completed" % (self.hardwareActionName)
        except KeyboardInterrupt:
            raise
        except KeyError as e:
            return "Failed to find variable %s in variables %s. Check variable is defined in experiment control " % (
                e.message, self.variablesReference.keys())
        except Exception as e:
            return "Failed to perform callback on %s. Error message %s" % (
                self.hardwareActionName, e.message)

    def _enabled_changed(self):
        """traitsui handler function (is automatically called when enabled changes during interaction with user interface """
        if self.enabled:
            self.snakeReference.mainLog.addLine(
                "%s was just enabled. Will perform its init method now" %
                self.hardwareActionName, 1)
            self.awaitingCallback = False  # by setting this to False we prevent the action being performed until the next sequence begins. This is usually desirable
            returnString = self.init()
            self.snakeReference.mainLog.addLine(returnString)
        elif not self.enabled:
            if self.snakeReference.isRunning:  #only print to log if it's disabled while snake is running
                self.snakeReference.mainLog.addLine(
                    "%s was just disabled. Will perform its close method now" %
                    self.hardwareActionName, 1)
            self.close()  #close method always performed for safety

    def _examineVariablesButton_fired(self):
        """Called when the user clicks on the book icon near the hardware action name. This makes a pop-up
        which shows all the variables that the hardware action defines. Later it might let users
        edit certain parameters"""
        self.snakeReference.updateExamineVariablesDictionary(
            self)  # pass this hardwareAction object as the argument so the dialog can be updated
        logger.info("variables = %s" % self.variables)

    #traits_view for all hardware actions. Just shows the name and lets the user enable or disable
    traits_view = traitsui.View(
        traitsui.HGroup(
            traitsui.Item("hardwareActionName",
                          show_label=False,
                          style="readonly"),
            traitsui.Item("enabled", show_label=False),
            traitsui.Item(
                "examineVariablesButton",
                show_label=False,
                editor=traitsui.ButtonEditor(
                    image=pyface.image_resource.ImageResource(
                        os.path.join(os.getcwd(), 'icons', 'book.png'))),
                style="custom"),
        ))
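A minimal sketch of the intended override pattern for the hooks above, assuming the base class shown here is importable as HardwareAction; ExampleShutterAction and its messages are hypothetical.

class ExampleShutterAction(HardwareAction):
    """Hypothetical subclass illustrating the override pattern."""

    def init(self):
        # open sockets / device connections here
        self.initialised = True
        return "%s init successful" % self.hardwareActionName

    def callback(self):
        # must not hang: this is a blocking call made once per sequence
        if not self.initialised:
            return "%s not initialised. Doing nothing" % self.hardwareActionName
        try:
            self.finalVariables = self.mapVariables()
            # write self.finalVariables to the hardware here
            return "callback on %s completed" % self.hardwareActionName
        except Exception as e:
            return "Failed to perform callback on %s. Error message: %s" % (
                self.hardwareActionName, e)

    def close(self):
        # release whatever init opened so init can be called again
        return "%s closed" % self.hardwareActionName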
Code Example #20
class ImageContrastEditor(t.HasTraits):
    ss_left_value = t.Float()
    ss_right_value = t.Float()

    view = tu.View(tu.Item('ss_left_value',
                           label='vmin',
                           show_label=True,
                           style='readonly',),
                   tu.Item('ss_right_value',
                           label='vmax',
                           show_label=True,
                           style='readonly'),
                   handler=ImageContrastHandler,
                   buttons=[OKButton,
                            OurApplyButton,
                            OurResetButton,
                            CancelButton, ],
                   title='Contrast adjustment tool',
                   )

    def __init__(self, image):
        super(ImageContrastEditor, self).__init__()
        self.image = image
        f = plt.figure()
        self.ax = f.add_subplot(111)
        self.plot_histogram()

        self.span_selector = None
        self.span_selector_switch(on=True)

    def on_disabling_span_selector(self):
        pass

    def span_selector_switch(self, on):
        if on is True:
            self.span_selector = \
                drawing.widgets.ModifiableSpanSelector(
                    self.ax,
                    onselect=self.update_span_selector_traits,
                    onmove_callback=self.update_span_selector_traits)

        elif self.span_selector is not None:
            self.on_disabling_span_selector()
            self.span_selector.turn_off()
            self.span_selector = None

    def update_span_selector_traits(self, *args, **kwargs):
        self.ss_left_value = self.span_selector.rect.get_x()
        self.ss_right_value = self.ss_left_value + \
            self.span_selector.rect.get_width()

    def plot_histogram(self):
        vmin, vmax = self.image.vmin, self.image.vmax
        pad = (vmax - vmin) * 0.05
        vmin = vmin - pad
        vmax = vmax + pad
        data = self.image.data_function().ravel()
        self.patches = self.ax.hist(data, 100, range=(vmin, vmax),
                                    color='blue')[2]
        self.ax.set_xticks([])
        self.ax.set_yticks([])
        self.ax.set_xlim(vmin, vmax)
        self.ax.figure.canvas.draw()

    def reset(self):
        data = self.image.data_function().ravel()
        self.image.vmin, self.image.vmax = np.nanmin(data), np.nanmax(data)
        self.image.update(auto_contrast=False)
        self.update_histogram()

    def update_histogram(self):
        for patch in self.patches:
            self.ax.patches.remove(patch)
        self.plot_histogram()

    def apply(self):
        if self.ss_left_value == self.ss_right_value:
            return
        self.image.vmin = self.ss_left_value
        self.image.vmax = self.ss_right_value
        self.image.update(auto_contrast=False)
        self.update_histogram()

    def close(self):
        plt.close(self.ax.figure)
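A usage sketch for the editor above, assuming an image-plot object that exposes vmin, vmax, data_function() and update(auto_contrast=...) as the methods here expect; image_plot is hypothetical.

editor = ImageContrastEditor(image_plot)  # image_plot is a hypothetical image object
editor.edit_traits()   # open the TraitsUI dialog next to the histogram figure
# ... the user drags the span selector; ss_left_value / ss_right_value update ...
editor.apply()         # push the selected range back to the image as vmin / vmax
editor.close()         # close the histogram figure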
Code Example #21
File: axes.py  Project: venkatacharan1989/hyperspy
class DataAxis(t.HasTraits):
    name = t.Str()
    units = t.Str()
    scale = t.Float()
    offset = t.Float()
    size = t.CInt()
    low_value = t.Float()
    high_value = t.Float()
    value = t.Range('low_value', 'high_value')
    low_index = t.Int(0)
    high_index = t.Int()
    slice = t.Instance(slice)
    navigate = t.Bool(t.Undefined)
    index = t.Range('low_index', 'high_index')
    axis = t.Array()
    continuous_value = t.Bool(False)

    def __init__(self,
                 size,
                 index_in_array=None,
                 name=t.Undefined,
                 scale=1.,
                 offset=0.,
                 units=t.Undefined,
                 navigate=t.Undefined):
        super(DataAxis, self).__init__()
        self.events = Events()
        self.events.index_changed = Event("""
            Event that triggers when the index of the `DataAxis` changes

            Triggers after the internal state of the `DataAxis` has been
            updated.

            Arguments
            ---------
            obj : The DataAxis that the event belongs to.
            index : The new index
            """,
                                          arguments=["obj", 'index'])
        self.events.value_changed = Event("""
            Event that triggers when the value of the `DataAxis` changes

            Triggers after the internal state of the `DataAxis` has been
            updated.

            Arguments
            ---------
            obj : The DataAxis that the event belongs to.
            value : The new value
            """,
                                          arguments=["obj", 'value'])
        self._suppress_value_changed_trigger = False
        self._suppress_update_value = False
        self.name = name
        self.units = units
        self.scale = scale
        self.offset = offset
        self.size = size
        self.high_index = self.size - 1
        self.low_index = 0
        self.index = 0
        self.update_axis()
        self.navigate = navigate
        self.axes_manager = None
        self.on_trait_change(self.update_axis, ['scale', 'offset', 'size'])
        self.on_trait_change(self._update_slice, 'navigate')
        self.on_trait_change(self.update_index_bounds, 'size')
        # The slice must be updated even if the default value did not
        # change to correctly set its value.
        self._update_slice(self.navigate)

    def _index_changed(self, name, old, new):
        self.events.index_changed.trigger(obj=self, index=self.index)
        if not self._suppress_update_value:
            new_value = self.axis[self.index]
            if new_value != self.value:
                self.value = new_value

    def _value_changed(self, name, old, new):
        old_index = self.index
        new_index = self.value2index(new)
        if self.continuous_value is False:  # Only values in the grid allowed
            if old_index != new_index:
                self.index = new_index
                if new == self.axis[self.index]:
                    self.events.value_changed.trigger(obj=self, value=new)
            elif old_index == new_index:
                new_value = self.index2value(new_index)
                if new_value == old:
                    self._suppress_value_changed_trigger = True
                    try:
                        self.value = new_value
                    finally:
                        self._suppress_value_changed_trigger = False

                elif new_value == new and not\
                        self._suppress_value_changed_trigger:
                    self.events.value_changed.trigger(obj=self, value=new)
        else:  # Inter-grid values are allowed. This feature is deprecated
            self.events.value_changed.trigger(obj=self, value=new)
            if old_index != new_index:
                self._suppress_update_value = True
                self.index = new_index
                self._suppress_update_value = False

    @property
    def index_in_array(self):
        if self.axes_manager is not None:
            return self.axes_manager._axes.index(self)
        else:
            raise AttributeError(
                "This DataAxis does not belong to an AxesManager"
                " and therefore its index_in_array attribute"
                " is not defined")

    @property
    def index_in_axes_manager(self):
        if self.axes_manager is not None:
            return self.axes_manager._get_axes_in_natural_order().\
                index(self)
        else:
            raise AttributeError(
                "This DataAxis does not belong to an AxesManager"
                " and therefore its index_in_axes_manager attribute"
                " is not defined")

    def _get_positive_index(self, index):
        if index < 0:
            index = self.size + index
            if index < 0:
                raise IndexError("index out of bounds")
        return index

    def _get_index(self, value):
        if isfloat(value):
            return self.value2index(value)
        else:
            return value

    def _get_array_slices(self, slice_):
        """Returns a slice to slice the corresponding data axis without
        changing the offset and scale of the DataAxis.

        Parameters
        ----------
        slice_ : {float, int, slice}

        Returns
        -------
        my_slice : slice

        """
        v2i = self.value2index

        if isinstance(slice_, slice):
            start = slice_.start
            stop = slice_.stop
            step = slice_.step
        else:
            if isfloat(slice_):
                start = v2i(slice_)
            else:
                start = self._get_positive_index(slice_)
            stop = start + 1
            step = None

        if isfloat(step):
            step = int(round(step / self.scale))
        if isfloat(start):
            try:
                start = v2i(start)
            except ValueError:
                if start > self.high_value:
                    # The start value is above the axis limit
                    raise IndexError(
                        "Start value above axis high bound for axis %s. "
                        "value: %f high_bound: %f" %
                        (repr(self), start, self.high_value))
                else:
                    # The start value is below the axis limit,
                    # we slice from the start.
                    start = None
        if isfloat(stop):
            try:
                stop = v2i(stop)
            except ValueError:
                if stop < self.low_value:
                    # The stop value is below the axis limits
                    raise IndexError(
                        "Stop value below axis low bound for axis %s. "
                        "value: %f low_bound: %f" %
                        (repr(self), stop, self.low_value))
                else:
                    # The stop value is above the axis limit,
                    # we slice until the end.
                    stop = None

        if step == 0:
            raise ValueError("slice step cannot be zero")

        return slice(start, stop, step)

    def _slice_me(self, slice_):
        """Returns a slice to slice the corresponding data axis and
        change the offset and scale of the DataAxis accordingly.

        Parameters
        ----------
        slice_ : {float, int, slice}

        Returns
        -------
        my_slice : slice

        """
        i2v = self.index2value

        my_slice = self._get_array_slices(slice_)

        start, stop, step = my_slice.start, my_slice.stop, my_slice.step

        if start is None:
            if step is None or step > 0:
                start = 0
            else:
                start = self.size - 1
        self.offset = i2v(start)
        if step is not None:
            self.scale *= step

        return my_slice

    def _get_name(self):
        if self.name is t.Undefined:
            if self.axes_manager is None:
                name = "Unnamed"
            else:
                name = "Unnamed " + ordinal(self.index_in_axes_manager)
        else:
            name = self.name
        return name

    def __repr__(self):
        text = '<%s axis, size: %i' % (
            self._get_name(),
            self.size,
        )
        if self.navigate is True:
            text += ", index: %i" % self.index
        text += ">"
        return text

    def __str__(self):
        return self._get_name() + " axis"

    def update_index_bounds(self):
        self.high_index = self.size - 1

    def update_axis(self):
        self.axis = generate_axis(self.offset, self.scale, self.size)
        if len(self.axis) != 0:
            self.low_value, self.high_value = (self.axis.min(),
                                               self.axis.max())

    def _update_slice(self, value):
        if value is False:
            self.slice = slice(None)
        else:
            self.slice = None

    def get_axis_dictionary(self):
        adict = {
            'name': self.name,
            'scale': self.scale,
            'offset': self.offset,
            'size': self.size,
            'units': self.units,
            'navigate': self.navigate
        }
        return adict

    def copy(self):
        return DataAxis(**self.get_axis_dictionary())

    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo):
        cp = self.copy()
        return cp

    def value2index(self, value, rounding=round):
        """Return the closest index to the given value if it is within the axis limits.

        Parameters
        ----------
        value : number or numpy array

        Returns
        -------
        index : integer or numpy array

        Raises
        ------
        ValueError if any value is out of the axis limits.

        """
        if value is None:
            return None

        if isinstance(value, (np.ndarray, da.Array)):
            if rounding is round:
                rounding = np.round
            elif rounding is math.ceil:
                rounding = np.ceil
            elif rounding is math.floor:
                rounding = np.floor

        index = rounding((value - self.offset) / self.scale)

        if isinstance(value, np.ndarray):
            index = index.astype(int)
            if np.all(self.size > index) and np.all(index >= 0):
                return index
            else:
                raise ValueError("A value is out of the axis limits")
        else:
            index = int(index)
            if self.size > index >= 0:
                return index
            else:
                raise ValueError("The value is out of the axis limits")

    def index2value(self, index):
        if isinstance(index, da.Array):
            index = index.compute()
        if isinstance(index, np.ndarray):
            return self.axis[index.ravel()].reshape(index.shape)
        else:
            return self.axis[index]

    def calibrate(self, value_tuple, index_tuple, modify_calibration=True):
        scale = (value_tuple[1] - value_tuple[0]) /\
            (index_tuple[1] - index_tuple[0])
        offset = value_tuple[0] - scale * index_tuple[0]
        if modify_calibration is True:
            self.offset = offset
            self.scale = scale
        else:
            return offset, scale

    def value_range_to_indices(self, v1, v2):
        """Convert the given value range to an index range.

        When a value is out of the axis limits, the corresponding endpoint is used instead.

        Parameters
        ----------
        v1, v2 : float
            The end points of the interval in the axis units. v2 must be
            greater than v1.

        """
        if v1 is not None and v2 is not None and v1 > v2:
            raise ValueError("v2 must be greater than v1.")

        if v1 is not None and self.low_value < v1 <= self.high_value:
            i1 = self.value2index(v1)
        else:
            i1 = 0
        if v2 is not None and self.high_value > v2 >= self.low_value:
            i2 = self.value2index(v2)
        else:
            i2 = self.size - 1
        return i1, i2

    def update_from(self, axis, attributes=["scale", "offset", "units"]):
        """Copy the values of the specified attributes from the passed DataAxis.

        Parameters
        ----------
        axis : DataAxis
            The DataAxis instance to use as a source for values.
        attributes : iterable container of strings.
            The names of the attributes to update. If an attribute does not
            exist in either of the axes, an AttributeError will be
            raised.
        Returns
        -------
        A boolean indicating whether any changes were made.

        """
        any_changes = False
        changed = {}
        for f in attributes:
            if getattr(self, f) != getattr(axis, f):
                changed[f] = getattr(axis, f)
        if len(changed) > 0:
            self.trait_set(**changed)
            any_changes = True
        return any_changes
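A short sketch of the calibration and value/index conversions above. It assumes generate_axis produces offset + scale * arange(size), which the arithmetic in value2index implies.

axis = DataAxis(size=1024, scale=0.5, offset=10.0, units="nm", name="x")
axis.value2index(20.0)   # round((20.0 - 10.0) / 0.5) -> 20
axis.index2value(100)    # 10.0 + 0.5 * 100 -> 60.0
axis.value_range_to_indices(15.0, 60.0)   # -> (10, 100)
axis.calibrate(value_tuple=(0.0, 1.0), index_tuple=(0, 100))  # rescales in place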
Code Example #22
File: new2dmatmodel.py  Project: simvisage/bmcs
class MATS1D5DP2D(MATSEval):

    node_name = 'Pressure sensitive cumulative damage plasticity'

    E_N = tr.Float(30000,
                   label='E_N',
                   desc='Normal stiffness of the interface',
                   MAT=True,
                   enter_set=True,
                   auto_set=False)

    E_T = tr.Float(12900,
                   label='E_T',
                   desc='Shear modulus of the interface',
                   MAT=True,
                   enter_set=True,
                   auto_set=False)

    gamma = tr.Float(55.0,
                     label='gamma',
                     desc='Kinematic Hardening Modulus',
                     MAT=True,
                     enter_set=True,
                     auto_set=False)

    K = tr.Float(11,
                 label='K',
                 desc='Isotropic hardening modulus',
                 MAT=True,
                 enter_set=True,
                 auto_set=False)

    S_T = tr.Float(0.005,
                   label='S_T',
                   desc='Damage accumulation parameter',
                   MAT=True,
                   enter_set=True,
                   auto_set=False)

    S_N = tr.Float(0.005,
                   label='S_N',
                   desc='Damage accumulation parameter',
                   MAT=True,
                   enter_set=True,
                   auto_set=False)

    c_N = tr.Float(1,
                   label='c_N',
                   desc='Damage accumulation parameter',
                   MAT=True,
                   enter_set=True,
                   auto_set=False)

    c_T = tr.Float(1,
                   label='c_T',
                   desc='Damage accumulation parameter',
                   MAT=True,
                   enter_set=True,
                   auto_set=False)

    m = tr.Float(0.3,
                 label='m',
                 desc='Lateral Pressure Coefficient',
                 MAT=True,
                 enter_set=True,
                 auto_set=False)

    sigma_o = tr.Float(4.2,
                       label='sigma_o',
                       desc='Reversibility limit',
                       MAT=True,
                       enter_set=True,
                       auto_set=False)

    sig_t = tr.Float(5.0,
                     label='sig_t',
                     MAT=True,
                     enter_set=True,
                     auto_set=False)

    b = tr.Float(0.2, label='b', MAT=True, enter_set=True, auto_set=False)

    state_var_shapes = dict(ep_p_N=(),
                            ep_pi_T=(),
                            alpha=(),
                            z=(),
                            omega_T=(),
                            omega_N=())

    D_rs = tr.Property(depends_on='E_N,E_T')

    @tr.cached_property
    def _get_D_rs(self):
        print('recalculating D_rs')
        return np.array([[self.E_T, 0], [0, self.E_N]], dtype=np.float_)

    def init(self, ep_p_N, ep_pi_T, alpha, z, omega_T, omega_N):
        r'''
        Initialize the state variables.
        '''
        ep_p_N[...] = 0
        ep_pi_T[...] = 0
        alpha[...] = 0
        z[...] = 0
        omega_T[...] = 0
        omega_N[...] = 0

    algorithmic = tr.Bool(True)

    def get_corr_pred(self, u_r, t_n, ep_p_N, ep_pi_T, alpha, z, omega_T,
                      omega_N):

        ep_T = u_r[..., 0]
        ep_N = u_r[..., 1]

        sig_N_eff_trial = self.E_N * (ep_N - ep_p_N)
        sig_T_eff_trial = self.E_T * (ep_T - ep_pi_T)

        Z = self.K * z
        X = self.gamma * alpha
        f_trial = np.fabs(sig_T_eff_trial - self.gamma * alpha) - \
            (self.sigma_o + self.K * z - self.m * sig_N_eff_trial) * \
            (1.0 - np.heaviside(sig_N_eff_trial, 1) *
             (sig_N_eff_trial ** 2 / self.sig_t ** 2))

        I = f_trial > 1e-8

        delta_lamda = 0
        sig_N_eff = 0
        sig_T_eff = 0

        N = (self.m + np.heaviside(sig_N_eff, 1) *
             ((-self.m * sig_N_eff ** 2) / self.sig_t ** 2 +
              (self.sigma_o + self.K * (z[I] + delta_lamda) -
               self.m * sig_N_eff) *
              (2 * sig_N_eff / self.sig_t ** 2))) / (1. - omega_N[I])

        f1 = self.sigma_o + self.K * (z[I] + delta_lamda) - self.m * sig_N_eff
        #print(f1)
        ft = 1.0 - np.heaviside(sig_N_eff, 1) * (sig_N_eff**2 /
                                                 (self.sig_t)**2)

        f_lamda = np.fabs(sig_T_eff_trial[I] -
                          self.gamma * alpha[I]) - delta_lamda * (
                              self.E_T /
                              (1. - omega_T[I]) + self.gamma) - f1 * ft

        f_N = sig_N_eff - sig_N_eff_trial[I] + (
            delta_lamda / (1 - omega_N[I])) * self.E_N * N

        f_tau = sig_T_eff - sig_T_eff_trial + (
            delta_lamda /
            (1 - omega_T)) * self.E_T * np.sign(sig_T_eff_trial - X)

        sig_N = (1.0 - omega_N) * sig_N_eff

        sig_T = (1.0 - omega_T) * self.E_T * (ep_T - ep_pi_T)

        x0 = np.zeros_like(ep_T)

        def get_sig_N_eff():
            sol = root(lambda sig_N_eff: f(f_N(delta_lamda, sig_N_eff),
                                           f_lamda(delta_lamda, sig_N_eff)),
                       x0=x0,
                       method='lm',
                       tol=1e-6)
            return sol.x

        def get_sig_T_eff():
            sol = root(lambda sig_T_eff: f_tau(delta_lamda, sig_T_eff),
                       x0=x0,
                       method='lm',
                       tol=1e-6)
            return sol.x

        def get_delta_lamda():
            sol = root(lambda delta_lamda: f(f_lamda(sig_N_eff, delta_lamda),
                                             f_N(sig_N_eff, delta_lamda),
                                             f_tau(sig_T_eff, delta_lamda)),
                       x0=x0,
                       method='lm',
                       tol=1e-6)
            return sol.x

        print(delta_lamda)

        ep_p_N[I] += delta_lamda * N / (1 - omega_N[I])
        #print(np.shape(omega_N[I]))
        ep_pi_T[I] += delta_lamda * np.sign(sig_T_eff_trial[I] - self.gamma *
                                            alpha[I]) / (1.0 - omega_T[I])

        z[I] += delta_lamda
        alpha[I] += delta_lamda * np.sign(sig_T_eff_trial[I] - X[I])

        Y_N = 0.5 * self.E_N * (ep_N - ep_p_N)**2.0
        Y_T = 0.5 * self.E_T * (ep_T - ep_pi_T)**2.0

        omega_N[I] += delta_lamda * (1 - omega_N[I])**self.c_N * (
            Y_N[I] / self.S_N + self.b * Y_T[I] / self.S_T) * np.heaviside(
                ep_N[I], 1)

        omega_T[I] += delta_lamda * (1 - omega_T[I])**self.c_T * (
            Y_T[I] / self.S_T + self.b * Y_N[I] / self.S_N)

        sig_N[I] = (1.0 - omega_N[I]) * sig_N_eff

        sig_T[I] = (1.0 - omega_T[I]) * self.E_T * (ep_T[I] - ep_pi_T[I])

        f = np.fabs(sig_T[I] / (1 - omega_T[I]) - self.gamma * alpha[I]) - \
            (self.sigma_o + self.K * z[I] -
             self.m * sig_N[I] / (1 - omega_N[I])) * \
            (1.0 - np.heaviside(sig_N[I] / (1 - omega_N[I]), 1) *
             ((sig_N[I] / (1 - omega_N[I])) ** 2 / self.sig_t ** 2))

        #=======================================================================
        # sig_N_i = (1 - omega_N) * sig_N_i_eff_trial
        #
        # sig_T_i = (1 - omega_T) * sig_T_i_eff_trial
        #=======================================================================

        f = f_trial
        #print(sig_T[I])
        # Unloading stiffness
        E_alg_T = (1 - omega_T) * self.E_T
        E_alg_N = (1 - omega_N) * self.E_N

        ep = np.zeros_like(u_r)
        ep[..., 0] = ep_T
        ep[..., 1] = ep_N
        E_TN = np.einsum(
            'abEm->Emab',
            np.array([[E_alg_T, np.zeros_like(E_alg_T)],
                      [np.zeros_like(E_alg_N), E_alg_N]]))
        #print('omega-T', omega_T)
        return ep, E_TN

    def _get_var_dict(self):
        var_dict = super(MATS1D5DP2D, self)._get_var_dict()
        var_dict.update(slip=self.get_slip,
                        s_el=self.get_s_el,
                        shear=self.get_shear,
                        omega_T=self.get_omega_T,
                        omega_N=self.get_omega_N,
                        ep_pi_T=self.get_ep_pi_T,
                        ep_p_N=self.get_ep_p_N,
                        alpha=self.get_alpha,
                        z=self.get_z)
        return var_dict

    def get_slip(self, u_r, tn1, **state):
        return self.get_eps(u_r, tn1)[..., 0]

    def get_shear(self, u_r, tn1, **state):
        return self.get_sig(u_r, tn1, **state)[..., 0]

    def get_omega_T(self, u_r, tn1, ep_pi_T, alpha, z, omega_T):
        return omega_T

    def get_omega_N(self, u_r, tn1, ep_p_N, omega_N):
        return omega_N

    def get_ep_pi_T(self, u_r, tn1, ep_pi_T, alpha, z, omega_T):
        return ep_pi_T

    def get_ep_p_N(self, u_r, tn1, ep_p_N, omega_N):
        return ep_p_N

    def get_alpha(self, u_r, tn1, ep_pi_T, alpha, z, omega_T):
        return alpha

    def get_z(self, u_r, tn1, ep_pi_T, alpha, z, omega_T):
        return z

    def get_s_el(self, u_r, tn1, **state):
        ep_T = self.get_slip(u_r, tn1, **state)
        ep_pi_T = self.get_ep_pi_T(u_r, tn1, **state)
        ep_e_T = ep_T - ep_pi_T
        return ep_e_T

    tree_view = ui.View(ui.Item('E_N'), ui.Item('E_T'), ui.Item('gamma'),
                        ui.Item('K'), ui.Item('S_T'), ui.Item('S_N'),
                        ui.Item('c_T'), ui.Item('c_N'), ui.Item('m'),
                        ui.Item('sigma_o'), ui.Item('sig_t'), ui.Item('b'),
                        ui.Item('D_rs', style='readonly'))

    traits_view = tree_view
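A minimal sketch of the cached-property pattern used for D_rs above: the property depends on E_N and E_T, so the matrix is rebuilt only when one of them changes.

mat = MATS1D5DP2D()
D1 = mat.D_rs        # prints 'recalculating D_rs' and builds the 2x2 matrix
D2 = mat.D_rs        # cached: returned without recomputation
mat.E_N = 28000.0    # invalidates the cache via depends_on='E_N,E_T'
D3 = mat.D_rs        # prints 'recalculating D_rs' again with the new E_N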
Code Example #23
File: plscr_model.py  Project: TGXnet/consumercheck
class PlsrPcr(Model):
    """Represent the PlsrPcr model between one X and Y data set."""

    # Consumer liking
    ds_C = DataSet()
    # Descriptive analysis / sensory profiling
    ds_S = DataSet()
    ds_X = _traits.Property()
    ds_Y = _traits.Property()
    settings = _traits.WeakRef()
    # Checkbox bool for standardised results
    standardise_x = _traits.Bool(False)
    standardise_y = _traits.Bool(False)
    int_ext_mapping = _traits.Enum('Internal', 'External')
    plscr_method = _traits.Enum('PLSR', 'PCR')
    calc_n_pc = _traits.Int()
    min_pc = 2
    # max_pc = _traits.Property()
    max_pc = 10
    min_std = _traits.Float(0.001)
    C_zero_std = _traits.List()
    S_zero_std = _traits.List()


    def _get_res(self):
        if self._have_zero_std():
            raise InComputeable('Matrix has variables with zero variance',
                                self.C_zero_std, self.S_zero_std)
        n_pc = min(self.settings.calc_n_pc, self._get_max_pc())
        if self.settings.plscr_method == 'PLSR':
            pls = PLSR(self.ds_X.values, self.ds_Y.values,
                       numComp=n_pc, cvType=["loo"],
                       Xstand=self.settings.standardise_x, Ystand=self.settings.standardise_y)
            return self._pack_res(pls)
        elif self.settings.plscr_method == 'PCR':
            pcr = PCR(self.ds_X.values, self.ds_Y.values,
                      numComp=n_pc, cvType=["loo"],
                      Xstand=self.settings.standardise_x, Ystand=self.settings.standardise_y)
            return self._pack_res(pcr)


    def _have_zero_std(self):
        self.C_zero_std = []
        self.S_zero_std = []
        if self._std_C() and self._std_S():
            rC = self._C_have_zero_std_var()
            rS = self._S_have_zero_std_var()
            return rC or rS
        elif self._std_C():
            return self._C_have_zero_std_var()
        elif self._std_S():
            return self._S_have_zero_std_var()
        return False


    def _std_C(self):
        if self.settings.int_ext_mapping == 'Internal':
            return self.settings.standardise_x
        else:
            return self.settings.standardise_y


    def _std_S(self):
        if self.settings.int_ext_mapping == 'Internal':
            return self.settings.standardise_y
        else:
            return self.settings.standardise_x


    def _C_have_zero_std_var(self):
        self.C_zero_std = self._check_zero_std(self.ds_C)
        return bool(self.C_zero_std)


    def _S_have_zero_std_var(self):
        self.S_zero_std = self._check_zero_std(self.ds_S)
        return bool(self.S_zero_std)


    def _check_zero_std(self, ds):
        zero_std_var = []
        sv = ds.values.std(axis=0)
        dm = sv < self.min_std
        if _np.any(dm):
            vv = _np.array(ds.var_n)
            zero_std_var = list(vv[_np.nonzero(dm)])
        return zero_std_var


    def _get_ds_X(self):
        if self.settings.int_ext_mapping == 'Internal':
            return self.ds_C
        else:
            return self.ds_S


    def _get_ds_Y(self):
        if self.settings.int_ext_mapping == 'Internal':
            return self.ds_S
        else:
            return self.ds_C


    def _get_max_pc(self):
        if self.settings.int_ext_mapping == 'Internal':
            return max((min(self.ds_C.n_objs, self.ds_C.n_vars, 11) - 1), self.min_pc)
        else:
            return max((min(self.ds_S.n_objs, self.ds_S.n_vars, 11) - 1), self.min_pc)


    def _calc_n_pc_default(self):
        return self.max_pc


    def _mk_pred_ds(self, pred_mat, npc):
        pred_ds = DataSet(
            mat=_pd.DataFrame(
                data=pred_mat,
                index=self.ds_Y.obj_n,
                columns=self.ds_Y.var_n,
            ),
            display_name='Predicted after PC{}'.format(npc))
        return pred_ds


    def _pack_res(self, pls_obj):
        res = Result('PLSR/PCR {0}(X) & {1}(Y)'.format(self.ds_X.display_name, self.ds_Y.display_name))

        if self.settings.int_ext_mapping == 'External':
            res.external_mapping = True
        else:
            res.external_mapping = False

        res.plscr_method = self.settings.plscr_method

        # Scores X
        mT = pls_obj.X_scores()
        res.scores_x = DataSet(
            mat=_pd.DataFrame(
                data=mT,
                index=self.ds_X.obj_n,
                columns=["PC-{0}".format(i+1) for i in range(mT.shape[1])],
                ),
            display_name='X scores')

        # loadings_x
        mP = pls_obj.X_loadings()
        res.loadings_x = DataSet(
            mat=_pd.DataFrame(
                data=mP,
                index=self.ds_X.var_n,
                columns=["PC-{0}".format(i+1) for i in range(mP.shape[1])],
                ),
            display_name='X loadings')

        # loadings_y
        # Same as loading_x in external mapping?
        mQ = pls_obj.Y_loadings()
        res.loadings_y = DataSet(
            mat=_pd.DataFrame(
                data=mQ,
                index=self.ds_Y.var_n,
                columns=["PC-{0}".format(i+1) for i in range(mQ.shape[1])],
                ),
            display_name='Y loadings')

        # expl_var_x
        cal = pls_obj.X_calExplVar()
        cum_cal = pls_obj.X_cumCalExplVar()[1:]
        val = pls_obj.X_valExplVar()
        cum_val = pls_obj.X_cumValExplVar()[1:]
        res.expl_var_x = DataSet(
            mat=_pd.DataFrame(
                data=[cal, cum_cal, val, cum_val],
                index=['calibrated', 'cumulative calibrated', 'validated', 'cumulative validated'],
                columns=["PC-{0}".format(i+1) for i in range(len(cal))],
                ),
            display_name='Explained variance in X')

        # expl_var_y
        cal = pls_obj.Y_calExplVar()
        cum_cal = pls_obj.Y_cumCalExplVar()[1:]
        val = pls_obj.Y_valExplVar()
        cum_val = pls_obj.Y_cumValExplVar()[1:]
        res.expl_var_y = DataSet(
            mat=_pd.DataFrame(
                data=[cal, cum_cal, val, cum_val],
                index=['calibrated', 'cumulative calibrated', 'validated', 'cumulative validated'],
                columns=["PC-{0}".format(i+1) for i in range(len(cal))],
                ),
            display_name='Explained variance in Y')

        # X_corrLoadings()
        # corr_loadings_x
        mXcl = pls_obj.X_corrLoadings()
        res.corr_loadings_x = DataSet(
            mat=_pd.DataFrame(
                data=mXcl,
                index=self.ds_X.var_n,
                columns=["PC-{0}".format(i+1) for i in range(mXcl.shape[1])],
                ),
            display_name='X & Y correlation loadings')

        # Y_corrLoadings()
        # corr_loadings_y
        mYcl = pls_obj.Y_corrLoadings()
        res.corr_loadings_y = DataSet(
            mat=_pd.DataFrame(
                data=mYcl,
                index=self.ds_Y.var_n,
                columns=["PC-{0}".format(i+1) for i in range(mXcl.shape[1])],
                ),
            display_name=self.ds_Y.display_name)

        # Y_predCal()
        # Return a dict with Y pred for each PC
        pYc = pls_obj.Y_predCal()
        ks = pYc.keys()
        pYcs = [self._mk_pred_ds(pYc[k], k) for k in ks]
        res.pred_cal_y = pYcs

        # Y_predVal()
        # Return a dict with Y pred for each PC
        pYv = pls_obj.Y_predVal()
        ks = pYv.keys()
        pYvs = [self._mk_pred_ds(pYv[k], k) for k in ks]
        res.pred_val_y = pYvs

        return res
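A standalone sketch of the zero-variance screen performed by _check_zero_std above, assuming a DataSet-like object whose .values is a 2-D array and whose .var_n holds the variable names.

import numpy as np

values = np.array([[1.0, 2.0],
                   [1.0, 3.0],
                   [1.0, 4.0]])
var_names = np.array(['flat', 'varying'])
mask = values.std(axis=0) < 0.001                  # same test as min_std above
zero_std_var = list(var_names[np.nonzero(mask)])   # -> ['flat']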
Code Example #24
class PlotAtributes(ta.HasTraits):
    """Frame for selecting the plot attributes to change."""
    window_size_x = ta.Float(12.)
    window_size_y = ta.Float(12.275)
    legend_location = ta.Enum(ql.LegLocList)
    legend_number_of_columns = ta.Int(jpl.legcol)
    legend_font_size = ta.Int(jpl.legsize)
    legend_alpha = ta.Float(jpl.legalpha)
    title = ta.Str('')
    x_label = ta.Str('')
    y_label = ta.Str('')
    title_pad = ta.Int(jpl.tpad)
    title_size = ta.Int(jpl.tsize)
    x_label_size = ta.Int(jpl.xsize)
    y_label_size = ta.Int(jpl.ysize)
    grid = ta.Bool(jpl.def_grid)
    dot_size = ta.Int(jpl.dotsize)
    line_width = ta.Int(jpl.linewidth)
    error_cap_len = ta.Int(jpl.errorbarlen)
    x_axis_min = ta.Str('None')
    x_axis_max = ta.Str('None')
    y_axis_min = ta.Str('None')
    y_axis_max = ta.Str('None')
    x_zero_line = ta.Bool(False)
    y_zero_line = ta.Bool(False)
    Apply = ta.Button()
    Reset_Axies = ta.Button()
    Flip_X_Axis = ta.Button()
    Flip_Y_Axis = ta.Button()
    Flip_Axies = ta.Button()

    tick_size = ta.Int(jpl.def_tick_size)
    xTick_min = ta.Float(0)
    xTick_max = ta.Float(0)
    xTick_inc = ta.Float(0)
    yTick_min = ta.Float(0)
    yTick_max = ta.Float(0)
    yTick_inc = ta.Float(0)

    Do_window_size = ta.Bool(False)
    Do_legend_location = ta.Bool(False)
    Do_legend_number_of_columns = ta.Bool(False)
    Do_legend_font_size = ta.Bool(False)
    Do_legend_alpha = ta.Bool(False)
    Do_title = ta.Bool(False)
    Do_x_label = ta.Bool(False)
    Do_y_label = ta.Bool(False)
    Do_title_pad = ta.Bool(False)
    Do_title_size = ta.Bool(False)
    Do_x_label_size = ta.Bool(False)
    Do_y_label_size = ta.Bool(False)
    Do_grid = ta.Bool(False)
    Do_dot_size = ta.Bool(False)
    Do_line_width = ta.Bool(False)
    Do_error_cap_len = ta.Bool(False)
    Do_xlims = ta.Bool(False)
    Do_x_axis_min = ta.Bool(False)
    Do_x_axis_max = ta.Bool(False)
    Do_ylims = ta.Bool(False)
    Do_y_axis_min = ta.Bool(False)
    Do_y_axis_max = ta.Bool(False)
    Do_x_zero_line = ta.Bool(False)
    Do_y_zero_line = ta.Bool(False)

    Do_tick_size = ta.Bool(False)
    Do_xTick = ta.Bool(False)
    Do_yTick = ta.Bool(False)

    view = tua.View(tua.Group(tua.Item('window_size_x'),
                              tua.Item('window_size_y'),
                              tua.Item('legend_location'),
                              tua.Item('legend_number_of_columns'),
                              tua.Item('legend_font_size'),
                              tua.Item('legend_alpha'),
                              tua.Item('title'),
                              tua.Item('x_label'),
                              tua.Item('y_label'),
                              tua.Item('title_pad'),
                              tua.Item('title_size'),
                              tua.Item('x_label_size'),
                              tua.Item('y_label_size'),
                              tua.Item('grid'),
                              tua.Item('x_zero_line'),
                              tua.Item('y_zero_line'),
                              tua.Item('dot_size'),
                              tua.Item('line_width'),
                              tua.Item('error_cap_len'),
                              tua.Item('x_axis_min'),
                              tua.Item('x_axis_max'),
                              tua.Item('y_axis_min'),
                              tua.Item('y_axis_max'),
                              tua.Item('Apply', show_label=False),
                              tua.Item('Reset_Axies', show_label=False),
                              tua.Item('Flip_X_Axis', show_label=False),
                              tua.Item('Flip_Y_Axis', show_label=False),
                              tua.Item('Flip_Axies', show_label=False),
                              label='Main'),
                    tua.Group(tua.Item('tick_size'),
                              tua.Item('xTick_min'),
                              tua.Item('xTick_max'),
                              tua.Item('xTick_inc'),
                              tua.Item('yTick_min'),
                              tua.Item('yTick_max'),
                              tua.Item('yTick_inc'),
                              tua.Item('Apply', show_label=False),
                              tua.Item('Reset_Axies', show_label=False),
                              tua.Item('Flip_X_Axis', show_label=False),
                              tua.Item('Flip_Y_Axis', show_label=False),
                              tua.Item('Flip_Axies', show_label=False),
                              label='Ticks'),
                    tua.Group(tua.Item('Do_window_size'),
                              tua.Item('Do_legend_location'),
                              tua.Item('Do_legend_number_of_columns'),
                              tua.Item('Do_legend_font_size'),
                              tua.Item('Do_legend_alpha'),
                              tua.Item('Do_title'),
                              tua.Item('Do_x_label'),
                              tua.Item('Do_y_label'),
                              tua.Item('Do_title_pad'),
                              tua.Item('Do_title_size'),
                              tua.Item('Do_x_label_size'),
                              tua.Item('Do_y_label_size'),
                              tua.Item('Do_grid'),
                              tua.Item('Do_x_zero_line'),
                              tua.Item('Do_y_zero_line'),
                              tua.Item('Do_dot_size'),
                              tua.Item('Do_line_width'),
                              tua.Item('Do_error_cap_len'),
                              tua.Item('Do_xlims'),
                              tua.Item('Do_x_axis_min'),
                              tua.Item('Do_x_axis_max'),
                              tua.Item('Do_ylims'),
                              tua.Item('Do_y_axis_min'),
                              tua.Item('Do_y_axis_max'),
                              tua.Item('Do_tick_size'),
                              tua.Item('Do_xTick'),
                              tua.Item('Do_yTick'),
                              tua.Item('Apply', show_label=False),
                              tua.Item('Reset_Axies', show_label=False),
                              tua.Item('Flip_X_Axis', show_label=False),
                              tua.Item('Flip_Y_Axis', show_label=False),
                              tua.Item('Flip_Axies', show_label=False),
                              label='Selection'),
                    buttons=['OK'],
                    resizable=True)

    def _Do_xlims_changed(self):
        if not self.Do_xlims:
            self.Do_x_axis_max = False
            self.Do_x_axis_min = False

    def _Do_ylims_changed(self):
        if not self.Do_ylims:
            self.Do_y_axis_max = False
            self.Do_y_axis_min = False

    def _Flip_Axies_fired(self):
        global files_selected
        for ifile in files_selected.file_list:
            this_plot = jpl.Plotting(plot_info={'save_file': ifile})
            if not os.path.isfile(this_plot.PickleFile):
                print('FNF:', this_plot.PickleFile)
                continue
            this_plot.LoadPickle(DefWipe=False)
            this_plot.Flip_Axies()
            this_plot.PlotAll()
            this_plot.close_fig()

    def _Flip_X_Axis_fired(self):
        global files_selected
        for ifile in files_selected.file_list:
            this_plot = jpl.Plotting(plot_info={'save_file': ifile})
            if not os.path.isfile(this_plot.PickleFile):
                print('FNF:', this_plot.PickleFile)
                continue
            this_plot.LoadPickle(DefWipe=False)
            this_plot.Flip_X_Axis()
            this_plot.PlotAll()
            this_plot.close_fig()

    def _Flip_Y_Axis_fired(self):
        global files_selected
        for ifile in files_selected.file_list:
            this_plot = jpl.Plotting(plot_info={'save_file': ifile})
            if not os.path.isfile(this_plot.PickleFile):
                print('FNF:', this_plot.PickleFile)
                continue
            else:
                print('Fixing:', this_plot.PickleFile)
            this_plot.LoadPickle(DefWipe=False)
            this_plot.Flip_Y_Axis()
            this_plot.close_fig()
        print('Flip Y axis Complete')
        print()
        del this_plot

    def _Reset_Axies_fired(self):
        self.x_axis_min = 'None'
        self.x_axis_max = 'None'
        self.y_axis_min = 'None'
        self.y_axis_max = 'None'

    def _Apply_fired(self):
        # global customleg,customleglist
        # global wtitle,wxlab,wrlab
        # global legloc
        global data_plot
        plot_info = pad.Series()
        if self.Do_legend_location:
            plot_info['leg_loc'] = self.legend_location
        if self.Do_legend_number_of_columns:
            plot_info['leg_ncol'] = self.legend_number_of_columns
        if self.Do_legend_font_size:
            plot_info['leg_fontsize'] = self.legend_font_size
        if self.Do_legend_alpha:
            plot_info['leg_alpha'] = self.legend_alpha
        if self.Do_title:
            plot_info['title'] = self.title
        if self.Do_title_size:
            plot_info['title_dict'] = {'fontsize': self.title_size}
        if self.Do_title_pad:
            if int(self.title_pad) == 0:
                plot_info['title_pad'] = None
            else:
                plot_info['title_pad'] = self.title_pad
        if self.Do_y_label:
            plot_info['ylabel'] = self.y_label
        if self.Do_y_label_size:
            plot_info['ylabel_dict'] = {'fontsize': self.y_label_size}
        if self.Do_x_label:
            plot_info['xlabel'] = self.x_label
        if self.Do_x_label_size:
            plot_info['xlabel_dict'] = {'fontsize': self.x_label_size}
        if self.Do_x_zero_line:
            plot_info['x_zero_line'] = self.x_zero_line
        if self.Do_y_zero_line:
            plot_info['y_zero_line'] = self.y_zero_line
        xmin = xmax = ymin = ymax = None  # defaults in case the Do_* flags are unset
        if self.Do_x_axis_min:
            if self.x_axis_min == 'None':
                xmin = None
            else:
                xmin = float(self.x_axis_min)
        if self.Do_x_axis_max:
            if self.x_axis_max == 'None':
                xmax = None
            else:
                xmax = float(self.x_axis_max)
        if self.Do_y_axis_min:
            if self.y_axis_min == 'None':
                ymin = None
            else:
                ymin = float(self.y_axis_min)
        if self.Do_y_axis_max:
            if self.y_axis_max == 'None':
                ymax = None
            else:
                ymax = float(self.y_axis_max)
        if self.Do_xlims:
            plot_info['xlims'] = [xmin, xmax]

        if self.Do_ylims:
            plot_info['ylims'] = [ymin, ymax]
        if self.Do_xTick:
            if self.xTick_inc > 0:
                plot_info['do_xTicks'] = True
                plot_info['xTick_min'] = self.xTick_min
                plot_info['xTick_max'] = self.xTick_max
                plot_info['xTick_inc'] = self.xTick_inc
        if self.Do_yTick:
            if self.yTick_inc > 0:
                plot_info['do_yTicks'] = True
                plot_info['yTick_min'] = self.yTick_min
                plot_info['yTick_max'] = self.yTick_max
                plot_info['yTick_inc'] = self.yTick_inc
        this_rc = jpl.params
        if self.Do_dot_size:
            this_rc['lines.markersize'] = self.dot_size
        if self.Do_grid:
            this_rc['axes.grid'] = self.grid
        if self.Do_line_width:
            this_rc['lines.linewidth'] = self.line_width
        if self.Do_error_cap_len:
            this_rc['errorbar.capsize'] = self.error_cap_len
        if self.Do_tick_size:
            this_rc['xtick.labelsize'] = self.tick_size
            this_rc['ytick.labelsize'] = self.tick_size
        # if self.Do_line_width:
        #     jpl.capwidth = self.line_width
        # data_plot.UpdateInfo(plot_info)
        # data_plot.PrintData()
        global files_selected
        if self.Do_window_size:
            UpdateFileList(
                plot_info,
                this_rc,
                files_selected.file_list,
                window_size=[self.window_size_x, self.window_size_y])
        else:
            UpdateFileList(plot_info, this_rc, files_selected.file_list)
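A small sketch of how _Apply_fired assembles its inputs, assuming the jpl / UpdateFileList machinery from this example; the keys mirror the ones set above and the values are illustrative.

import pandas as pad

plot_info = pad.Series(dtype=object)
plot_info['title'] = 'My plot'
plot_info['leg_loc'] = 'upper right'
plot_info['xlims'] = [0.0, 1.0]
this_rc = dict(jpl.params)            # assuming jpl.params is a dict of rc defaults
this_rc['lines.markersize'] = 6
this_rc['axes.grid'] = True
# UpdateFileList(plot_info, this_rc, files_selected.file_list)  # re-renders each file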
Code Example #25
File: signal_tools.py  Project: swang29/hyperspy
class ImageContrastEditor(t.HasTraits):
    ss_left_value = t.Float()
    ss_right_value = t.Float()

    def __init__(self, image):
        super(ImageContrastEditor, self).__init__()
        self.image = image
        f = plt.figure()
        self.ax = f.add_subplot(111)
        self.plot_histogram()

        self.span_selector = None
        self.span_selector_switch(on=True)

    def on_disabling_span_selector(self):
        pass

    def span_selector_switch(self, on):
        if on is True:
            self.span_selector = \
                drawing.widgets.ModifiableSpanSelector(
                    self.ax,
                    onselect=self.update_span_selector_traits,
                    onmove_callback=self.update_span_selector_traits)

        elif self.span_selector is not None:
            self.on_disabling_span_selector()
            self.span_selector.turn_off()
            self.span_selector = None

    def update_span_selector_traits(self, *args, **kwargs):
        self.ss_left_value = self.span_selector.rect.get_x()
        self.ss_right_value = self.ss_left_value + \
            self.span_selector.rect.get_width()

    def plot_histogram(self):
        vmin, vmax = self.image.vmin, self.image.vmax
        pad = (vmax - vmin) * 0.05
        vmin -= pad
        vmax += pad
        data = self.image.data_function().ravel()
        self.patches = self.ax.hist(data, 100, range=(vmin, vmax),
                                    color='blue')[2]
        self.ax.set_xticks([])
        self.ax.set_yticks([])
        self.ax.set_xlim(vmin, vmax)
        self.ax.figure.canvas.draw_idle()

    def reset(self):
        data = self.image.data_function().ravel()
        self.image.vmin, self.image.vmax = np.nanmin(data), np.nanmax(data)
        self.image.update()
        self.update_histogram()

    def update_histogram(self):
        for patch in self.patches:
            self.ax.patches.remove(patch)
        self.plot_histogram()

    def apply(self):
        if self.ss_left_value == self.ss_right_value:
            return
        self.image.vmin = self.ss_left_value
        self.image.vmax = self.ss_right_value
        self.image.update()
        self.update_histogram()

    def close(self):
        plt.close(self.ax.figure)
Code Example #26
File: ms1.py  Project: bmcs-group/bmcs_matmod
class MS1(MATS3DEval, InteractiveModel):
    gamma_T = tr.Float(1000000.,
                       label="gamma_T",
                       desc=" Tangential Kinematic hardening modulus",
                       enter_set=True,
                       auto_set=False)

    K_T = tr.Float(10000.,
                   label="K_T",
                   desc="Tangential Isotropic harening",
                   enter_set=True,
                   auto_set=False)

    S_T = tr.Float(0.005,
                   label="S_T",
                   desc="Damage strength",
                   enter_set=True,
                   auto_set=False)

    r_T = tr.Float(9.,
                   label="r",
                   desc="Damage cumulation parameter",
                   enter_set=True,
                   auto_set=False)
    p_T = tr.Float(2.,
                   label="p_T",
                   desc="Damage cumulation parameter",
                   enter_set=True,
                   auto_set=False)

    c_T = tr.Float(3,
                   label="c_T",
                   desc="Damage cumulation parameter",
                   enter_set=True,
                   auto_set=False)

    sigma_T_0 = tr.Float(1.7,
                         label="sigma_T_0",
                         desc="Reversibility limit",
                         enter_set=True,
                         auto_set=False)

    m_T = tr.Float(0.1,
                   label="m_T",
                   desc="Lateral pressure coefficient",
                   enter_set=True,
                   auto_set=False)

    # -------------------------------------------
    # Normal_Tension constitutive law parameters (without cumulative normal strain)
    # -------------------------------------------
    Ad = tr.Float(10.0,
                  label="A_d",
                  desc="brittleness coefficient",
                  enter_set=True,
                  auto_set=False)

    eps_0 = tr.Float(.0001,
                     label="eps_N_0",
                     desc="threshold strain",
                     enter_set=True,
                     auto_set=False)

    # -----------------------------------------------
    # Normal_Compression constitutive law parameters
    # -----------------------------------------------
    K_N = tr.Float(10000.,
                   label="K_N",
                   desc=" Normal isotropic harening",
                   enter_set=True,
                   auto_set=False)

    gamma_N = tr.Float(5000.,
                       label="gamma_N",
                       desc="Normal kinematic hardening",
                       enter_set=True,
                       auto_set=False)

    sigma_N_0 = tr.Float(10.,
                         label="sigma_N_0",
                         desc="Yielding stress",
                         enter_set=True,
                         auto_set=False)

    # -------------------------------------------------------------------------
    # Cached elasticity tensors
    # -------------------------------------------------------------------------

    E = tr.Float(35e+3,
                 label="E",
                 desc="Young's Modulus",
                 auto_set=False,
                 input=True)

    nu = tr.Float(0.2,
                  label='nu',
                  desc="Poison ratio",
                  auto_set=False,
                  input=True)

    ipw_view = View(
        Item('gamma_T', latex=r'\gamma_\mathrm{T}', minmax=(10, 100000)),
        Item('K_T', latex=r'K_\mathrm{T}', minmax=(10, 10000)),
        Item('S_T', latex=r'S_\mathrm{T}', minmax=(0.001, 0.01)),
        Item('r_T', latex=r'r_\mathrm{T}', minmax=(1, 3)),
        Item('p_T', latex=r'p_\mathrm{T}', minmax=(1, 40)),
        Item('c_T', latex=r'c_\mathrm{T}', minmax=(1, 10)),
        Item('sigma_T_0', latex=r'\bar{\sigma}^\pi_{T}', minmax=(1, 10)),
        Item('m_T', latex=r'm_\mathrm{T}', minmax=(0.001, 3)),
    )

    n_D = 3

    state_var_shapes = tr.Property

    @tr.cached_property
    def _get_state_var_shapes(self):
        dictionary = {
            name: (self.n_mp, ) + shape
            for name, shape in self.mic_state_var_shapes.items()
        }

        dictionary_macro = {
            name: shape
            for name, shape in self.mac_state_var_shapes.items()
        }
        dictionary.update(dictionary_macro)
        return dictionary

    mic_state_var_shapes = dict(
        omega_N_Emn=(),  # damage N
        z_N_Emn=(),
        alpha_N_Emn=(),
        r_N_Emn=(),
        eps_N_p_Emn=(),
        sigma_N_Emn=(),
        omega_T_Emn=(),
        z_T_Emn=(),
        alpha_T_Emna=(n_D, ),
        eps_T_pi_Emna=(n_D, ),
        sigma_T_Emna=(n_D, ),
        plastic_dissip_T_Emn=(),
        damage_dissip_T_Emn=(),
        plastic_dissip_N_Emn=(),
        damage_dissip_N_Emn=(),
    )

    mac_state_var_shapes = dict(
        total_work_microplane=(),
        total_work_macro=(),
        eps_aux=(
            n_D,
            n_D,
        ),
    )
    '''
    State variables
     1) damage N,
     2) iso N,
     3) kin N,
     4) consolidation N,
     5) eps p N,
     6) sigma N,
     7) iso F N,
     8) kin F N,
     9) energy release N,
     10) damage T,
     11) iso T,
     12-13) kin T,
     14-15) eps p T,
     16-17) sigma T,
     18) iso F T,
     19-20) kin F T,
     21) energy release T
    '''

    # --------------------------------------------------------------
    # microplane constitutive law (normal behavior CP + TD)
    # (without cumulative normal strain for fatigue under tension)
    # --------------------------------------------------------------
    def get_normal_law(self, eps_N_Emn, omega_N_Emn, z_N_Emn, alpha_N_Emn,
                       r_N_Emn, eps_N_p_Emn, sigma_N_Emn, omega_T_Emn, z_T_Emn,
                       alpha_T_Emna, eps_T_pi_Emna, sigma_T_Emna,
                       plastic_dissip_T_Emn, damage_dissip_T_Emn,
                       plastic_dissip_N_Emn, damage_dissip_N_Emn,
                       total_work_microplane, total_work_macro, eps_aux):

        E_N = self.E / (1.0 - 2.0 * self.nu)

        sigma_N_Emn_tilde = E_N * (eps_N_Emn - eps_N_p_Emn)

        r_N_Emn_aux = copy.deepcopy(r_N_Emn)
        omega_N_Emn_aux = copy.deepcopy(omega_N_Emn)

        pos = sigma_N_Emn_tilde > 1e-6  # microplanes under tension
        pos2 = sigma_N_Emn_tilde < -1e-6  # microplanes under compression
        tension = 1.0 * pos
        compression = 1.0 * pos2

        # thermo forces

        Z = self.K_N * z_N_Emn * compression
        X = self.gamma_N * alpha_N_Emn * compression
        h = (self.sigma_N_0 + Z) * compression

        f_trial = (abs(sigma_N_Emn_tilde - X) - h) * compression

        # threshold plasticity

        thres_1 = f_trial > 1e-10

        delta_lamda = f_trial / \
                      (E_N / (1 - omega_N_Emn) + abs(self.K_N) + self.gamma_N) * thres_1
        eps_N_p_Emn += delta_lamda * \
                      np.sign(sigma_N_Emn_tilde - X)
        z_N_Emn += delta_lamda
        alpha_N_Emn += delta_lamda * \
                      np.sign(sigma_N_Emn_tilde - X)

        def R_N(r_N_Emn):
            return (1.0 / self.Ad) * (-r_N_Emn / (1.0 + r_N_Emn))

        Y_N = 0.5 * tension * E_N * (eps_N_Emn - eps_N_p_Emn)**2.0
        Y_0 = 0.5 * E_N * self.eps_0**2.0

        f = (Y_N - (Y_0 + R_N(r_N_Emn))) * tension

        # threshold damage

        thres_2 = f > 1e-6

        def f_w(Y):
            return 1.0 - 1.0 / (1.0 + self.Ad * (Y - Y_0))

        omega_N_Emn[thres_2] = f_w(Y_N)[thres_2]
        omega_N_Emn[...] = np.clip(omega_N_Emn, 0, 1.0)
        r_N_Emn[thres_2] = -omega_N_Emn[thres_2]

        sigma_N_Emn[...] = (1.0 - tension * omega_N_Emn) * E_N * (eps_N_Emn -
                                                                  eps_N_p_Emn)
        Z = self.K_N * z_N_Emn * compression
        X = self.gamma_N * alpha_N_Emn * compression
        # sigma_N_Emn = E_N * (eps_N_Emn - eps_N_p_Emn)
        # pos1 = [(eps_N_Emn < -1e-6) & (sigma_trial > 1e-6)]  # looking for microplanes violating strain boundary
        # sigma_N_Emn[pos1[0]] = 0

        # step increments used in the dissipation bookkeeping below
        delta_eps_N_p_Emn = delta_lamda * np.sign(sigma_N_Emn_tilde - X)
        delta_alpha_N_Emn = delta_lamda * np.sign(sigma_N_Emn_tilde - X)
        delta_z_N_Emn = delta_lamda
        delta_omega_N_Emn = omega_N_Emn - omega_N_Emn_aux
        delta_r_N_Emn = r_N_Emn - r_N_Emn_aux

        plastic_dissip_N_Emn[...] = np.einsum('...n,...n->...n', sigma_N_Emn, delta_eps_N_p_Emn) - \
                                    np.einsum('...n,...n->...n', X, delta_alpha_N_Emn) - np.einsum('...n,...n->...n',
                                                                                                      Z, delta_z_N_Emn)

        damage_dissip_N_Emn[...] = np.einsum('...n,...n->...n', Y_N, delta_omega_N_Emn) - \
                                   np.einsum('...n,...n->...n', R_N(r_N_Emn), delta_r_N_Emn)

        return sigma_N_Emn, Z, X, Y_N

    # -------------------------------------------------------------------------
    # microplane constitutive law (Tangential CSD)-(Pressure sensitive cumulative damage)
    # -------------------------------------------------------------------------
    def get_tangential_law(self, eps_T_Emna, eps_Emab, omega_N_Emn, z_N_Emn,
                           alpha_N_Emn, r_N_Emn, eps_N_p_Emn, sigma_N_Emn,
                           omega_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna,
                           sigma_T_Emna, plastic_dissip_T_Emn,
                           damage_dissip_T_Emn, plastic_dissip_N_Emn,
                           damage_dissip_N_Emn, total_work_microplane,
                           total_work_macro, eps_aux):


        E_T = self.E * (1.0 - 4 * self.nu) / \
            ((1.0 + self.nu) * (1.0 - 2 * self.nu))
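        # E_T is the corresponding tangential microplane stiffness;
        # note that this expression changes sign at nu = 0.25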

        # thermo forces

        sig_pi_trial = E_T * (eps_T_Emna - eps_T_pi_Emna)

        Z = self.K_T * z_T_Emn
        X = self.gamma_T * alpha_T_Emna
        norm_1 = np.sqrt(
            np.einsum('...na,...na->...n', (sig_pi_trial - X),
                      (sig_pi_trial - X)))
        Y = 0.5 * E_T * \
            np.einsum(
                '...na,...na->...n',
                (eps_T_Emna - eps_T_pi_Emna),
                (eps_T_Emna - eps_T_pi_Emna))

        E_N = self.E / (1.0 - 2.0 * self.nu)

        eps_N_Emn = self._get_e_N_Emn(eps_Emab)
        sigma_N_Emn = (1.0 - omega_N_Emn) * E_N * (eps_N_Emn - eps_N_p_Emn)

        f = norm_1 - self.sigma_T_0 - Z + self.m_T * sigma_N_Emn

        plas_1 = f > 1e-15
        elas_1 = f < 1e-15

        delta_lamda = f / \
                      (E_T / (1.0 - omega_T_Emn) + self.gamma_T + self.K_T) * plas_1
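        # norm_2 falls back to 1.0 on elastic points so that the divisions
        # below stay well defined (plas_1 zeroes those entries anyway)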

        norm_2 = 1.0 * elas_1 + np.sqrt(
            np.einsum('...na,...na->...n', (sig_pi_trial - X),
                      (sig_pi_trial - X))) * plas_1

        eps_T_pi_Emna[..., 0] += plas_1 * delta_lamda * \
                                ((sig_pi_trial[..., 0] - X[..., 0]) /
                                 (1.0 - omega_T_Emn)) / norm_2
        eps_T_pi_Emna[..., 1] += plas_1 * delta_lamda * \
                                ((sig_pi_trial[..., 1] - X[..., 1]) /
                                 (1.0 - omega_T_Emn)) / norm_2

        eps_T_pi_Emna[..., 2] +=  plas_1 * delta_lamda * \
                                ((sig_pi_trial[..., 2] - X[..., 2]) /
                                 (1.0 - omega_T_Emn)) / norm_2
        omega_T_Emn += plas_1 * ((1 - omega_T_Emn) ** self.c_T) * \
                       (delta_lamda * (Y / self.S_T) ** self.r_T) * \
                       (self.sigma_T_0 / (self.sigma_T_0 - self.m_T * sigma_N_Emn)) ** self.p_T
        omega_T_Emn[...] = np.clip(omega_T_Emn, 0, 1.0)

        alpha_T_Emna[..., 0] += plas_1 * delta_lamda * \
                               (sig_pi_trial[..., 0] - X[..., 0]) / norm_2
        alpha_T_Emna[..., 1] += plas_1 * delta_lamda * \
                               (sig_pi_trial[..., 1] - X[..., 1]) / norm_2

        alpha_T_Emna[..., 2] += plas_1 * delta_lamda * \
                               (sig_pi_trial[..., 2] - X[..., 2]) / norm_2

        z_T_Emn += plas_1 * delta_lamda

        sigma_T_Emna[...] = np.einsum('...n,...na->...na', (1 - omega_T_Emn),
                                      E_T * (eps_T_Emna - eps_T_pi_Emna))

        Z = self.K_T * z_T_Emn
        X = self.gamma_T * alpha_T_Emna
        Y = 0.5 * E_T * \
            np.einsum(
                '...na,...na->...n',
                (eps_T_Emna - eps_T_pi_Emna),
                (eps_T_Emna - eps_T_pi_Emna))

        # Energy dissipation evaluation

        # step increments, recorded for the dissipation bookkeeping below
        delta_eps_T_pi_Emna = np.einsum(
            '...n,...na->...na',
            plas_1 * delta_lamda / ((1.0 - omega_T_Emn) * norm_2),
            sig_pi_trial - X)

        delta_alpha_T_Emna = np.einsum(
            '...n,...na->...na',
            plas_1 * delta_lamda / norm_2,
            sig_pi_trial - X)

        delta_z_T_Emn = plas_1 * delta_lamda

        delta_omega_T_Emn = plas_1 * ((1 - omega_T_Emn) ** self.c_T) * \
                       (delta_lamda * (Y / self.S_T) ** self.r_T) * \
                       (self.sigma_T_0 / (self.sigma_T_0 - self.m_T * sigma_N_Emn)) ** self.p_T

        plastic_dissip_T_Emn[...] = np.einsum('...na,...na->...n',
                                              sigma_T_Emna,
                                              delta_eps_T_pi_Emna)
        # hardening contributions currently disabled:
        # - np.einsum('...na,...na->...n', X, delta_alpha_T_Emna)
        # - np.einsum('...n,...n->...n', Z, delta_z_T_Emn)

        damage_dissip_T_Emn[...] = np.einsum('...n,...n->...n', Y,
                                             delta_omega_T_Emn)

        # if plastic_dissip_T_Emn.any() < -1e-15:
        #     print(sigma_T_Emna, delta_eps_T_pi_Emna)
        return sigma_T_Emna, Z, X, Y, plastic_dissip_T_Emn

    # -------------------------------------------------------------------------
    # MICROPLANE - Kinematic constraints
    # -------------------------------------------------------------------------

    # get the operator of the microplane normals
    _MPNN = tr.Property(depends_on='n_mp')

    @tr.cached_property
    def _get__MPNN(self):
        MPNN_nij = np.einsum('ni,nj->nij', self._MPN, self._MPN)
        return MPNN_nij

    # get the third order tangential tensor (operator) for each microplane
    _MPTT = tr.Property(depends_on='n_mp')

    @tr.cached_property
    def _get__MPTT(self):
        delta = self.DELTA
        MPTT_nijr = 0.5 * (
            np.einsum('ni,jr -> nijr', self._MPN, delta) +
            np.einsum('nj,ir -> njir', self._MPN, delta) -
            2 * np.einsum('ni,nj,nr -> nijr', self._MPN, self._MPN, self._MPN))
        return MPTT_nijr

    def _get_e_N_Emn(self, eps_Emab):
        # get the normal strain array for each microplane
        return np.einsum('nij,...ij->...n', self._MPNN, eps_Emab)

    def _get_e_T_Emna(self, eps_Emab):
        # get the tangential strain vector array for each microplane
        MPTT_ijr = self._get__MPTT()
        return np.einsum('nija,...ij->...na', MPTT_ijr, eps_Emab)

    # ---------------------------------------------------------------------
    # Extra homogenization of damage tensor in case of two damage parameters
    # Returns the 4th order damage tensor 'beta4' using (ref. [Baz99], Eq.(63))
    # ---------------------------------------------------------------------

    def _get_beta_Emabcd(self, eps_Emab, omega_N_Emn, z_N_Emn, alpha_N_Emn,
                         r_N_Emn, eps_N_p_Emn, sigma_N_Emn, omega_T_Emn,
                         z_T_Emn, alpha_T_Emna, eps_T_pi_Emna, sigma_T_Emna,
                         plastic_dissip_T_Emn, damage_dissip_T_Emn,
                         plastic_dissip_N_Emn, damage_dissip_N_Emn,
                         total_work_microplane, total_work_macro, eps_aux):
        # Returns the 4th order damage tensor 'beta4' using
        # (cf. [Baz99], Eq.(63))

        eps_N_Emn = self._get_e_N_Emn(eps_Emab)
        eps_T_Emna = self._get_e_T_Emna(eps_Emab)

        sigma_N_Emn, Z, X, Y_N = self.get_normal_law(
            eps_N_Emn, omega_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn, eps_N_p_Emn,
            sigma_N_Emn, omega_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna,
            sigma_T_Emna, plastic_dissip_T_Emn, damage_dissip_T_Emn,
            plastic_dissip_N_Emn, damage_dissip_N_Emn, total_work_microplane,
            total_work_macro, eps_aux)

        # sliding tangential strains
        sigma_T_Emna, Z_T, X_T, Y_T, plastic_dissip_T_Emn = self.get_tangential_law(
            eps_T_Emna, eps_Emab, omega_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn,
            eps_N_p_Emn, sigma_N_Emn, omega_T_Emn, z_T_Emn, alpha_T_Emna,
            eps_T_pi_Emna, sigma_T_Emna, plastic_dissip_T_Emn,
            damage_dissip_T_Emn, plastic_dissip_N_Emn, damage_dissip_N_Emn,
            total_work_microplane, total_work_macro, eps_aux)

        delta = self.DELTA
        beta_N = np.sqrt(1. - omega_N_Emn)
        beta_T = np.sqrt(1. - omega_T_Emn)

        beta_ijkl = np.einsum('n, ...n,ni, nj, nk, nl -> ...ijkl', self._MPW, beta_N, self._MPN, self._MPN, self._MPN,
                              self._MPN) + \
                    0.25 * (np.einsum('n, ...n,ni, nk, jl -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                            np.einsum('n, ...n,ni, nl, jk -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                            np.einsum('n, ...n,nj, nk, il -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                            np.einsum('n, ...n,nj, nl, ik -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) -
                            4.0 * np.einsum('n, ...n, ni, nj, nk, nl -> ...ijkl', self._MPW, beta_T, self._MPN,
                                            self._MPN, self._MPN, self._MPN))

        return beta_ijkl

    def _get_phi(self, eps_Emab, omega_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn,
                 eps_N_p_Emn, sigma_N_Emn, omega_T_Emn, z_T_Emn, alpha_T_Emna,
                 eps_T_pi_Emna, plastic_dissip_T_Emn):

        phi_n = np.sqrt(1.0 - omega_N_Emn) * np.sqrt(1.0 - omega_T_Emn)
        phi_ab = np.einsum('...n,n,nab->...ab', phi_n, self._MPW, self._MPNN)
        return phi_ab

    DELTA = np.identity(n_D)

    # -----------------------------------------------------------
    # Integration of the (inelastic) strains for each microplane
    # -----------------------------------------------------------

    def _get_eps_p_Emab(self, eps_Emab, omega_N_Emn, z_N_Emn, alpha_N_Emn,
                        r_N_Emn, eps_N_p_Emn, sigma_N_Emn, omega_T_Emn,
                        z_T_Emn, alpha_T_Emna, eps_T_pi_Emna, sigma_T_Emna,
                        plastic_dissip_T_Emn, damage_dissip_T_Emn,
                        plastic_dissip_N_Emn, damage_dissip_N_Emn,
                        total_work_microplane, total_work_macro, eps_aux):
        eps_N_Emn = self._get_e_N_Emn(eps_Emab)
        eps_T_Emna = self._get_e_T_Emna(eps_Emab)

        sigma_N_Emn, Z, X, Y_N = self.get_normal_law(
            eps_N_Emn, omega_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn, eps_N_p_Emn,
            sigma_N_Emn, omega_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna,
            sigma_T_Emna, plastic_dissip_T_Emn, damage_dissip_T_Emn,
            plastic_dissip_N_Emn, damage_dissip_N_Emn, total_work_microplane,
            total_work_macro, eps_aux)

        # sliding tangential strains
        sigma_T_Emna, Z_T, X_T, Y_T, plastic_dissip_T_Emn = self.get_tangential_law(
            eps_T_Emna, eps_Emab, omega_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn,
            eps_N_p_Emn, sigma_N_Emn, omega_T_Emn, z_T_Emn, alpha_T_Emna,
            eps_T_pi_Emna, sigma_T_Emna, plastic_dissip_T_Emn,
            damage_dissip_T_Emn, plastic_dissip_N_Emn, damage_dissip_N_Emn,
            total_work_microplane, total_work_macro, eps_aux)

        # 2-nd order plastic (inelastic) tensor
        eps_p_Emab = (np.einsum('n,...n,na,nb->...ab', self._MPW, eps_N_p_Emn,
                                self._MPN, self._MPN) + 0.5 *
                      (np.einsum('n,...nf,na,fb->...ab', self._MPW,
                                 eps_T_pi_Emna, self._MPN, self.DELTA) +
                       np.einsum('n,...nf,nb,fa->...ab', self._MPW,
                                 eps_T_pi_Emna, self._MPN, self.DELTA)))

        return eps_p_Emab

    def get_total_work(self, eps_Emab, sig_Emab, omega_N_Emn, z_N_Emn,
                       alpha_N_Emn, r_N_Emn, eps_N_p_Emn, sigma_N_Emn,
                       omega_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna,
                       sigma_T_Emna, plastic_dissip_T_Emn, damage_dissip_T_Emn,
                       plastic_dissip_N_Emn, damage_dissip_N_Emn,
                       total_work_microplane, total_work_macro, eps_aux):

        delta_eps_Emab = eps_Emab - eps_aux
        delta_eps_N_Emn = self._get_e_N_Emn(eps_Emab) - self._get_e_N_Emn(
            eps_aux)
        delta_eps_T_Emna = self._get_e_T_Emna(eps_Emab) - self._get_e_T_Emna(
            eps_aux)

        # work_microplane = np.einsum('...n,...n->...n', sigma_N_Emn, delta_eps_N_Emn) + np.einsum('...na,...na->...n',
        #                                                                                          sigma_T_Emna,
        #                                                                                          delta_eps_T_Emna)
        delta_eps_vector = np.einsum('...na,...n->...na', self._MPN,
                                     delta_eps_N_Emn) + delta_eps_T_Emna
        sigma_vector = np.einsum('...na,...n->...na', self._MPN,
                                 sigma_N_Emn) + sigma_T_Emna

        work_microplane = np.einsum('...na,...na->...n', sigma_vector,
                                    delta_eps_vector)

        total_work_microplane[...] = np.einsum('...n,...n->...', self._MPW,
                                               work_microplane)

        total_work_macro[...] = np.einsum('...ij,...ij->...', sig_Emab,
                                          delta_eps_Emab)

        eps_aux[...] = eps_Emab

        return total_work_microplane, total_work_macro

    def get_corr_pred(self, eps_Emab, t_n1, **Eps_k):
        """Evaluation - get the corrector and predictor
        """
        # Corrector predictor computation.

        # ------------------------------------------------------------------
        # Damage tensor (4th order) using product- or sum-type symmetrization:
        # ------------------------------------------------------------------
        beta_Emabcd = self._get_beta_Emabcd(eps_Emab, **Eps_k)

        # ------------------------------------------------------------------
        # Damaged stiffness tensor calculated based on the damage tensor beta4:
        # ------------------------------------------------------------------

        D_Emabcd = np.einsum('...ijab, abef, ...cdef->...ijcd', beta_Emabcd,
                             self.D_abef, beta_Emabcd)

        # phi_ab = self._get_phi(eps_Emab, **Eps_k)
        #
        #
        # D_Emabcd = 0.25 * (
        #        np.einsum('mjnl, ...im, ...kn -> ...ijkl', self.D_abef, phi_ab, phi_ab) +
        #        np.einsum('imnl, ...jm, ...kn -> ...ijkl', self.D_abef, phi_ab, phi_ab) +
        #        np.einsum('mjkn, ...im, ...ln -> ...ijkl', self.D_abef, phi_ab, phi_ab) +
        #        np.einsum('imkn, ...jm, ...ln -> ...ijkl', self.D_abef, phi_ab, phi_ab)
        # )

        # ----------------------------------------------------------------------
        # Return stresses (corrector) and damaged secant stiffness matrix (predictor)
        # ----------------------------------------------------------------------
        # plastic strain tensor
        eps_p_Emab = self._get_eps_p_Emab(eps_Emab, **Eps_k)

        # elastic strain tensor
        eps_e_Emab = eps_Emab - eps_p_Emab

        # calculation of the stress tensor
        sig_Emab = np.einsum('...abcd,...cd->...ab', D_Emabcd, eps_e_Emab)
        total_work_micro, total_work_macro = self.get_total_work(
            eps_Emab, sig_Emab, **Eps_k)

        return sig_Emab, D_Emabcd
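A short, self-contained sketch of what the state_var_shapes machinery above implies: per-microplane shapes get the n_mp axis appended, while the leading element axes (the "Em" in the variable names) are added by the surrounding framework. The sizes and the trimmed shape dictionaries below are illustrative assumptions, not the model's defaults.

import numpy as np

n_elems, n_mp, n_D = 10, 28, 3  # illustrative sizes
mic_shapes = dict(omega_N_Emn=(), eps_T_pi_Emna=(n_D,))     # per-microplane
mac_shapes = dict(total_work_macro=(), eps_aux=(n_D, n_D))  # per-element
state = {name: np.zeros((n_elems, n_mp) + shape)
         for name, shape in mic_shapes.items()}
state.update({name: np.zeros((n_elems,) + shape)
              for name, shape in mac_shapes.items()})
for name, arr in state.items():
    print(name, arr.shape)  # e.g. eps_T_pi_Emna (10, 28, 3)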
Code example #27
File: signal_tools.py Project: swang29/hyperspy
class LineInSignal1D(t.HasTraits):

    """Adds a vertical draggable line to a spectrum that reports its
    position to the position attribute of the class.

    Attributes
    ----------
    position : float
        The position of the vertical line in the one-dimensional signal.
        Moving the line changes the position, but the reverse is not true.
    on : bool
        Turns the line on and off.
    color : wx.Colour
        The color of the line. Setting it automatically redraws the line.

    """
    position = t.Float()
    is_ok = t.Bool(False)
    on = t.Bool(False)
    # The following is disabled because as of traits 4.6 the Color trait
    # imports traitsui (!)
    # try:
    #     color = t.Color("black")
    # except ModuleNotFoundError:  # traitsui is not installed
    #     pass
    color_str = t.Str("black")

    def __init__(self, signal):
        if signal.axes_manager.signal_dimension != 1:
            raise SignalDimensionError(
                signal.axes_manager.signal_dimension, 1)

        self.signal = signal
        self.signal.plot()
        axis_dict = signal.axes_manager.signal_axes[0].get_axis_dictionary()
        am = AxesManager([axis_dict, ])
        am._axes[0].navigate = True
        # Set the position of the line in the middle of the spectral
        # range by default
        am._axes[0].index = int(round(am._axes[0].size / 2))
        self.axes_manager = am
        self.axes_manager.events.indices_changed.connect(
            self.update_position, [])
        self.on_trait_change(self.switch_on_off, 'on')

    def draw(self):
        self.signal._plot.signal_plot.figure.canvas.draw_idle()

    def switch_on_off(self, obj, trait_name, old, new):
        if not self.signal._plot.is_active:
            return

        if new is True and old is False:
            self._line = VerticalLineWidget(self.axes_manager)
            self._line.set_mpl_ax(self.signal._plot.signal_plot.ax)
            self._line.patch.set_linewidth(2)
            self._color_changed("black", "black")
            # There is no need to call draw because setting the
            # color calls it.

        elif new is False and old is True:
            self._line.close()
            self._line = None
            self.draw()

    def update_position(self, *args, **kwargs):
        if not self.signal._plot.is_active:
            return
        self.position = self.axes_manager.coordinates[0]

    def _color_changed(self, old, new):
        if self.on is False:
            return

        self._line.patch.set_color((self.color.Red() / 255.,
                                    self.color.Green() / 255.,
                                    self.color.Blue() / 255.,))
        self.draw()
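A minimal usage sketch, assuming hyperspy and an interactive matplotlib backend are available; the random spectrum is illustrative.

import numpy as np
import hyperspy.api as hs

s = hs.signals.Signal1D(np.random.rand(1024))
line = LineInSignal1D(s)  # plots the signal via signal.plot()
line.on = True            # creates the draggable vertical line widget
print(line.position)      # updated whenever the line is dragged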
Code example #28
class config(HasTraits):
    uuid = traits.Str(desc="UUID")

    # Directories
    working_dir = Directory(mandatory=True,
                            desc="Location of the Nipype working directory")
    base_dir = Directory(
        os.path.abspath('.'),
        mandatory=True,
        desc='Base directory of data. (Should be subject-independent)')
    sink_dir = Directory(mandatory=True,
                         desc="Location where the BIP will store the results")
    crash_dir = Directory(mandatory=False,
                          desc="Location to store crash files")
    save_script_only = traits.Bool(False)

    # Execution
    run_using_plugin = Bool(
        False,
        usedefault=True,
        desc="True to run pipeline with plugin, False to run serially")
    plugin = traits.Enum("PBS",
                         "MultiProc",
                         "SGE",
                         "Condor",
                         usedefault=True,
                         desc="plugin to use, if run_using_plugin=True")
    plugin_args = traits.Dict({"qsub_args": "-q many"},
                              usedefault=True,
                              desc='Plugin arguments.')
    test_mode = Bool(
        False,
        mandatory=False,
        usedefault=True,
        desc='Affects whether and where the workflow keeps its '
             'intermediary files. True to keep intermediary files.'
    )
    timeout = traits.Float(14.0)
    datagrabber = traits.Instance(Data, ())

    # Regression
    run_one_sample_T_test = traits.Bool(True)
    run_regression = traits.Bool()
    design_csv = traits.File(desc="design .csv file")
    reg_contrasts = traits.Code(
        desc=
        "function named reg_contrasts which takes in 0 args and returns contrasts"
    )
    use_regressors = traits.Bool()
    estimation_method = traits.Enum('Classical', 'Bayesian', 'Bayesian2')
    include_intercept = traits.Bool(True)
    # Normalization

    norm_template = traits.File(desc='Template of files')
    use_mask = traits.Bool(False)
    mask_file = traits.File(desc='already binarized mask file to use')

    # Correction
    p_threshold = traits.Float(0.05)
    height_threshold = traits.Float(0.05)
    min_cluster_size = traits.Int(25)
    # Advanced Options
    use_advanced_options = traits.Bool()
    advanced_script = traits.Code()

    # Buttons
    check_func_datagrabber = Button("Check")
Code example #29
class MATS3DDesmorat(MATS3DEval):
    '''Damage-plasticity model by Desmorat.
    '''

    #-------------------------------------------------------------------------
    # Material parameters
    #-------------------------------------------------------------------------

    E_1 = tr.Float(16.0e+3,
                   label="E_1",
                   desc="first Young's Modulus",
                   auto_set=False,
                   input=True)
    E_2 = tr.Float(19.0e+3,
                   label="E_2",
                   desc="second Young's Modulus",
                   auto_set=False,
                   input=True)

    nu = tr.Float(0.2,
                  label='nu',
                  desc="Poisson ratio",
                  auto_set=False,
                  input=True)

    gamma = Float(110.0,
                  label="Gamma",
                  desc="kinematic hardening modulus",
                  MAT=True,
                  symbol=r'\gamma',
                  unit='MPa/mm',
                  enter_set=True,
                  auto_set=False)

    K = Float(130.0,
              label="K",
              desc="isotropic hardening modulus",
              MAT=True,
              symbol='K',
              unit='MPa/mm',
              enter_set=True,
              auto_set=False)

    S = Float(476.0e-6,
              label="S",
              desc="damage strength",
              MAT=True,
              symbol='S',
              unit='MPa/mm',
              enter_set=True,
              auto_set=False)

    tau_bar = Float(6.0,
                    label="Tau_0 ",
                    desc="yield stress",
                    symbol=r'\bar{\tau}',
                    unit='MPa',
                    MAT=True,
                    enter_set=True,
                    auto_set=False)

    #=========================================================================
    # Configurational parameters
    #=========================================================================
    U_var_shape = (6,)
    '''Shape of the primary variable required by the TStepState.
    '''

    state_var_shapes = {'sigma_ab': (3, 3),
                        'sigma_pi_ab': (3, 3),
                        'eps_pi_ab': (3, 3),
                        'alpha_ab': (3, 3),
                        'z_a': (),
                        'omega_a': ()}
    r'''
    Shapes of the state variables to be stored in the global array
    at the level of the domain.
    '''

    node_name = 'Desmorat model'

    tree_node_list = List([])

    def _get_lame_1_params(self):
        la = self.E_1 * self.nu / ((1. + self.nu) * (1. - 2. * self.nu))
        # second Lame parameter (shear modulus)
        mu = self.E_1 / (2. + 2. * self.nu)
        return la, mu

    D_1_abef = tr.Property(tr.Array, depends_on='+input')

    @tr.cached_property
    def _get_D_1_abef(self):
        la, mu = self._get_lame_1_params()
        delta = np.identity(3)
        D_1_abef = (np.einsum(',ij,kl->ijkl', la, delta, delta) +
                    np.einsum(',ik,jl->ijkl', mu, delta, delta) +
                    np.einsum(',il,jk->ijkl', mu, delta, delta))

        return D_1_abef

    def _get_lame_2_params(self):
        la = self.E_2 * self.nu / ((1. + self.nu) * (1. - 2. * self.nu))
        # second Lame parameter (shear modulus)
        mu = self.E_2 / (2. + 2. * self.nu)
        return la, mu

    D_2_abef = tr.Property(tr.Array, depends_on='+input')

    @tr.cached_property
    def _get_D_2_abef(self):
        la, mu = self._get_lame_2_params()
        delta = np.identity(3)
        D_2_abef = (np.einsum(',ij,kl->ijkl', la, delta, delta) +
                    np.einsum(',ik,jl->ijkl', mu, delta, delta) +
                    np.einsum(',il,jk->ijkl', mu, delta, delta))

        return D_2_abef

    def get_corr_pred(self, eps_ab, tn1,
                      sigma_ab, sigma_pi_ab, eps_pi_ab,
                      alpha_ab, z_a, omega_a):
        r'''
        Corrector predictor computation.
        '''
        D_1_abef = self.D_1_abef
        D_2_abef = self.D_2_abef
        sigma_pi_ab_trial = np.einsum(
            '...ijkl,...kl->...ij',
            D_2_abef, eps_ab - eps_pi_ab
        )
        a = sigma_pi_ab_trial - self.gamma * alpha_ab
        norm_a = np.sqrt(np.einsum(
            '...ij,...ij',
            a, a)
        )
        f = norm_a - self.tau_bar - self.K * z_a
        # identify the inelastic material points to perform return mapping
        I = np.where(f > 1e-6)
        delta_pi_I = (
            f[I] /
            (self.E_2 + (self.K + self.gamma) * (1. - omega_a[I]))
        )
        b_I = norm_a[I]
        return_ab_I = np.einsum(
            '...ij,...->...ij',
            a[I], delta_pi_I / b_I
        )
        eps_pi_ab[I] += return_ab_I
        eps_diff_ab_I = eps_ab[I] - eps_pi_ab[I]
        Y_a_I = 0.5 * (
            np.einsum(
                '...ij,...ijkl,...kl',
                eps_ab[I], D_1_abef, eps_ab[I]
            )
            +
            np.einsum(
                '...ij,...ijkl,...kl',
                eps_diff_ab_I, D_2_abef, eps_diff_ab_I
            )
        )
        omega_a[I] += (Y_a_I / self.S) * delta_pi_I
        # cap the damage; chained fancy indexing (omega_a[I][...] = ...)
        # would write into a copy, so index omega_a directly
        omega_a[omega_a >= 0.99] = 0.99
        alpha_ab[I] += np.einsum(
            '...ij,...->...ij',
            return_ab_I, (1.0 - omega_a[I])
        )
        z_a[I] += delta_pi_I * (1.0 - omega_a[I])

        # evaluate the material stress and stiffness tensors
        phi_n = 1.0 - omega_a
        # side effect: the returned stress is simultaneously recorded as a
        # state variable - not ideal and potentially dangerous
        sigma_ab[...] = (
            np.einsum(
                '...,...ijkl,...kl->...ij',
                phi_n, D_1_abef, eps_ab
            ) +
            np.einsum(
                '...,...ijkl,...kl->...ij',
                phi_n, D_2_abef, eps_ab - eps_pi_ab
            )
        )
        # secant stiffness matrix
        D_abef = np.einsum(
            '...,...ijkl->...ijkl',
            phi_n, D_1_abef + D_2_abef
        )
        return sigma_ab, D_abef

    traits_view = View(
        VGroup(
            Item('E_1', full_size=True, resizable=True),
            Item('E_2'),
            Item('nu'),
            label='Elastic parameters'
        ),
        VGroup(
            Item('gamma', full_size=True, resizable=True),
            Item('K'),
            Item("S"),
            Item("tau_bar"),
            label='Inelastic parameters'
        )
    )
    tree_view = traits_view
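A minimal driving sketch for the corrector-predictor above, assuming the surrounding imports (MATS3DEval, numpy as np, the traits machinery) are in scope; the single-point batch and the strain level are illustrative.

m = MATS3DDesmorat()
# one array per state variable, shaped as declared in state_var_shapes
state = {name: np.zeros((1,) + shape)
         for name, shape in m.state_var_shapes.items()}
eps_ab = np.zeros((1, 3, 3))
eps_ab[0, 0, 0] = 1.0e-3  # uniaxial strain step
sig_ab, D_abcd = m.get_corr_pred(eps_ab, 1.0, **state)
print(sig_ab[0, 0, 0], state['omega_a'][0])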
Code example #30
class MS1(MATS3DEval, InteractiveModel):
    gamma_T = tr.Float(100000.,
                       label="gamma_T",
                       desc=" Tangential Kinematic hardening modulus",
                       enter_set=True,
                       auto_set=False)

    K_T = tr.Float(10000.,
                   label="K_T",
                   desc="Tangential Isotropic harening",
                   enter_set=True,
                   auto_set=False)

    S_T = tr.Float(0.005,
                   label="S_T",
                   desc="Damage strength",
                   enter_set=True,
                   auto_set=False)

    r_T = tr.Float(9.,
                   label="r",
                   desc="Damage cumulation parameter",
                   enter_set=True,
                   auto_set=False)
    p_T = tr.Float(12.,
                   label="p_T",
                   desc="Damage cumulation parameter",
                   enter_set=True,
                   auto_set=False)

    c_T = tr.Float(4.6,
                   label="c_T",
                   desc="Damage cumulation parameter",
                   enter_set=True,
                   auto_set=False)

    sigma_T_0 = tr.Float(1.7,
                         label="sigma_T_0",
                         desc="Reversibility limit",
                         enter_set=True,
                         auto_set=False)

    m_T = tr.Float(0.003,
                   label="m_T",
                   desc="Lateral pressure coefficient",
                   enter_set=True,
                   auto_set=False)

    # -------------------------------------------
    # Normal_Tension constitutive law parameters (without cumulative normal strain)
    # -------------------------------------------
    Ad = tr.Float(100.0,
                  label="A_d",
                  desc="brittleness coefficient",
                  enter_set=True,
                  auto_set=False)

    eps_0 = tr.Float(0.00008,
                     label="eps_N_0",
                     desc="threshold strain",
                     enter_set=True,
                     auto_set=False)

    # -----------------------------------------------
    # Normal_Compression constitutive law parameters
    # -----------------------------------------------
    K_N = tr.Float(10000.,
                   label="K_N",
                   desc=" Normal isotropic harening",
                   enter_set=True,
                   auto_set=False)

    gamma_N = tr.Float(5000.,
                       label="gamma_N",
                       desc="Normal kinematic hardening",
                       enter_set=True,
                       auto_set=False)

    sigma_N_0 = tr.Float(30.,
                         label="sigma_N_0",
                         desc="Yielding stress",
                         enter_set=True,
                         auto_set=False)

    # -------------------------------------------------------------------------
    # Cached elasticity tensors
    # -------------------------------------------------------------------------

    E = tr.Float(35e+3,
                 label="E",
                 desc="Young's Modulus",
                 auto_set=False,
                 input=True)

    nu = tr.Float(0.2,
                  label='nu',
                  desc="Poison ratio",
                  auto_set=False,
                  input=True)

    ipw_view = View(
        Item('gamma_T', latex=r'\gamma_\mathrm{T}', minmax=(10, 100000)),
        Item('K_T', latex=r'K_\mathrm{T}', minmax=(10, 10000)),
        Item('S_T', latex=r'S_\mathrm{T}', minmax=(0.001, 0.01)),
        Item('r_T', latex=r'r_\mathrm{T}', minmax=(1, 3)),
        Item('p_T', latex=r'p_\mathrm{T}', minmax=(1, 40)),
        Item('c_T', latex=r'c_\mathrm{T}', minmax=(1, 10)),
        Item('sigma_T_0', latex=r'\bar{\sigma}^\pi_\mathrm{T}', minmax=(1, 10)),
        Item('m_T', latex=r'm_\mathrm{T}', minmax=(0.001, 3)),
    )

    n_D = 3

    state_var_shapes = tr.Property

    @tr.cached_property
    def _get_state_var_shapes(self):
        return {
            name: (self.n_mp, ) + shape
            for name, shape in self.mic_state_var_shapes.items()
        }

    mic_state_var_shapes = dict(
        omega_N_Emn=(),  # damage N
        z_N_Emn=(),
        alpha_N_Emn=(),
        r_N_Emn=(),
        eps_N_p_Emn=(),
        sigma_N_Emn=(),
        omega_T_Emn=(),
        z_T_Emn=(),
        alpha_T_Emna=(n_D, ),
        eps_T_pi_Emna=(n_D, ),
    )
    '''
    State variables:
     1) damage N,
     2) iso N,
     3) kin N,
     4) consolidation N,
     5) eps p N,
     6) sigma N,
     7) iso F N,
     8) kin F N,
     9) energy release N,
     10) damage T,
     11) iso T,
     12-13) kin T,
     14-15) eps p T,
     16-17) sigma T,
     18) iso F T,
     19-20) kin F T,
     21) energy release T
    '''

    # --------------------------------------------------------------
    # microplane constitutive law (normal behavior CP + TD)
    # (without cumulative normal strain for fatigue under tension)
    # --------------------------------------------------------------
    def get_normal_law(self, eps_N_Emn, omega_N_Emn, z_N_Emn, alpha_N_Emn,
                       r_N_Emn, eps_N_p_Emn):

        E_N = self.E / (1.0 - 2.0 * self.nu)

        # When deciding whether a microplane is in tension or compression,
        # we enforce a strain boundary such that sigma_N <= 0 whenever
        # eps_N < 0, so the model never enters the quadrant of compressive
        # strain combined with tensile stress.

        sigma_trial = E_N * (eps_N_Emn - eps_N_p_Emn)
        # looking for microplanes violating strain boundary
        pos1 = [(eps_N_Emn < -1e-6) & (sigma_trial > 1e-6)]
        sigma_trial[pos1[0]] = 0
        pos = eps_N_Emn > 1e-6  # microplanes under tension
        pos2 = eps_N_Emn < -1e-6  # microplanes under compression
        H = 1.0 * pos
        H2 = 1.0 * pos2

        # thermo forces
        sigma_N_Emn_tilde = E_N * (eps_N_Emn - eps_N_p_Emn)
        sigma_N_Emn_tilde[pos1[0]] = 0  # imposing strain boundary

        Z = self.K_N * z_N_Emn
        X = self.gamma_N * alpha_N_Emn * H2
        h = (self.sigma_N_0 + Z) * H2

        f_trial = (abs(sigma_N_Emn_tilde - X) - h) * H2

        # threshold plasticity

        thres_1 = f_trial > 1e-6

        delta_lamda = f_trial / \
                      (E_N / (1 - omega_N_Emn) + abs(self.K_N) + self.gamma_N) * thres_1
        eps_N_p_Emn += delta_lamda * \
                      np.sign(sigma_N_Emn_tilde - X)
        z_N_Emn += delta_lamda
        alpha_N_Emn += delta_lamda * \
                      np.sign(sigma_N_Emn_tilde - X)

        def R_N(r_N_Emn):
            return (1.0 / self.Ad) * (-r_N_Emn / (1.0 + r_N_Emn))

        Y_N = 0.5 * H * E_N * (eps_N_Emn - eps_N_p_Emn)**2.0
        Y_0 = 0.5 * E_N * self.eps_0**2.0

        f = (Y_N - (Y_0 + R_N(r_N_Emn)))

        # threshold damage

        thres_2 = f > 1e-6

        def f_w(Y):
            return 1.0 - 1.0 / (1.0 + self.Ad * (Y - Y_0))

        omega_N_Emn[thres_2] = f_w(Y_N)[thres_2]
        omega_N_Emn[...] = np.clip(omega_N_Emn, 0, 0.9999)
        r_N_Emn[thres_2] = -omega_N_Emn[thres_2]

        sigma_N_Emn = (1.0 - H * omega_N_Emn) * E_N * (eps_N_Emn - eps_N_p_Emn)
        # looking for microplanes violating the strain boundary
        pos1 = [(eps_N_Emn < -1e-6) & (sigma_trial > 1e-6)]
        sigma_N_Emn[pos1[0]] = 0

        Z = self.K_N * z_N_Emn
        X = self.gamma_N * alpha_N_Emn * H2

        return sigma_N_Emn, Z, X, Y_N

    # -------------------------------------------------------------------------
    # microplane constitutive law (Tangential CSD)-(Pressure sensitive cumulative damage)
    # -------------------------------------------------------------------------
    def get_tangential_law(self, eps_T_Emna, omega_T_Emn, z_T_Emn,
                           alpha_T_Emna, eps_T_pi_Emna, sigma_N_Emn):

        E_T = self.E * (1.0 - 4 * self.nu) / \
            ((1.0 + self.nu) * (1.0 - 2 * self.nu))

        # thermo forces

        sig_pi_trial = E_T * (eps_T_Emna - eps_T_pi_Emna)

        Z = self.K_T * z_T_Emn
        X = self.gamma_T * alpha_T_Emna
        norm_1 = np.sqrt(
            np.einsum('...na,...na->...n', (sig_pi_trial - X),
                      (sig_pi_trial - X)))
        Y = 0.5 * E_T * \
            np.einsum(
                '...na,...na->...n',
                (eps_T_Emna - eps_T_pi_Emna),
                (eps_T_Emna - eps_T_pi_Emna))

        # threshold

        f = norm_1 - self.sigma_T_0 - Z + self.m_T * sigma_N_Emn

        plas_1 = f > 1e-6
        elas_1 = f < 1e-6

        delta_lamda = f / \
                      (E_T / (1.0 - omega_T_Emn) + self.gamma_T + self.K_T) * plas_1
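        # norm_2 falls back to 1.0 on elastic points so that the divisions
        # below stay well defined (plas_1 zeroes those entries anyway)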

        norm_2 = 1.0 * elas_1 + np.sqrt(
            np.einsum('...na,...na->...n', (sig_pi_trial - X),
                      (sig_pi_trial - X))) * plas_1

        eps_T_pi_Emna[..., 0] += plas_1 * delta_lamda * \
                                ((sig_pi_trial[..., 0] - X[..., 0]) /
                                 (1.0 - omega_T_Emn)) / norm_2
        eps_T_pi_Emna[..., 1] += plas_1 * delta_lamda * \
                                ((sig_pi_trial[..., 1] - X[..., 1]) /
                                 (1.0 - omega_T_Emn)) / norm_2

        eps_T_pi_Emna[..., 2] +=  plas_1 * delta_lamda * \
                                ((sig_pi_trial[..., 2] - X[..., 2]) /
                                 (1.0 - omega_T_Emn)) / norm_2

        omega_T_Emn += ((1 - omega_T_Emn) ** self.c_T) * \
                       (delta_lamda * (Y / self.S_T) ** self.r_T) * \
                       (self.sigma_T_0 / (self.sigma_T_0 + self.m_T * sigma_N_Emn)) ** self.p_T
        omega_T_Emn[...] = np.clip(omega_T_Emn, 0, 0.9999)

        alpha_T_Emna[..., 0] += plas_1 * delta_lamda * \
                               (sig_pi_trial[..., 0] - X[..., 0]) / norm_2
        alpha_T_Emna[..., 1] += plas_1 * delta_lamda * \
                               (sig_pi_trial[..., 1] - X[..., 1]) / norm_2

        alpha_T_Emna[..., 2] += plas_1 * delta_lamda * \
                               (sig_pi_trial[..., 2] - X[..., 2]) / norm_2

        z_T_Emn += delta_lamda

        sigma_T_Emna = np.einsum('...n,...na->...na', (1 - omega_T_Emn),
                                 E_T * (eps_T_Emna - eps_T_pi_Emna))

        Z = self.K_T * z_T_Emn
        X = self.gamma_T * alpha_T_Emna
        Y = 0.5 * E_T * \
            np.einsum(
                '...na,...na->...n',
                (eps_T_Emna - eps_T_pi_Emna),
                (eps_T_Emna - eps_T_pi_Emna))

        return sigma_T_Emna, Z, X, Y

    # -------------------------------------------------------------------------
    # MICROPLANE - Kinematic constraints
    # -------------------------------------------------------------------------

    # get the operator of the microplane normals
    _MPNN = tr.Property(depends_on='n_mp')

    @tr.cached_property
    def _get__MPNN(self):
        MPNN_nij = np.einsum('ni,nj->nij', self._MPN, self._MPN)
        return MPNN_nij

    # get the third order tangential tensor (operator) for each microplane
    _MPTT = tr.Property(depends_on='n_mp')

    @tr.cached_property
    def _get__MPTT(self):
        delta = self.DELTA
        MPTT_nijr = 0.5 * (
            np.einsum('ni,jr -> nijr', self._MPN, delta) +
            np.einsum('nj,ir -> njir', self._MPN, delta) -
            2 * np.einsum('ni,nj,nr -> nijr', self._MPN, self._MPN, self._MPN))
        return MPTT_nijr

    def _get_e_N_Emn_2(self, eps_Emab):
        # get the normal strain array for each microplane
        return np.einsum('nij,...ij->...n', self._MPNN, eps_Emab)

    def _get_e_T_Emnar_2(self, eps_Emab):
        # get the tangential strain vector array for each microplane
        MPTT_ijr = self._get__MPTT()
        return np.einsum('nija,...ij->...na', MPTT_ijr, eps_Emab)

    # ---------------------------------------------------------------------
    # Extra homogenization of damage tensor in case of two damage parameters
    # Returns the 4th order damage tensor 'beta4' using (ref. [Baz99], Eq.(63))
    # ---------------------------------------------------------------------

    def _get_beta_Emabcd_2(self, eps_Emab, omega_N_Emn, z_N_Emn, alpha_N_Emn,
                           r_N_Emn, eps_N_p_Emn, sigma_N_Emn, omega_T_Emn,
                           z_T_Emn, alpha_T_Emna, eps_T_pi_Emna):
        # Returns the 4th order damage tensor 'beta4' using
        # (cf. [Baz99], Eq.(63))

        eps_N_Emn = self._get_e_N_Emn_2(eps_Emab)
        eps_T_Emna = self._get_e_T_Emnar_2(eps_Emab)

        sigma_N_Emn, Z_n, X_n, Y_n = self.get_normal_law(
            eps_N_Emn, omega_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn, eps_N_p_Emn)

        sigma_T_Emna, Z_T, X_T, Y_T = self.get_tangential_law(
            eps_T_Emna, omega_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna,
            sigma_N_Emn)

        delta = self.DELTA
        beta_N = np.sqrt(1. - omega_N_Emn)
        beta_T = np.sqrt(1. - omega_T_Emn)

        beta_ijkl = np.einsum('n, ...n,ni, nj, nk, nl -> ...ijkl', self._MPW, beta_N, self._MPN, self._MPN, self._MPN,
                              self._MPN) + \
                    0.25 * (np.einsum('n, ...n,ni, nk, jl -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                            np.einsum('n, ...n,ni, nl, jk -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                            np.einsum('n, ...n,nj, nk, il -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) +
                            np.einsum('n, ...n,nj, nl, ik -> ...ijkl', self._MPW, beta_T, self._MPN, self._MPN, delta) -
                            4.0 * np.einsum('n, ...n, ni, nj, nk, nl -> ...ijkl', self._MPW, beta_T, self._MPN,
                                            self._MPN, self._MPN, self._MPN))

        return beta_ijkl

    DELTA = np.identity(n_D)

    # -----------------------------------------------------------
    # Integration of the (inelastic) strains for each microplane
    # -----------------------------------------------------------

    def _get_eps_p_Emab(self, eps_Emab, omega_N_Emn, z_N_Emn, alpha_N_Emn,
                        r_N_Emn, eps_N_p_Emn, omega_T_Emn, z_T_Emn,
                        alpha_T_Emna, eps_T_pi_Emna, sigma_N_Emn):
        eps_N_Emn = self._get_e_N_Emn_2(eps_Emab)
        eps_T_Emna = self._get_e_T_Emnar_2(eps_Emab)

        # plastic normal strains
        sigma_N_Emn, Z_n, X_n, Y_n = self.get_normal_law(
            eps_N_Emn, omega_N_Emn, z_N_Emn, alpha_N_Emn, r_N_Emn, eps_N_p_Emn)

        # sliding tangential strains
        sigma_T_Emna, Z_T, X_T, Y_T = self.get_tangential_law(
            eps_T_Emna, omega_T_Emn, z_T_Emn, alpha_T_Emna, eps_T_pi_Emna,
            sigma_N_Emn)

        # 2-nd order plastic (inelastic) tensor
        eps_p_Emab = (np.einsum('n,...n,na,nb->...ab', self._MPW, eps_N_p_Emn,
                                self._MPN, self._MPN) + 0.5 *
                      (np.einsum('n,...nf,na,fb->...ab', self._MPW,
                                 eps_T_pi_Emna, self._MPN, self.DELTA) +
                       np.einsum('n,...nf,nb,fa->...ab', self._MPW,
                                 eps_T_pi_Emna, self._MPN, self.DELTA)))

        return eps_p_Emab

    def get_corr_pred(self, eps_Emab, t_n1, **Eps_k):
        """Evaluation - get the corrector and predictor
        """
        # Corrector predictor computation.

        # ------------------------------------------------------------------
        # Damage tensor (4th order) using product- or sum-type symmetrization:
        # ------------------------------------------------------------------
        beta_Emabcd = self._get_beta_Emabcd_2(eps_Emab, **Eps_k)

        # ------------------------------------------------------------------
        # Damaged stiffness tensor calculated based on the damage tensor beta4:
        # ------------------------------------------------------------------

        D_Emabcd = np.einsum('...ijab, abef, ...cdef->...ijcd', beta_Emabcd,
                             self.D_abef, beta_Emabcd)

        # ----------------------------------------------------------------------
        # Return stresses (corrector) and damaged secant stiffness matrix (predictor)
        # ----------------------------------------------------------------------
        # plastic strain tensor
        eps_p_Emab = self._get_eps_p_Emab(eps_Emab, **Eps_k)

        # elastic strain tensor
        eps_e_Emab = eps_Emab - eps_p_Emab

        # calculation of the stress tensor
        sig_Emab = np.einsum('...abcd,...cd->...ab', D_Emabcd, eps_e_Emab)

        return sig_Emab, D_Emabcd
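To make the kinematic constraint in _get_e_N_Emn_2 / _get_e_T_Emnar_2 concrete, here is a self-contained sketch with three toy normals; the real model uses its n_mp microplane normals _MPN and the integration weights _MPW supplied by the base class.

import numpy as np

MPN = np.eye(3)  # toy normals: the three coordinate axes
DELTA = np.identity(3)
MPNN = np.einsum('ni,nj->nij', MPN, MPN)
# tangential projector T_nija = 0.5*(N_i d_ja + N_j d_ia) - N_i N_j N_a
MPTT = 0.5 * (np.einsum('ni,jr->nijr', MPN, DELTA) +
              np.einsum('nj,ir->nijr', MPN, DELTA) -
              2.0 * np.einsum('ni,nj,nr->nijr', MPN, MPN, MPN))
eps = np.array([[1.0e-3, 2.0e-4, 0.0],
                [2.0e-4, 0.0, 0.0],
                [0.0, 0.0, 0.0]])
eps_N = np.einsum('nij,ij->n', MPNN, eps)    # normal strain per plane
eps_T = np.einsum('nija,ij->na', MPTT, eps)  # tangential strain vector per plane
print(eps_N)  # [1.e-03 0.e+00 0.e+00]
print(eps_T)  # the shear 2e-4 shows up tangentially on planes 0 and 1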