Example #1
0
 def format_cryomag(self):
     """Build the raw-data dict from the cryomag float output.

     Defines the vector alias 'm' = (x, y, z) and appends its scalar
     magnitude as a 'mag' column.
     """
     rpd = RockPyData(column_names=self.machine_data.float_header,
                      data=self.machine_data.float_data)
     rpd.define_alias('m', ('x', 'y', 'z'))
     # store the data together with the magnetization magnitude
     self._raw_data = {'data': rpd.append_columns('mag', rpd.magnitude('m'))}
Example #2
0
class Generic(object):
    """Generic series (e.g. pressure, temperature) attached to a measurement.

    Holds a series type (stype), its numeric value and display unit, plus an
    optional comment; the value is mirrored in a one-column RockPyData.
    """
    create_logger('RockPy.series')

    def __init__(self, stype, value, unit, comment=None):
        """Create a series.

        Parameters
        ----------
           stype: str
              series type name; stored lower-cased
           value:
              numeric value; converted with float()
           unit: str
              unit string, used for display only
           comment: str, optional
        """
        #self.log = logging.getLogger('RockPy.series.' + type(self).__name__)
        #self.log.info('CREATING series << %s >>' % stype)
        self.stype = stype.lower()
        self.value = float(value)
        # one-column RockPyData holding the value under the stype name
        self.data = RockPyData(column_names=stype, data=value)
        self.unit = unit
        self.comment = comment

    @property
    def label(self):
        """Display label, e.g. '0.60 [GPa]'."""
        return '%.2f [%s]' % (self.value, self.unit)

    def add_value(self, type, value, unit=None):
        """Append a column named *type* and set it to *value*.

        NOTE(review): *unit* is accepted but never used — confirm intent.
        """
        self.data.append_columns(column_names=type)
        self.data[type] = value

    def __repr__(self):
        return '<RockPy.series> %s, %.2f, [%s]' % (self.stype, self.value,
                                                   self.unit)

    @property
    def v(self):
        """Shorthand for the series value."""
        return self.value

    @property
    def u(self):
        """Shorthand for the series unit."""
        return self.unit
Example #3
0
    def simulate(cls, sample_obj, color=None, **parameter):
        """
        return simulated instance of measurement depending on parameters

        Recognized entries in **parameter:
           mdirs: list of [D, I] measurement directions (degrees)
           evals: the three eigenvalues of the simulated anisotropy tensor
           measerr: half-width of the uniform random measurement error
        """
        # get measurement directions in array of D,I pairs
        mdirs = parameter.get('mdirs', [[0.0, 0.0], [90.0, 0.0], [0.0, 90.0]])
        # get eigenvalues
        evals = list(parameter.get('evals', [1.0, 1.0, 1.0]))
        if len(evals) != 3:
            raise RuntimeError('got %d eigenvalues instead of 3' % len(evals))

        # get random measurement errors
        measerr = parameter.get('measerr', 0)

        # todo: normalize evals to 1?

        R = Anisotropy.createDiagonalTensor(*evals)

        #todo: also implement 1D measurement

        data = RockPyData(column_names=['d', 'i', 'x', 'y', 'z'])

        for mdir in mdirs:
            # M = R * H
            # uniform noise in [-measerr, +measerr) per component
            errs = [measerr * random() * 2 - measerr for i in (1,2,3)]
            measurement = np.dot(R, DIL2XYZ((mdir[0], mdir[1], 1))) + errs
            data = data.append_rows(np.hstack([np.array(mdir), measurement]))

        data.define_alias('variable', ('d', 'i'))

        mdata = {'data': data}

        return cls(sample_obj, 'anisotropy', mfile=None, mdata=mdata, machine='simulation', color=color, **parameter)
Example #4
0
    def format_vsm(self):
        """
        formats the vsm output to be compatible with backfield measurements

        When the VSM script includes an IRM acquisition, that measurement is
        added to the sample separately and the backfield data starts at the
        next data index.
        :return:
        """
        data = self.machine_data.out_backfield()
        header = self.machine_data.header

        # check for IRM acquisition -> index is changed
        data_idx = 0

        if self.machine_data.measurement_header['SCRIPT'][
                'Include IRM?'] == 'Yes':
            data_idx += 1
            self.logger.info(
                'IRM acquisition measured, adding new measurement')
            irm = self.sample_obj.add_measurement(mtype='irm_acquisition',
                                                  mfile=self.mfile,
                                                  machine=self.machine)
            # the IRM measurement shares this measurement's series
            irm._series = self._series

        # columns 0/1 of the selected segment are field and magnetization
        self._raw_data['remanence'] = RockPyData(column_names=['field', 'mag'],
                                                 data=data[data_idx][:,
                                                                     [0, 1]])

        if self.machine_data.measurement_header['SCRIPT'][
                'Include direct moment?'] == 'Yes':
            # direct moment lives in the following segment, column 2
            self._raw_data['induced'] = RockPyData(
                column_names=['field', 'mag'],
                data=data[data_idx + 1][:, [0, 2]])
        else:
            self._raw_data['induced'] = None
Example #5
0
    def calc_all_mean_results(self, filtered=False, **parameter):
        """
        Calculates the mean out of all results

        Parameters
        ----------
           filtered: bool
              forwarded to all_results
           parameter:
              calculation parameters forwarded to all_results

        Returns
        -------
           RockPyData with one mean row (std in the errors) per
           mtype/stype/sval combination; also stored in self._mean_results.
        """
        out = None
        for mtype in self.mtypes:
            for stype in self.mtype_stype_dict[mtype]:
                for sval in self.stype_sval_dict[stype]:
                    results = self.all_results(mtype=mtype, stype=stype, sval=sval,
                                               filtered=filtered,
                                               **parameter)

                    results.define_alias('variable', ['stype ' + stype])

                    # mean / std over all measurements of this combination
                    data = np.mean(results.v, axis=0)
                    err = np.std(results.v, axis=0)
                    # BUGFIX: was `if not out:` — that also triggers for a
                    # falsy/empty RockPyData and would silently discard the
                    # rows collected so far; only the very first iteration
                    # should create the container.
                    if out is None:
                        out = RockPyData(column_names=results.column_names, data=data)
                        out.e = err.reshape(1, len(err))
                    else:
                        append = RockPyData(column_names=results.column_names, data=data)
                        append.e = err.reshape(1, len(err))
                        out = out.append_rows(data=append.data)
        self._mean_results = out
        return out
Example #6
0
 def format_jr6(self):
     """Build the raw-data dict from JR6 machine output.

     Defines the vector alias 'm' = (x, y, z) and appends its scalar
     magnitude as a 'mag' column.
     """
     rpd = RockPyData(column_names=['x', 'y', 'z'],
                      data=self.machine_data.get_data(),
                      units=['A m^2', 'A m^2', 'A m^2'])
     rpd.define_alias('m', ('x', 'y', 'z'))
     # store the data together with the magnetization magnitude
     self._raw_data = {'data': rpd.append_columns('mag', rpd.magnitude('m'))}
Example #7
0
class Generic(object):
    """Generic series (e.g. pressure, temperature) attached to a measurement.

    Holds a series type (stype), its numeric value and display unit, plus an
    optional comment; the value is mirrored in a one-column RockPyData.
    """
    create_logger('RockPy.series')

    def __init__(self, stype, value, unit, comment=None):
        """Create a series.

        Parameters
        ----------
           stype: str
              series type name; stored lower-cased
           value:
              numeric value; converted with float()
           unit: str
              unit string, used for display only
           comment: str, optional
        """
        #self.log = logging.getLogger('RockPy.series.' + type(self).__name__)
        #self.log.info('CREATING series << %s >>' % stype)
        self.stype = stype.lower()
        self.value = float(value)
        # one-column RockPyData holding the value under the stype name
        self.data = RockPyData(column_names=stype, data=value)
        self.unit = unit
        self.comment = comment

    @property
    def label(self):
        """Display label, e.g. '0.60 [GPa]'."""
        return  '%.2f [%s]' %(self.value, self.unit)

    def add_value(self, type, value, unit=None):
        """Append a column named *type* and set it to *value*.

        NOTE(review): *unit* is accepted but never used — confirm intent.
        """
        self.data.append_columns(column_names=type)
        self.data[type] = value

    def __repr__(self):
        return '<RockPy.series> %s, %.2f, [%s]' %(self.stype, self.value, self.unit)

    @property
    def v(self):
        """Shorthand for the series value."""
        return self.value

    @property
    def u(self):
        """Shorthand for the series unit."""
        return self.unit
Example #8
0
    def format_vsm(self):
        """Split a continuous VSM thermocurve into monotonic segments.

        Detects sign changes of the first-column (temperature) derivative and
        stores each monotonic run as a RockPyData under 'warmNN' / 'coolNN'.
        NOTE(review): the counters look swapped — the 'cool' branch increments
        `ut` (commented as warming) and vice versa; confirm intended naming.
        """
        data = self.machine_data.out
        header = self.machine_data.header
        segments = self.machine_data.segment_info
        aux = np.array([j for i in data for j in i])  # combine all data arrays
        # (index, dT) pairs from the first-column differences
        a = np.array([(i, v) for i, v in enumerate(np.diff(aux, axis=0)[:, 0])])

        sign = np.sign(np.diff(aux, axis=0)[:, 1])

        threshold = 3
        # sign change must persist for two consecutive steps (noise filter)
        zero_crossings = [i+1 for i in xrange(len(a[:, 1]) - 1)
                if a[i, 1] > 0 > a[i + 1, 1] and a[i, 1] > 0 > a[i + 2, 1]
                or a[i, 1] < 0 < a[i + 1, 1] and a[i, 1] < 0 < a[i + 2, 1]]
        zero_crossings = [0] + zero_crossings  # start with zero index
        zero_crossings += [len(aux)] # append last index

        ut = 0  # running number warming
        dt = 0  # running number cooling

        for i, v in enumerate(zero_crossings):
            if v < zero_crossings[-1]:  # prevents index Error
                if sum(a[v:zero_crossings[i + 1], 1]) < 0:  # cooling
                    name = 'cool%02i' % (ut)
                    ut += 1
                else:
                    name = 'warm%02i' % (dt)
                    dt += 1
                # include the first point of the next segment for continuity
                data = aux[v:zero_crossings[i + 1] + 1]
                rpd = RockPyData(column_names=header, data=data)
                rpd.rename_column('temperature', 'temp')
                rpd.rename_column('moment', 'mag')
                self._data.update({name: rpd})
Example #9
0
 def __init__(self, stype, value, unit, comment=None):
     """Create a series: lower-cased stype, float value, display unit.

     The value is also mirrored in a one-column RockPyData stored under
     the stype name.
     """
     #self.log = logging.getLogger('RockPy.series.' + type(self).__name__)
     #self.log.info('CREATING series << %s >>' % stype)
     self.stype = stype.lower()
     self.value = float(value)
     self.data = RockPyData(column_names=stype, data=value)
     self.unit = unit
     self.comment = comment
Example #10
0
 def format_sushibar(self):
     """Build the raw-data dict from the sushibar TRM output.

     Defines the vector alias 'm' = (x, y, z) and appends its scalar
     magnitude as a 'mag' column.
     """
     rpd = RockPyData(column_names=['temp', 'x', 'y', 'z', 'sm'],
                      data=self.machine_data.out_trm(),
                      # units=['C', 'mT', 'A m^2', 'A m^2', 'A m^2']
                      )
     rpd.define_alias('m', ('x', 'y', 'z'))
     # store the data together with the magnetization magnitude
     self._raw_data = {'data': rpd.append_columns('mag', rpd.magnitude('m'))}
Example #11
0
    def test_add_errors(self):
        """Errors assigned in bulk via `.e` must be retrievable per column."""
        d = RockPyData(column_names=['A', 'B'])
        #d['A'].v = 1  # Attribute Error NoneType has no attribute, maybe initialize to np.nan?
        #d['B'] = 2
        #d['A'].e = 4
        #d['B'].e = 5
        d = d.append_rows([1, 2])
        #print d
        # one error row matching the single data row
        d.e = [[4, 5]]

        self.assertEqual(5., d['B'].e)
Example #12
0
    def __initialize(self):
        """
        Initialize function is called inside the __init__ function, it is also called when the object is reconstructed
        with pickle.

        Builds the dynamic results table (one column per result_* method),
        collects the calculate_* method names, and initializes the
        calculation/standard parameter dictionaries and info dict.

        :return:
        """

        # dynamical creation of entries in results data. One column for each results_* method.
        # calculation_* methods are not creating columns -> if a result is calculated a result_* method
        # has to be written
        # i[7:] strips the 'result_' prefix to get the bare result name
        self.result_methods = [
            i[7:] for i in dir(self) if i.startswith('result_')
            if not i.endswith('generic') if not i.endswith('result')
        ]  # search for implemented results methods

        self.results = RockPyData(
            column_names=self.result_methods,
            data=[np.nan for i in self.result_methods
                  ])  # dynamic entry creation for all available result methods

        # ## warning with calculation of results:
        # M.result_slope() -> 1.2
        # M.calculate_vds(t_min=300) -> ***
        # M.results['slope'] -> 1.2
        # M.result_slope(t_min=300) -> 0.9
        #
        # the results are stored for the calculation parameters that were used to calculate it.
        # This means calculating a different result with different parameters can lead to inconsistencies.
        # One has to be aware that comparing the two may not be useful

        # dynamically generating the calculation and standard parameters for each calculation method.
        # This just sets the values to non, the values have to be specified in the class itself
        self.calculation_methods = [
            i for i in dir(self) if i.startswith('calculate_')
            if not i.endswith('generic')
        ]
        self.calculation_parameter = {i: dict() for i in self.result_methods}
        # i[10:] strips the 'calculate_' prefix
        self._standard_parameter = {
            i[10:]: None
            for i in dir(self) if i.startswith('calculate_')
            if not i.endswith('generic')
        }

        # add existing series values to the results table
        if self._series:
            for t in self._series:
                self._add_sval_to_results(t)
        # self._add_sval_to_data(t)

        self.is_normalized = False  # normalized flag for visuals, so its not normalized twize
        self.norm = None  # the actual parameters

        self._info_dict = self.__create_info_dict()
Example #13
0
    def setUp(self):
        # run before each test
        # 4 rows x 4 columns of fixture data (first column near-constant)
        self.testdata = ((1, 2, 3, 4),
                         (1, 6, 7, 8),
                         (1, 2, 11, 12),
                         (1, 6, 55, 66))

        self.col_names = ('F', 'Mx', 'My', 'Mz')
        self.row_names = ('1.Zeile', '2.Zeile_A', '3.Zeile', '4.Zeile_A')
        self.units = ('T', 'mT', 'fT', 'pT')

        # fully-specified RockPyData instance shared by the tests
        self.RPD = RockPyData(column_names=self.col_names, row_names=self.row_names, units=self.units,
                              data=self.testdata)
Example #14
0
 def format_vftb(self):
     """Format VFTB thermocurve output into 'up_temp' / 'down_temp' data.

     Expects machine_data.out_thermocurve() to yield two arrays
     (heating, cooling); warns when more segments are present and only
     reports when fewer than two are available.
     """
     data = self.machine_data.out_thermocurve()
     header = self.machine_data.header
     if len(data) > 2:
         # BUGFIX: message read '=!' instead of '!='
         print(
             'LENGTH of machine.out_thermocurve != 2. Assuming data[0] = heating data[1] = cooling'
         )
         # self.log.warning('LENGTH of machine.out_thermocurve != 2. Assuming data[0] = heating data[1] = cooling')
     if len(data) > 1:
         self._raw_data['up_temp'] = RockPyData(column_names=header,
                                                data=data[0])
         self._raw_data['down_temp'] = RockPyData(column_names=header,
                                                  data=data[1])
     else:
         print('LENGTH of machine.out_thermocurve < 2.')
Example #15
0
    def segment_info(self):
        """Parse the VSM segment table from the raw output.

        Locates the header lines ('Segment'/'Number') and the data lines
        (starting with '0'), derives fixed column widths from the first data
        row, and returns a RockPyData of the segment table, or None when no
        segment header is present.
        """
        segment_start_idx = [
            i for i, v in enumerate(self.raw_out)
            if v.strip().lower().startswith('segment')
            or v.strip().lower().startswith('number')
        ]
        if segment_start_idx:
            # data rows of the segment table start with '0'
            segment_numbers_idx = [
                i for i, v in enumerate(self.raw_out) if v.startswith('0')
            ]

            segment_data = [
                v.strip('\n').split(',') for i, v in enumerate(self.raw_out)
                if i in segment_numbers_idx
            ]
            # column widths inferred from the first data row (+1 for comma);
            # cumulative sums give slice offsets into the header lines
            sifw = [len(i) + 1 for i in segment_data[0]]
            sifw += [len(segment_data[0])]
            sifw = [sum(sifw[:i]) for i in range(len(sifw))]
            segment_data = np.array(segment_data).astype(float)
            # slice the header lines at the data column boundaries and join
            # the pieces of each column into one name
            segment_info = np.array(
                [[v[sifw[i]:sifw[i + 1]] for i in range(len(sifw) - 1)]
                 for j, v in enumerate(self.raw_out)
                 if j in segment_start_idx]).T
            segment_info = [' '.join(i) for i in segment_info]
            segment_info = np.array(
                [' '.join(j.split()).lower() for j in segment_info])
            out = RockPyData(column_names=segment_info, data=segment_data)
        else:
            out = None
        return out
Example #16
0
    def interpolate_smoothing_spline(self,
                                     y_component='mag',
                                     x_component='field',
                                     out_spline=False):
        """
        Interpolates using a smoothing spline between a x and y component
        :param y_component:
        :param x_component:
        :param out_spline: if True return the spline object itself instead
                           of interpolated data
        :return: RockPyData of 100 interpolated points, or the spline
        """
        from scipy.interpolate import UnivariateSpline

        xs = self._data['data'][x_component].v
        ys = self._data['data'][y_component].v
        spline = UnivariateSpline(xs, ys, s=1)

        if out_spline:
            return spline

        # evaluate the spline on a regular 100-point grid over the data range
        grid = np.linspace(min(xs), max(xs), 100)
        return RockPyData(column_names=[x_component, y_component],
                          data=np.c_[grid, spline(grid)])
Example #17
0
 def calc_all(self, **parameter):
     """Run calc_all on every sample and collect the results.

     Each sample's result rows are labelled with the sample name and
     appended to self.results, which is returned.
     """
     for sample in self.sample_list:
         label = sample.name
         sample.calc_all(**parameter)
         results = sample.results
         if self.results is None:
             # first sample creates the container
             self.results = RockPyData(
                 column_names=results.column_names,
                 data=results.data,
                 row_names=[label for i in results.data])
         else:
             rpdata = RockPyData(column_names=results.column_names,
                                 data=results.data,
                                 row_names=[label for i in results.data])
             self.results = self.results.append_rows(rpdata)
     return self.results
Example #18
0
    def format_cryomag(self):
        """Store the cryomag TRM output as the 'remanence' raw data."""
        #self.log.debug('FORMATTING << %s >> raw_data for << cryomag >> data structure' % (self.mtype))

        remanence = RockPyData(column_names=self.machine_data.float_header,
                               data=self.machine_data.out_trm())
        self._raw_data['remanence'] = remanence
        # no induced moment in this data set
        self._raw_data['induced'] = None
Example #19
0
 def __init__(self, stype, value, unit, comment=None):
     """Create a series: lower-cased stype, float value, display unit.

     The value is also mirrored in a one-column RockPyData stored under
     the stype name.
     """
     #self.log = logging.getLogger('RockPy.series.' + type(self).__name__)
     #self.log.info('CREATING series << %s >>' % stype)
     self.stype = stype.lower()
     self.value = float(value)
     self.data = RockPyData(column_names=stype, data=value)
     self.unit = unit
     self.comment = comment
Example #20
0
    def get_mean_results(self,
                         mtype=None,
                         stype=None, sval=None, sval_range=None,
                         mlist=None,
                         filtered=False,
                         **parameter):
        """
        calculates all results and returns the mean

        Parameters
        ----------
           mtype: str
           stype: str
           sval: float
           sval_range: list
           mlist: list
              *optional*
           filtered: bool
              is used to specify if the filtered_data_measurement list is used to get all corresponding
              measurements. In the case of a mean measurements it generally is not wanted to have the
              result of the mean but get the mean of the result.
              if *True* the results(Mean) will be returned
              if *False* the Mean(Results) will be returned, filtered data will still be calculated.

        Returns
        -------
           RockPyData with the nan-mean of all results (nan-std as errors)
        """

        if not mlist:
            mlist = self.get_measurements(mtypes=mtype,
                                          stypes=stype, svals=sval, sval_range=sval_range,
                                          filtered=filtered)

        all_results = self.all_results(mlist=mlist, **parameter)

        if 'stype' in ''.join(all_results.column_names):  # check for stype
            self.logger.warning('series/S found check if measurement list correct'
                                )

        # nan-aware statistics so incomplete results do not poison the mean
        v = np.nanmean(all_results.v, axis=0)
        errors = np.nanstd(all_results.v, axis=0)

        mean_results = RockPyData(column_names=all_results.column_names,
                                  # row_names='mean ' + '_'.join(all_results.row_names),
                                  data=v)

        mean_results.e = errors.reshape((1, len(errors)))
        return mean_results
Example #21
0
 def format_vftb(self):
     '''
     formats the output from vftb to measurement.data

     Only remanence is available from the vftb backfield routine, so
     'induced' is set to None.
     :return:
     '''
     data = self.machine_data.out_backfield()
     header = self.machine_data.header
     self._raw_data['remanence'] = RockPyData(column_names=header,
                                              data=data[0])
     self._raw_data['induced'] = None
Example #22
0
    def format_sushibar(self):
        """Format sushibar pARM spectra output.

        The first output row (af3) is kept separately from the remaining
        rows (data); both get a 'mag' magnitude column and a 'mean_window'
        column (midpoint of upper/lower window).
        """
        data = self.machine_data.out_parm_spectra()
        # first measurement row: the AF3 baseline
        self.af3 = RockPyData(column_names=data[1],
                              data=data[0][0],
                              units=data[2])
        self.af3.define_alias('m', ( 'x', 'y', 'z'))
        self.af3 = self.af3.append_columns('mag', self.af3.magnitude('m'))
        # mean_window = (upper + lower) / 2
        self.af3 = self.af3.append_columns(column_names='mean_window',
                                           data=np.array([self.af3['upper_window'].v + self.af3['lower_window'].v])[
                                                    0] / 2)

        # remaining rows: the actual pARM spectra
        self.data = RockPyData(column_names=data[1],
                               data=data[0][1:],
                               units=data[2])
        self.data.define_alias('m', ( 'x', 'y', 'z'))
        self.data = self.data.append_columns('mag', self.data.magnitude('m'))
        self.data = self.data.append_columns(column_names='mean_window',
                                             data=np.array([self.data['upper_window'].v + self.data['lower_window'].v])[
                                                      0] / 2)
        self.data.define_alias('variable', 'mean_window')
Example #23
0
    def format_ani(self):
        """Format anisotropy machine data into self._data['data'].

        Supports scalar (one value per direction) and vectorial (x, y, z per
        direction) measurements; any other component count is logged as an
        error and the method returns without setting data.
        """
        self.header = self.machine_data.header

        mdirs = self.machine_data.mdirs
        measurements = self.machine_data.data

        #do we have scalar or vectorial measurements?
        if len(measurements.flatten()) == len(mdirs):  #scalar
            data = RockPyData(column_names=['d', 'i', 'm'])
        elif len(measurements.flatten()) / len(mdirs) == 3:  #vectorial
            data = RockPyData(column_names=['d', 'i', 'x', 'y', 'z'])
        else:
            # BUGFIX: the %d placeholder had no argument; pass the actual
            # component count (lazy logging args) so the message is complete
            Anisotropy.logger.error("anisotropy measurements have %d components",
                                    len(measurements.flatten()) // len(mdirs))
            return

        # one row per measurement direction: [D, I, value(s)]
        for idx in range(len(mdirs)):
            data = data.append_rows(np.hstack([np.array(mdirs[idx]), measurements[idx]]))

        data.define_alias('variable', ('d', 'i'))
        self._data['data'] = data
Example #24
0
 def format_vsm(self):
     """
     Formatting for viscosity Measurements from VSM machine

     Stores time/mag/field columns and appends ln(time), since viscous
     decay is analyzed as linear in ln(t).
     """
     data = self.machine_data.out
     header = self.machine_data.header
     self._raw_data['data'] = RockPyData(
         column_names=['time', 'mag', 'field'], data=data[0][:])
     # precompute the natural log of time for the log-linear fit
     self._raw_data['data'] = self._raw_data['data'].append_columns(
         column_names=['ln_time'],
         data=np.log(self._raw_data['data']['time'].v))
Example #25
0
 def format_sushibar(self):
     """Store the sushibar AF-demag output as raw data.

     Defines the vector alias 'm' = (x, y, z) and appends its scalar
     magnitude as a 'mag' column.
     """
     rpd = RockPyData(column_names=['field', 'x', 'y', 'z'],
                      data=self.machine_data.out_afdemag()
                      )  # , units=['mT', 'Am2', 'Am2', 'Am2'])
     rpd.define_alias('m', ('x', 'y', 'z'))
     self._raw_data['data'] = rpd.append_columns('mag',
                                                 rpd.magnitude('m'))
Example #26
0
    def format_vsm(self):
        """Split a continuous VSM thermocurve into monotonic segments.

        Detects sign changes of the first-column (temperature) derivative and
        stores each monotonic run as a RockPyData under 'warmNN' / 'coolNN'.
        NOTE(review): the counters look swapped — the 'cool' branch increments
        `ut` (commented as warming) and vice versa; confirm intended naming.
        """
        data = self.machine_data.out
        header = self.machine_data.header
        segments = self.machine_data.segment_info
        aux = np.array([j for i in data for j in i])  # combine all data arrays
        # (index, dT) pairs from the first-column differences
        a = np.array([(i, v)
                      for i, v in enumerate(np.diff(aux, axis=0)[:, 0])])

        sign = np.sign(np.diff(aux, axis=0)[:, 1])

        threshold = 3
        # sign change must persist for two consecutive steps (noise filter)
        zero_crossings = [
            i + 1 for i in xrange(len(a[:, 1]) - 1)
            if a[i, 1] > 0 > a[i + 1, 1] and a[i, 1] > 0 > a[i + 2, 1]
            or a[i, 1] < 0 < a[i + 1, 1] and a[i, 1] < 0 < a[i + 2, 1]
        ]
        zero_crossings = [0] + zero_crossings  # start with zero index
        zero_crossings += [len(aux)]  # append last index

        ut = 0  # running number warming
        dt = 0  # running number cooling

        for i, v in enumerate(zero_crossings):
            if v < zero_crossings[-1]:  # prevents index Error
                if sum(a[v:zero_crossings[i + 1], 1]) < 0:  # cooling
                    name = 'cool%02i' % (ut)
                    ut += 1
                else:
                    name = 'warm%02i' % (dt)
                    dt += 1
                # include the first point of the next segment for continuity
                data = aux[v:zero_crossings[i + 1] + 1]
                rpd = RockPyData(column_names=header, data=data)
                rpd.rename_column('temperature', 'temp')
                rpd.rename_column('moment', 'mag')
                self._data.update({name: rpd})
Example #27
0
 def calc_all(self, **parameter):
     """Run calc_all on every sample and collect the results.

     Each sample's result rows are labelled with the sample name and
     appended to self.results, which is returned.
     """
     for sample in self.sample_list:
         label = sample.name
         sample.calc_all(**parameter)
         results = sample.results
         if self.results is None:
             # first sample creates the container
             self.results = RockPyData(column_names=results.column_names,
                                       data=results.data, row_names=[label for i in results.data])
         else:
             rpdata = RockPyData(column_names=results.column_names,
                                 data=results.data, row_names=[label for i in results.data])
             self.results = self.results.append_rows(rpdata)
     return self.results
Example #28
0
    def simulate(cls, sample_obj, color=None, **parameter):
        """
        return simulated instance of measurement depending on parameters

        Recognized entries in **parameter:
           mdirs: list of [D, I] measurement directions (degrees)
           evals: the three eigenvalues of the simulated anisotropy tensor
           measerr: half-width of the uniform random measurement error
        """
        # get measurement directions in array of D,I pairs
        mdirs = parameter.get('mdirs', [[0.0, 0.0], [90.0, 0.0], [0.0, 90.0]])
        # get eigenvalues
        evals = list(parameter.get('evals', [1.0, 1.0, 1.0]))
        if len(evals) != 3:
            raise RuntimeError('got %d eigenvalues instead of 3' % len(evals))

        # get random measurement errors
        measerr = parameter.get('measerr', 0)

        # todo: normalize evals to 1?

        R = Anisotropy.createDiagonalTensor(*evals)

        #todo: also implement 1D measurement

        data = RockPyData(column_names=['d', 'i', 'x', 'y', 'z'])

        for mdir in mdirs:
            # M = R * H
            # uniform noise in [-measerr, +measerr) per component
            errs = [measerr * random() * 2 - measerr for i in (1, 2, 3)]
            measurement = np.dot(R, DIL2XYZ((mdir[0], mdir[1], 1))) + errs
            data = data.append_rows(np.hstack([np.array(mdir), measurement]))

        data.define_alias('variable', ('d', 'i'))

        mdata = {'data': data}

        return cls(sample_obj,
                   'anisotropy',
                   mfile=None,
                   mdata=mdata,
                   machine='simulation',
                   color=color,
                   **parameter)
Example #29
0
    def format_vsm(self):
        """
        formats the vsm output to be compatible with backfield measurements

        NOTE(review): only the 'Include IRM? == Yes' branch stores
        remanence; when no IRM is included nothing is assigned — confirm
        this is intended.
        :return:
        """
        data = self.machine_data.out_backfield()
        header = self.machine_data.header

        #check for IRM acquisition
        if self.machine_data.measurement_header['SCRIPT'][
                'Include IRM?'] == 'Yes':
            self._raw_data['remanence'] = RockPyData(
                column_names=['field', 'mag'], data=data[0][:, [0, 1]])
Example #30
0
 def format_cryomag(self):
     """Format cryomag demag output, filtering steps by demag_type.

     Appends the magnitude of (x, y, z) as 'mag' and renames the 'step'
     column to 'field'.
     """
     data = RockPyData(column_names=self.machine_data.float_header,
                            data=self.machine_data.get_float_data())
     if self.demag_type != 'af3':
         # keep only rows measured with the requested demag type
         idx = [i for i, v in enumerate(self.machine_data.steps) if v == self.demag_type]
         data = data.filter_idx(idx)
     data.define_alias('m', ('x', 'y', 'z'))
     self._raw_data['data'] = data.append_columns('mag', data.magnitude('m'))
     self._raw_data['data'].rename_column('step', 'field')
Example #31
0
 def format_cryomag(self):
     """Build the raw-data dict from the cryomag float output.

     Defines the vector alias 'm' = (x, y, z) and appends its scalar
     magnitude as a 'mag' column.
     """
     data = self.machine_data.float_data
     header = self.machine_data.float_header
     data = RockPyData(column_names=header, data=data)
     data.define_alias('m', ('x', 'y', 'z'))
     # data = data.append_columns('mag', data.magnitude('m'))
     self._raw_data = {
         'data': data.append_columns('mag', data.magnitude('m'))
     }
Example #32
0
 def format_jr6(self):
     """Build the raw-data dict from JR6 machine output.

     Defines the vector alias 'm' = (x, y, z) and appends its scalar
     magnitude as a 'mag' column.
     """
     data = self.machine_data.get_data()
     data = RockPyData(column_names=['x', 'y', 'z'],
                       data=data,
                       units=['A m^2', 'A m^2', 'A m^2'])
     data.define_alias('m', ('x', 'y', 'z'))
     self._raw_data = {
         'data': data.append_columns('mag', data.magnitude('m'))
     }
Example #33
0
 def format_cryomag(self):
     """Format cryomag demag output, filtering steps by demag_type.

     Appends the magnitude of (x, y, z) as 'mag' and renames the 'step'
     column to 'field'.
     """
     data = RockPyData(column_names=self.machine_data.float_header,
                       data=self.machine_data.get_float_data())
     if self.demag_type != 'af3':
         # keep only rows measured with the requested demag type
         idx = [
             i for i, v in enumerate(self.machine_data.steps)
             if v == self.demag_type
         ]
         data = data.filter_idx(idx)
     data.define_alias('m', ('x', 'y', 'z'))
     self._raw_data['data'] = data.append_columns('mag',
                                                  data.magnitude('m'))
     self._raw_data['data'].rename_column('step', 'field')
Example #34
0
    def format_sushibar(self):
        """Format sushibar pARM spectra output.

        The first output row (af3) is kept separately from the remaining
        rows (data); both get a 'mag' magnitude column and a 'mean_window'
        column (midpoint of upper/lower window).
        """
        data = self.machine_data.out_parm_spectra()
        # first measurement row: the AF3 baseline
        self.af3 = RockPyData(column_names=data[1],
                              data=data[0][0],
                              units=data[2])
        self.af3.define_alias('m', ('x', 'y', 'z'))
        self.af3 = self.af3.append_columns('mag', self.af3.magnitude('m'))
        # mean_window = (upper + lower) / 2
        self.af3 = self.af3.append_columns(
            column_names='mean_window',
            data=np.array(
                [self.af3['upper_window'].v + self.af3['lower_window'].v])[0] /
            2)

        # remaining rows: the actual pARM spectra
        self.data = RockPyData(column_names=data[1],
                               data=data[0][1:],
                               units=data[2])
        self.data.define_alias('m', ('x', 'y', 'z'))
        self.data = self.data.append_columns('mag', self.data.magnitude('m'))
        self.data = self.data.append_columns(
            column_names='mean_window',
            data=np.array([
                self.data['upper_window'].v + self.data['lower_window'].v
            ])[0] / 2)
        self.data.define_alias('variable', 'mean_window')
Example #35
0
 def format_sushibar(self):
     """Build the raw-data dict from the sushibar TRM output.

     Defines the vector alias 'm' = (x, y, z) and appends its scalar
     magnitude as a 'mag' column.
     """
     data = RockPyData(
         column_names=['temp', 'x', 'y', 'z', 'sm'],
         data=self.machine_data.out_trm(),
         # units=['C', 'mT', 'A m^2', 'A m^2', 'A m^2']
     )
     data.define_alias('m', ('x', 'y', 'z'))
     # data = data.append_columns('mag', data.magnitude('m'))
     self._raw_data = {
         'data': data.append_columns('mag', data.magnitude('m'))
     }
Example #36
0
    def __initialize(self):
        """
        Initialize function is called inside the __init__ function, it is also called when the object is reconstructed
        with pickle.

        Builds the dynamic results table (one column per result_* method),
        collects the calculate_* method names, and initializes the
        calculation/standard parameter dictionaries and info dict.

        :return:
        """


        # dynamical creation of entries in results data. One column for each results_* method.
        # calculation_* methods are not creating columns -> if a result is calculated a result_* method
        # has to be written
        # i[7:] strips the 'result_' prefix to get the bare result name
        self.result_methods = [i[7:] for i in dir(self) if i.startswith('result_') if
                               not i.endswith('generic') if
                               not i.endswith('result')]  # search for implemented results methods

        self.results = RockPyData(
            column_names=self.result_methods,
            data=[np.nan for i in self.result_methods])  # dynamic entry creation for all available result methods

        # ## warning with calculation of results:
        # M.result_slope() -> 1.2
        # M.calculate_vds(t_min=300) -> ***
        # M.results['slope'] -> 1.2
        # M.result_slope(t_min=300) -> 0.9
        #
        # the results are stored for the calculation parameters that were used to calculate it.
        # This means calculating a different result with different parameters can lead to inconsistencies.
        # One has to be aware that comparing the two may not be useful

        # dynamically generating the calculation and standard parameters for each calculation method.
        # This just sets the values to non, the values have to be specified in the class itself
        self.calculation_methods = [i for i in dir(self) if i.startswith('calculate_') if not i.endswith('generic')]
        self.calculation_parameter = {i: dict() for i in self.result_methods}
        # i[10:] strips the 'calculate_' prefix
        self._standard_parameter = {i[10:]: None for i in dir(self) if i.startswith('calculate_') if
                                    not i.endswith('generic')}

        # add existing series values to the results table
        if self._series:
            for t in self._series:
                self._add_sval_to_results(t)
        # self._add_sval_to_data(t)

        self.is_normalized = False  # normalized flag for visuals, so its not normalized twize
        self.norm = None  # the actual parameters

        self._info_dict = self.__create_info_dict()
Example #37
0
    def format_ani(self):
        """Format anisotropy machine data into self._data['data'].

        Supports scalar (one value per direction) and vectorial (x, y, z per
        direction) measurements; any other component count is logged as an
        error and the method returns without setting data.
        """
        self.header = self.machine_data.header

        mdirs = self.machine_data.mdirs
        measurements = self.machine_data.data

        #do we have scalar or vectorial measurements?
        if len(measurements.flatten()) == len(mdirs):  #scalar
            data = RockPyData(column_names=['d', 'i', 'm'])
        elif len(measurements.flatten()) / len(mdirs) == 3:  #vectorial
            data = RockPyData(column_names=['d', 'i', 'x', 'y', 'z'])
        else:
            # BUGFIX: the %d placeholder had no argument; pass the actual
            # component count (lazy logging args) so the message is complete
            Anisotropy.logger.error(
                "anisotropy measurements have %d components",
                len(measurements.flatten()) // len(mdirs))
            return

        # one row per measurement direction: [D, I, value(s)]
        for idx in range(len(mdirs)):
            data = data.append_rows(
                np.hstack([np.array(mdirs[idx]), measurements[idx]]))

        data.define_alias('variable', ('d', 'i'))
        self._data['data'] = data
Example #38
0
class Measurement(object):
    """

    HOW TO get at stuff
    ===================
    HowTo:
    ======
       stypeS
       ++++++
        want all series types (e.g. pressure, temperature...):

        as list: measurement.stypes
           print measurement.stypes
           >>> ['pressure']
        as dictionary with corresponding series as value: measurement.tdict
           print measurement.tdict
           >>> {'pressure': <RockPy.Series> pressure, 0.60, [GPa]}
        as dictionary with itself as value: measurement._self_tdict
           >>> {'pressure': {0.6: <RockPy.Measurements.thellier.Thellier object at 0x10e2ef890>}}

       svalUES
       +++++++
       want all values for any series in a measurement

        as list: measurement.svals
        print measurement.svals
        >>> [0.6]


    """

    logger = logging.getLogger('RockPy.MEASUREMENT')

    # @classmethod
    # def _standard_parameter(cls):
    #     return {i:{} for i in cls.result_methods}

    @classmethod
    def simulate(cls, **parameter):
        """
        pseudo abstract method that should be overridden in subclasses to return a simulated measurement
        based on given parameters
        """
        return None

    @classmethod
    def implemented_machines(cls):
        """
        Collects all implemented machine readers.

        Walks the subclasses of RockPy.Readin.base.Machine and maps each
        lower-cased class name to the class implementing the reader.

        Returns
        -------
           dict
              {machine_name (lowercase): machine_class}
        """
        machines = {}
        for machine_cls in RockPy.Readin.base.Machine.__subclasses__():
            machines[machine_cls.__name__.lower()] = machine_cls
        return machines

    @classmethod
    def inheritors(cls):
        """
        Method that gets all children and childrens-children ... from a class

        Returns
        -------
           list
        """
        subclasses = set()
        work = [cls]
        while work:
            parent = work.pop()
            for child in parent.__subclasses__():
                if child not in subclasses:
                    subclasses.add(child)
                    work.append(child)
        return subclasses

    @classmethod
    def implemented_measurements(self):
        """
        method that dynamically creates a dictionary with "obj.name : obj" entries

        Returns
        -------
           dict
        """
        return {i.__name__.lower(): i for i in Measurement.inheritors()}

    @classmethod
    def measurement_formatters(cls):
        # measurement formatters are important!
        # if they are not inside the measurement class, the measurement has not been implemented for this machine.
        # the following machine formatters:
        # 1. looks through all implemented measurements
        # 2. for each measurement stores the machine and the applicable readin class in a dictonary

        measurement_formatters = {
            cl.__name__.lower(): {
                '_'.join(i.split('_')[1:]).lower():
                Measurement.implemented_machines()['_'.join(
                    i.split('_')[1:]).lower()]
                for i in dir(cl) if i.startswith('format_')
            }
            for cl in Measurement.inheritors()
        }
        return measurement_formatters

    @classmethod
    def get_subclass_name(cls):
        return cls.__name__

    @property
    def standard_parameter(self):
        """
        property that returns the standard calculation parameter if specified, otherwise returns an empty dictionary
        :return:
        """
        print self.__class__._standard_parameter
        if hasattr(self.__class__, '_standard_parameter'):
            return self.__class__._standard_parameter
        else:
            print 'test'
            return {}

    def __init__(self,
                 sample_obj,
                 mtype,
                 mfile,
                 machine,
                 mdata=None,
                 color=None,
                 series=None,
                 **options):
        """
        Constructor for all measurement types.

           sample_obj: the sample this measurement belongs to
           mtype: measurement type; must be one of the implemented measurements
           machine: machine the data was created by; the class must define a
              corresponding ``format_<machine>`` method
           mfile: path to the measurement data file
           mdata: when mdata is set, this will be directly used as measurement data without formatting from file
           color: color used for plotting if specified
           series: series option string, parsed by _add_series_from_opt
           options: e.g. 'suffix' for series-from-suffix parsing
        :return:
        """

        self.logger = logging.getLogger('RockPy.MEASURMENT.' +
                                        self.get_subclass_name())

        self.color = color
        self.has_data = True
        self._data = {}
        self._raw_data = {}
        self.is_initial_state = False
        self.is_mean = False  # flag for mean measurements

        # machine and mtype are matched case-insensitively everywhere below
        if machine is not None:
            machine = machine.lower()  # for consistency in code

        if mtype is not None:
            mtype = mtype.lower()  # for consistency in code
        ''' initialize parameters '''
        self.machine_data = None  # returned data from Readin.machines()
        self.suffix = options.get('suffix', '')
        ''' initial state '''
        self.is_machine_data = None  # returned data from Readin.machines()
        self.initial_state = None
        ''' series '''
        self._series = []
        self._series_opt = series

        # builds results / calculation-parameter structures; also re-run on unpickling
        self.__initialize()

        if mtype in Measurement.measurement_formatters():
            self.logger.debug('MTYPE << %s >> implemented' % mtype)
            self.mtype = mtype  # set mtype

            if mdata is not None:  # we have mdata -> ignore mfile and just use that data directly
                self.logger.debug(
                    'mdata passed -> using as measurement data without formatting'
                )
                self.sample_obj = sample_obj
                self._data = mdata
                return  # done
            if machine in Measurement.measurement_formatters(
            )[mtype] or machine == 'combined':
                self.logger.debug('MACHINE << %s >> implemented' % machine)
                self.machine = machine  # set machine
                self.sample_obj = sample_obj  # set sample_obj
                if not mfile:
                    self.logger.debug(
                        'NO machine or mfile passed -> no raw_data will be generated'
                    )
                    return
                else:
                    self.mfile = mfile
                    self.import_data()
                    self.has_data = self.machine_data.has_data
                    if not self.machine_data.has_data:
                        self.logger.error(
                            'NO DATA passed: check sample name << %s >>' %
                            sample_obj.name)
            else:
                self.logger.error('UNKNOWN MACHINE: << %s >>' % machine)
                self.logger.error(
                    'most likely cause is the \"format_%s\" method is missing in the measurement << %s >>'
                    % (machine, mtype))
        else:
            self.logger.error('UNKNOWN\t MTYPE: << %s >>' % mtype)

        # dynamic data formatting
        # checks if format_'machine_name' exists. If exists it formats self.raw_data according to format_'machine_name'
        if machine == 'combined':
            pass
        elif callable(getattr(self, 'format_' + machine)):
            if self.has_data:
                self.logger.debug('FORMATTING raw data from << %s >>' %
                                  machine)
                getattr(self, 'format_' + machine)()
            else:
                self.logger.debug('NO raw data transfered << %s >>' % machine)
        else:
            self.logger.error(
                'FORMATTING raw data from << %s >> not possible, probably not implemented, yet.'
                % machine)

        # add series if provided
        # has to come past __initialize()
        if self._series_opt:
            self._add_series_from_opt()

    @property
    def m_idx(self):
        return self.sample_obj.measurements.index(self)

    @property
    def fname(self):
        """
        Returns only filename from self.file

        Returns
        -------
           str: filename from full path
        """
        return os.path.split(self.mfile)[-1]

    def __initialize(self):
        """
        Initialize function is called inside the __init__ function, it is also called when the object is reconstructed
        with pickle.

        Builds the dynamic results table (one column per result_* method) and
        the per-method calculation/standard parameter dictionaries, then
        re-attaches any existing series to the results and creates the info
        dictionary.

        :return:
        """

        # dynamical creation of entries in results data. One column for each results_* method.
        # calculation_* methods are not creating columns -> if a result is calculated a result_* method
        # has to be written
        self.result_methods = [
            i[7:] for i in dir(self) if i.startswith('result_')
            if not i.endswith('generic') if not i.endswith('result')
        ]  # search for implemented results methods

        # one NaN placeholder per result column until the result is calculated
        self.results = RockPyData(
            column_names=self.result_methods,
            data=[np.nan for i in self.result_methods
                  ])  # dynamic entry creation for all available result methods

        # ## warning with calculation of results:
        # M.result_slope() -> 1.2
        # M.calculate_vds(t_min=300) -> ***
        # M.results['slope'] -> 1.2
        # M.result_slope(t_min=300) -> 0.9
        #
        # the results are stored for the calculation parameters that were used to calculate it.
        # This means calculating a different result with different parameters can lead to inconsistencies.
        # One has to be aware that comparing the two may not be useful

        # dynamically generating the calculation and standard parameters for each calculation method.
        # This just sets the values to non, the values have to be specified in the class itself
        self.calculation_methods = [
            i for i in dir(self) if i.startswith('calculate_')
            if not i.endswith('generic')
        ]
        self.calculation_parameter = {i: dict() for i in self.result_methods}
        # 'calculate_' is 10 characters -> i[10:] strips the prefix
        self._standard_parameter = {
            i[10:]: None
            for i in dir(self) if i.startswith('calculate_')
            if not i.endswith('generic')
        }

        # re-attach series columns to the results table (relevant on unpickling)
        if self._series:
            for t in self._series:
                self._add_sval_to_results(t)
        # self._add_sval_to_data(t)

        self.is_normalized = False  # normalized flag for visuals, so its not normalized twize
        self.norm = None  # the actual parameters

        self._info_dict = self.__create_info_dict()

    def __getstate__(self):
        '''
        returned dict will be pickled
        :return:
        '''
        pickle_me = {
            k: v
            for k, v in self.__dict__.iteritems() if k in (
                'mtype',
                'machine',
                'mfile',
                'has_data',
                'machine_data',
                '_raw_data',
                '_data',
                'initial_state',
                'is_initial_state',
                'sample_obj',
                '_series_opt',
                '_series',
                'suffix',
            )
        }
        return pickle_me

    def __setstate__(self, d):
        '''
        Restores the instance from unpickled state.

           d: dict
              unpickled attribute dict produced by __getstate__
        :return:
        '''
        self.__dict__.update(d)
        # rebuild everything __getstate__ left out (results, parameters, info dict)
        self.__initialize()

    def reset__data(self, recalc_mag=False):
        # stub: presumably meant to restore self._data from _raw_data -- not implemented yet
        pass

    def __getattr__(self, attr):
        # print attr, self.__dict__.keys()
        if attr in self.__getattribute__('_data').keys():
            return self._data[attr]
        if attr in self.__getattribute__('result_methods'):
            return getattr(self, 'result_' + attr)().v[0]
        raise AttributeError(attr)

    def import_data(self, rtn_raw_data=None, **options):
        '''
        Importing the data from mfile and machine.

        Looks up the readin class for (mtype, machine) in
        measurement_formatters() and runs it on the data file.

           rtn_raw_data: if truthy, the raw data is returned instead of being
              stored in self.machine_data
           options: may override 'machine', 'mtype' and 'mfile' for this import
        :return:
        '''

        self.logger.info('IMPORTING << %s , %s >> data' %
                         (self.machine, self.mtype))

        # allow importing with settings other than the instance defaults
        machine = options.get('machine', self.machine)
        mtype = options.get('mtype', self.mtype)
        mfile = options.get('mfile', self.mfile)
        raw_data = self.measurement_formatters()[mtype][machine](
            mfile, self.sample_obj.name)
        if raw_data is None:
            self.logger.error(
                'IMPORTING\t did not transfer data - CHECK sample name and data file'
            )
            return
        else:
            if rtn_raw_data:
                self.logger.info('RETURNING raw_data for << %s , %s >> data' %
                                 (machine, mtype))
                return raw_data
            else:
                self.machine_data = raw_data

    def set_initial_state(
            self,
            mtype,
            mfile,
            machine,  # standard
            **options):
        """
        creates a new measurement (ISM) as initial state of base measurement (BSM).
        It dynamically calls the measurement _init_ function and assigns the created measurement to the
        self.initial_state value. It also sets a flag for the ISM to check if a measurement is a MIS.

        Parameters
        ----------
           mtype: str
              measurement type
           mfile: str
              measurement data file
           machine: str
              measurement machine
           options:
        """
        mtype = mtype.lower()
        machnine = machine.lower()

        self.logger.info(
            'CREATING << %s >> initial state measurement << %s >> data' %
            (mtype, self.mtype))
        implemented = {i.__name__.lower(): i for i in Measurement.inheritors()}

        # can only be created if the measurement is actually implemented
        if mtype in implemented:
            self.initial_state = implemented[mtype](self.sample_obj, mtype,
                                                    mfile, machine)
            self.initial_state.is_initial_state = True
            return self.initial_state
        else:
            self.logger.error('UNABLE to find measurement << %s >>' % (mtype))

    ### INFO DICTIONARY

    @property
    def info_dict(self):
        if not hasattr(self, '_info_dict'):
            self._info_dict = self.__create_info_dict()
        if not all(i in self._info_dict['series'] for i in self.series):
            self._recalc_info_dict()
        return self._info_dict

    def __create_info_dict(self):
        """
        creates all info dictionaries

        Returns
        -------
           dict
              Dictionary with a permutation of ,type, stype and sval.
        """
        d = ['stype', 'sval']
        keys = [
            '_'.join(i) for n in range(3)
            for i in itertools.permutations(d, n) if not len(i) == 0
        ]
        out = {i: {} for i in keys}
        out.update({'series': []})
        return out

    def _recalc_info_dict(self):
        """
        Re-calculates the info_dictionary for the measurement
        """
        self._info_dict = self.__create_info_dict()
        map(self.add_s2_info_dict, self.series)

    def add_s2_info_dict(self, series):
        """
        adds a measurement to the info dictionary.

        Parameters
        ----------
           series: RockPy.Series
              Series to be added to the info_dictionary
        """

        if not series in self._info_dict['series']:
            self._info_dict['stype'].setdefault(series.stype, []).append(self)
            self._info_dict['sval'].setdefault(series.value, []).append(self)

            self._info_dict['sval_stype'].setdefault(series.value, {})
            self._info_dict['sval_stype'][series.value].setdefault(
                series.stype, []).append(self)
            self._info_dict['stype_sval'].setdefault(series.stype, {})
            self._info_dict['stype_sval'][series.stype].setdefault(
                series.value, []).append(self)

            self._info_dict['series'].append(series)

    @property
    def stypes(self):
        """
        list of all stypes
        """
        out = [t.stype for t in self.series]
        return self.__sort_list_set(out)

    @property
    def svals(self):
        """
        list of all stypes
        """
        out = [t.value for t in self.series]
        return self.__sort_list_set(out)

    @property
    def stype_dict(self):
        """
        dictionary of stype: series}
        """
        out = {t.stype: t for t in self.series}
        return out

    @property
    def tdict(self):
        """
        dictionary of stype: series}
        """
        out = {t.stype: t.value for t in self.series}
        return out

    @property
    def _self_tdict(self):
        """
        dictionary of stype: {svalue: self}
        """
        out = {i.stype: {i.value: self} for i in self.series}
        return out

    @property
    def data(self):
        if self._data == {}:
            self._data = deepcopy(self._raw_data)
        return self._data

    # ## DATA RELATED
    ### Calculation and parameters

    def result_generic(self, recalc=False):
        '''
        Generic for for result implementation. Every calculation of result should be in the self.results data structure
        before calculation.
        It should then be tested if a value for it exists, and if not it should be created by calling
        _calculate_result_(result_name).

           recalc: bool
              force recalculation even if a previous result exists
        '''
        # the generic result takes no user parameters
        parameter = {}

        self.calc_result(parameter, recalc)
        return self.results['generic']

    def calculate_generic(self, **parameter):
        '''
        actual calculation of the result

        :return:
        '''

        self.results['generic'] = 0

    def calculate_result(self, result, **parameter):
        """
        Helper function to dynamically call a result. Used in VisualizeV3

        Parameters
        ----------
           result:
           parameter:
        """

        if not self.has_result(result):
            self.logger.warning(
                '%s does not have result << %s >>' % self.mtype, result)
            return
        else:
            # todo figuer out why logger wrong when called from VisualizeV3
            self.logger = logging.getLogger('RockPy.MEASURMENT.' + self.mtype +
                                            '[%s]' % self.sample_obj.name)
            self.logger.info('CALCULATING << %s >>' % result)
            out = getattr(self, 'result_' + result)(**parameter)
        return out

    def calc_generic(self, **parameter):
        '''
        helper function
        actual calculation of the result

        :return:
        '''

        self.results['generic'] = 0

    def calc_result(self, parameter=None, recalc=False, force_method=None):
        '''
        Helper function:
        Calls any calculate_* function, but checks first:

            1. does this calculation method exist
            2. has it been calculated before

               NO : calculate the result

               YES: are given parameters equal to previous calculation parameters

               if YES::

                  NO : calculate result with new parameters
                  YES: return previous result

           parameter: dict
                        dictionary with parameters needed for calculation
           recalc: bool
                        force recalculation if True
           force_method: str
                        not dynamically retrieved caller name.

        :return:
        '''

        # name of the result_* method that called us: 'result_generic' -> 'generic'
        caller = '_'.join(inspect.stack()[1][3].split(
            '_')[1:])  # get calling function #todo get rid of inspect

        if not parameter:  # todo streamline the generation of standard parameters
            try:
                parameter = self.standard_parameter[caller]
            except AttributeError:
                # NOTE(review): dict(caller={}) creates the literal key
                # 'caller', not the value of the caller variable -- looks
                # like a bug; probably {caller: {}} was intended
                parameter = dict(caller={})
            except KeyError:
                parameter = dict(caller={})

        # get the method to be used for calculation. It is either the calling method determined by inspect
        # or the method specified with force_method
        if force_method is not None:
            method = force_method  # method for calculation if any: result_CALLER_method
        else:
            method = caller  # if CALLER = METHOD

        if callable(getattr(self, 'calculate_' +
                            method)):  # check if calculation function exists
            # check for None and replaces it with standard
            parameter = self.compare_parameters(method, parameter, recalc)

            # if results dont exist or force recalc
            # NOTE(review): '== np.nan' is always False (NaN never compares
            # equal), so missing NaN results are not detected here;
            # np.isnan would be needed
            if self.results[caller] is None or self.results[
                    caller] == np.nan or recalc:
                # recalc causes a forced recalculation of the result
                if recalc:
                    self.logger.debug('FORCED recalculation of << %s >>' %
                                      (method))
                else:
                    self.logger.debug(
                        'CANNOT find result << %s >> -> calculating' %
                        (method))
                getattr(self, 'calculate_' +
                        method)(**parameter)  # calling calculation method
            else:
                self.logger.debug('FOUND previous << %s >> parameters' %
                                  (method))
                if self.check_parameters(
                        caller, parameter
                ):  # are parameters equal to previous parameters
                    self.logger.debug(
                        'RESULT parameters different from previous calculation -> recalculating'
                    )
                    getattr(self, 'calculate_' + method)(
                        **parameter)  # recalculating if parameters different
                else:
                    self.logger.debug(
                        'RESULT parameters equal to previous calculation')
        else:
            self.logger.error(
                'CALCULATION of << %s >> not possible, probably not implemented, yet.'
                % method)

    def calc_all(self, **parameter):
        parameter['recalc'] = True
        for result_method in self.result_methods:
            getattr(self, 'result_' + result_method)(**parameter)
        return self.results

    def compare_parameters(self, caller, parameter, recalc):
        """
        checks if given parameter[key] is None and replaces it with standard parameter or calculation_parameter.

        e.g. calculation_generic(A=1, B=2)
             calculation_generic() # will calculate with A=1, B=2
             calculation_generic(A=3) # will calculate with A=3, B=2
             calculation_generic(A=2, recalc=True) # will calculate with A=2 B=standard_parameter['B']

           caller: str
                     name of calling function ('result_generic' should be given as 'generic')
           parameter:
                        Parameters to check
           recalc: Boolean
                     True if forced recalculation, False if not
        :return:
        """
        if not parameter: parameter = dict()

        for key, value in parameter.iteritems():
            if value is None:
                if self.calculation_parameter[caller] and not recalc:
                    parameter[key] = self.calculation_parameter[caller][key]
                else:
                    parameter[key] = self.standard_parameter[caller][key]
        return parameter

    def delete_dtype_var_val(self, dtype, var, val):
        """
        deletes step with var = var and val = val

           dtype: the step type to be deleted e.g. th
           var: the variable e.g. temperature
           val: the value of that step e.g. 500

        example: measurement.delete_step(step='th', var='temp', val=500) will delete the th step where the temperature is 500
        """
        idx = self._get_idx_dtype_var_val(dtype=dtype, var=var, val=val)
        self.data[dtype] = self.data[dtype].filter_idx(idx, invert=True)
        return self

    def check_parameters(self, caller, parameter):
        '''
        Checks if previous calculation used the same parameters, if yes returns the previous calculation
        if no calculates with new parameters

        Parameters
        ----------
           caller: str
               name of calling function ('result_generic' should be given as 'generic')
           parameter:
        Returns
        -------
           bool
              returns true is parameters are not the same
        '''
        if self.calculation_parameter[caller]:
            # parameter for new calculation
            a = []
            for key in self.calculation_parameter[caller]:
                if key in parameter:
                    a.append(parameter[key])
                else:
                    a.append(self.calculation_parameter[caller][key])
                # a = [parameter[i] for i in self.calculation_parameter[caller]]
            # get parameter values used for calculation
            b = [
                self.calculation_parameter[caller][i]
                for i in self.calculation_parameter[caller]
            ]
            if a != b:
                return True
            else:
                return False
        else:
            return True

    def has_result(self, result):
        """
        Checks if the measurement contains a certain result

        Parameters
        ----------
           result: str
              the result that should be found e.g. result='ms' would give True for 'hys' and 'backfield'
        Returns
        -------
           out: bool
              True if it has result, False if not
        """
        if result in self.result_methods:
            return True
        else:
            return False

    ### series RELATED
    def has_series(self, stype=None, sval=None):
        """
        checks if a measurement actually has a series
        :return:
        """
        if self._series and not stype:
            return True
        if self._series and self.get_series(stypes=stype, svals=sval):
            return True
        else:
            return False

    @property
    def series(self):
        if self.has_series():
            return self._series
        else:
            series = RockPy.Series(stype='none', value=np.nan, unit='')
            return [series]

    def _get_series_from_suffix(self):
        """
        takes a given suffix and extracts series data-for quick assessment. For more series control
        use add_series method.

        suffix must be given in the form of
            stype: s_value [s_unit] | next series...
        :return:
        """
        if self.suffix:
            s_type = self.suffix.split(':')[0]
            if len(s_type) > 1:
                s_value = float(self.suffix.split()[1])
                try:
                    s_unit = self.suffix.split('[')[1].strip(']')
                except IndexError:
                    s_unit = None
                return s_type, s_value, s_unit
        else:
            return None

    def _add_series_from_opt(self):
        """
        Takes series specified in options and adds them to self.series
        :return:
        """
        for entry in self._get_series_from_opt():
            # entry is [stype, value, unit]
            self.add_sval(stype=entry[0], sval=entry[1], unit=entry[2])

    def _get_series_from_opt(self):
        """
        creates a list of series from the series option

        e.g. Pressure_1_GPa;Temp_200_C
        :return:
        """
        if self._series_opt:
            series = self._series_opt.replace(' ', '').replace(',', '.').split(
                ';')  # split ; for multiple series
            series = [i.split('_')
                      for i in series]  # split , for type, value, unit
            for i in series:
                try:
                    i[1] = float(i[1])
                except:
                    raise TypeError('%s can not be converted to float' % i)
        else:
            series = None
        return series

    def get_series(self, stypes=None, svals=None):
        """
        searches for given stypes and svals in self.series and returns them

        Parameters
        ----------
           stypes: list, str
              stype or stypes to be looked up
           svals: float
              sval or svals to be looked up

        Returns
        """
        out = self.series
        if stypes:
            stypes = to_list(stypes)
            out = [i for i in out if i.stype in stypes]
        if svals:
            svals = to_list(svals)
            out = [i for i in out if i.value in svals]
        return out

    def add_sval(self,
                 stype=None,
                 sval=None,
                 unit=None,
                 series_obj=None,
                 comment=''):
        """
        adds a series to measurement.series, then adds is to the data and results datastructure

        Parameters
        ----------
           stype: str
              series type to be added
           sval: float or int
              series value to be added
           unit: str
              unit to be added. can be None #todo change so it uses Pint
           series_obj: RockPy.Series
              pre-built series object; when given, stype/sval/unit/comment
              are ignored
           comment: str
              adds a comment to the series

        Returns
        -------
           RockPy.Series instance
        """
        if series_obj:
            series = series_obj
        else:
            series = RockPy.Series(stype=stype,
                                   value=sval,
                                   unit=unit,
                                   comment=comment)

        # register the new series everywhere it is tracked
        self._series.append(series)
        self._add_sval_to_data(series)
        self._add_sval_to_results(series)
        self.sample_obj.add_series2_mdict(series=series, mobj=self)
        return series

    def _add_sval_to_data(self, sobj):
        """
        Adds stype as a column and adds svals to data. Only if stype != none.

        Every non-empty dtype in the raw data gets a constant column
        'stype <stype>' filled with the series value, one entry per row of
        the dtype's 'variable' column.

        Parameter
        ---------
           sobj: series instance
        """
        if sobj.stype != 'none':
            for dtype in self._raw_data:
                if self._raw_data[dtype]:
                    # one copy of the series value per data row
                    data = np.ones(len(
                        self.data[dtype]['variable'].v)) * sobj.value
                    # only add the column if it does not exist yet
                    if not 'stype ' + sobj.stype in self.data[
                            dtype].column_names:
                        self.data[dtype] = self.data[dtype].append_columns(
                            column_names='stype ' + sobj.stype,
                            data=data)  # , unit=sobj.unit) #todo add units

    def _add_sval_to_results(self, sobj):
        """
        Adds the stype as a column and the value as value to the results. Only if stype != none.

        Parameter
        ---------
           sobj: series instance
        """
        if sobj.stype != 'none':
            # data = np.ones(len(self.results['variable'].v)) * sobj.value
            if not 'stype ' + sobj.stype in self.results.column_names:
                self.results = self.results.append_columns(
                    column_names='stype ' + sobj.stype,
                    data=[sobj.value])  # , unit=sobj.unit) #todo add units

    def __sort_list_set(self, values):
        """
        returns a sorted list of non duplicate values
           values:
        :return:
        """
        return sorted(list(set(values)))

    def _get_idx_dtype_var_val(self, dtype, var, val, *args):
        """
        returns the index of the closest value with the variable(var) and the step(step) to the value(val)

        option: inverse:
           returns all indices except this one

        """
        out = [np.argmin(abs(self.data[dtype][var].v - val))]
        return out

    """
    Normalize functions
    +++++++++++++++++++
    """

    def normalize(self,
                  reference='data',
                  ref_dtype='mag',
                  norm_dtypes='all',
                  vval=None,
                  norm_method='max',
                  norm_factor=None,
                  normalize_variable=False,
                  dont_normalize=None,
                  norm_initial_state=True):
        """
        normalizes all available data to reference value, using norm_method

        Parameter
        ---------
           reference: str
              reference state, to which to normalize to e.g. 'NRM'
              also possible to normalize to mass
           ref_dtype: str
              component of the reference, if applicable. standard - 'mag'
           norm_dtypes: list
              dtype to be normalized, if dtype = 'all' all variables will be normalized
           vval: float
              variable value, if reference == value then it will search for the point closest to the vval
           norm_method: str
              how the norm_factor is generated, could be min
           norm_factor: float
              if given, used directly instead of deriving one from reference
           normalize_variable: bool
              if True, variable is also normalized
              default: False
           dont_normalize: list
              list of dtypes that will not be normalized
              default: None
           norm_initial_state: bool
              if true, initial state values are normalized in the same manner as normal data
              default: True

        Returns
        -------
           self, with data normalized in place
        """
        # todo normalize by results
        #getting normalization factor
        # NOTE(review): ``if not norm_factor`` also treats an explicit factor
        # of 0 as "not given" -- confirm that is acceptable
        if not norm_factor:  # if norm_factor specified
            norm_factor = self._get_norm_factor(reference, ref_dtype, vval,
                                                norm_method)

        norm_dtypes = _to_tuple(norm_dtypes)  # make sure its a list/tuple
        for dtype, dtype_data in self.data.iteritems(
        ):  #cycling through all dtypes in data
            if dtype_data:  #if dtype_data == None
                # NOTE(review): this rebinds norm_dtypes, so later dtypes in
                # the loop reuse the expansion computed for the first dtype
                # -- confirm all dtypes share the same columns
                if 'all' in norm_dtypes:  # if all, all non stype data will be normalized
                    norm_dtypes = [
                        i for i in dtype_data.column_names if not 'stype' in i
                    ]

                ### DO not normalize:
                # variable
                if not normalize_variable:
                    variable = dtype_data.column_names[
                        dtype_data.column_dict['variable'][0]]
                    norm_dtypes = [i for i in norm_dtypes if not i == variable]

                if dont_normalize:
                    dont_normalize = _to_tuple(dont_normalize)
                    norm_dtypes = [
                        i for i in norm_dtypes if not i in dont_normalize
                    ]

                for ntype in norm_dtypes:  #else use norm_dtypes specified
                    try:
                        dtype_data[ntype] = dtype_data[ntype].v / norm_factor
                    except KeyError:
                        self.logger.warning(
                            'CAN\'T normalize << %s, %s >> to %s' %
                            (self.sample_obj.name, self.mtype, ntype))

                # recompute 'mag' from the normalized vector components
                if 'mag' in dtype_data.column_names:
                    try:
                        self.data[dtype]['mag'] = self.data[dtype].magnitude(
                            ('x', 'y', 'z'))
                    except:  # NOTE(review): bare except -- narrow if possible
                        self.logger.debug(
                            'no (x,y,z) data found keeping << mag >>')

        # normalize the initial state with the same factor as the main data
        if self.initial_state and norm_initial_state:
            for dtype, dtype_rpd in self.initial_state.data.iteritems():
                self.initial_state.data[dtype] = dtype_rpd / norm_factor
                if 'mag' in self.initial_state.data[dtype].column_names:
                    self.initial_state.data[dtype][
                        'mag'] = self.initial_state.data[dtype].magnitude(
                            ('x', 'y', 'z'))
        return self

    def _get_norm_factor(self, reference, rtype, vval, norm_method):
        """
        Derive the normalization factor from the data.

        Parameter
        ---------
           reference: str
              which data to normalize against, e.g. 'nrm', 'is', 'mass' or a
              numeric value; without it the factor defaults to 1
           rtype:
              component of the reference data (e.g. 'mag')
           vval:
              variable value used to pick a single point, if given
           norm_method:
              how the factor is computed (e.g. 'max', 'min')

        Returns
        -------
           normalization factor: float
        """
        factor = 1  # fallback when no usable reference is found

        if not reference:
            self.logger.warning(
                'NO reference specified, do not know what to normalize to.')
            return factor

        # 'nrm' falls back to the generic 'data' entry when absent
        if reference == 'nrm' and reference not in self.data and 'data' in self.data:
            reference = 'data'

        if reference in self.data:
            factor = self._norm_method(norm_method, vval, rtype,
                                       self.data[reference])

        if reference in ['is', 'initial', 'initial_state']:
            if self.initial_state:
                factor = self._norm_method(norm_method, vval, rtype,
                                           self.initial_state.data['data'])
            if self.is_initial_state:
                factor = self._norm_method(norm_method, vval, rtype,
                                           self.data['data'])

        if reference == 'mass':
            m = self.get_mtype_prior_to(mtype='mass')
            factor = m.data['data']['mass'].v[0]

        if isinstance(reference, (float, int)):
            factor = float(reference)

        return factor

    def _norm_method(self, norm_method, vval, rtype, data):
        """
        Pick the normalization value from *data*.

        If *vval* is given, the *rtype* value of the row whose 'variable' is
        closest to *vval* is returned; otherwise *norm_method* ('max' or
        'min') is applied to the whole *rtype* column.

        Raises
        ------
           NotImplementedError
              if *norm_method* is unknown and no *vval* is given
        """
        methods = {
            'max': max,
            'min': min,
            # 'val': self.get_val_from_data,
        }

        if vval:
            # nearest data point to the requested variable value
            idx = np.argmin(abs(data['variable'].v - vval))
            return data.filter_idx([idx])[rtype].v[0]

        if norm_method not in methods:
            # fix: was ``raise NotImplemented(...)`` -- NotImplemented is a
            # constant, not an exception type, so raising it failed with a
            # TypeError instead of the intended error
            raise NotImplementedError('NORMALIZATION METHOD << %s >>' %
                                      norm_method)
        return methods[norm_method](data[rtype].v)

    def get_mtype_prior_to(self, mtype, include_parameter_m=False):
        """
        Search for the last measurement of type *mtype* measured prior to self.

        Parameters
        ----------
           mtype: str
              the type of measurement that is supposed to be returned
           include_parameter_m: bool
              if True measurements from the parameter category are also normalized. e.g. mass, volume, length...

        Returns
        -------
           RockPy.Measurement, or None when no such measurement exists
        """
        candidates = self.sample_obj.get_measurements(mtypes=mtype, )
        if not candidates:
            return None
        earlier = [m for m in candidates if m.m_idx <= self.m_idx]
        return earlier[-1]

    def _add_stype_to_results(self):
        """
        adds a column 'stype <stype>' with the series value to the results
        for each stype in measurement.series

        :return:
        """
        if self._series:
            for t in self.series:
                if t.stype:
                    # fix: compare against the actual column name ('stype '
                    # prefix included); the old check compared the bare stype
                    # and therefore re-appended a duplicate column on every
                    # call (cf. _add_sval_to_results, which checks correctly)
                    if 'stype ' + t.stype not in self.results.column_names:
                        self.results.append_columns(
                            column_names='stype ' + t.stype,
                            data=t.value,
                            # unit = t.unit      # todo add units
                        )

    def get_series_labels(self):
        """
        Concatenate '<value> <unit> ' labels for all series of this
        measurement, skipping labels that already occurred.
        """
        out = ''
        if not self.has_series():
            return out
        for s in self.series:
            label = str(s.value) + ' ' + s.unit
            if label not in out:
                out += label + ' '
        return out

    """
    CORRECTIONS
    """

    def correct_dtype(self,
                      dtype='th',
                      var='variable',
                      val='last',
                      initial_state=True):
        """
        corrects the remaining moment from the last th_step

        Parameters
        ----------
           dtype: str
              data type the correction step is taken from
           var: str
              variable column used to locate the correction step
           val: 'last', 'first' or float
              which step to use for the correction
           initial_state: bool
              also corrects the initial state if one exists
        """
        try:
            calc_data = self.data[dtype]
        except KeyError:
            # fix: was ``self.log.error`` (no such attribute; the class logs
            # via self.logger); also bail out -- continuing would crash with
            # a NameError because calc_data was never bound
            self.logger.error('REFERENCE << %s >> can not be found ' % (dtype))
            return self

        if val == 'last':
            val = calc_data[var].v[-1]
        if val == 'first':
            val = calc_data[var].v[0]

        # fix: was ``step=dtype`` -- _get_idx_dtype_var_val has no ``step``
        # parameter (signature is dtype, var, val), so the call raised
        idx = self._get_idx_dtype_var_val(dtype=dtype, var=var, val=val)

        correction = self.data[dtype].filter_idx(idx)  # correction step

        # use a distinct loop variable so the dtype parameter is not shadowed
        for dt in self.data:
            # subtract the correction moment from every data type
            self._data[dt]['m'] = self._data[dt]['m'].v - correction['m'].v
            # recalc mag for safety
            self.data[dt]['mag'] = self.data[dt].magnitude(('x', 'y', 'z'))
        self.reset__data()

        if self.initial_state and initial_state:
            for dt in self.initial_state.data:
                self.initial_state.data[dt]['m'] = self.initial_state.data[
                    dt]['m'].v - correction['m'].v
                self.initial_state.data[dt][
                    'mag'] = self.initial_state.data[dt].magnitude(
                        ('x', 'y', 'z'))
        return self

    '''' PLOTTING ''' ''

    @property
    def plottable(self):
        """
        Dictionary of all Visual classes whose only requirement is this
        measurement's mtype, keyed by class name.
        """
        visuals = RockPy.Visualize.base.Generic.inheritors()
        return {v.__name__: v for v in visuals
                if v._required == [self.mtype]}

    def show_plots(self):
        """
        Instantiate (and thereby show) every plottable Visual for this
        measurement.
        """
        for name, visual_cls in self.plottable.items():
            visual_cls(self, show=True)

    def set_get_attr(self, attr, value=None):
        """
        Return the current value of *attr*, creating the attribute with
        *value* first if it does not exist yet.
        """
        if hasattr(self, attr):
            return getattr(self, attr)
        setattr(self, attr, value)
        return getattr(self, attr)
Example #39
0
 def format_vftb(self):
     """
     Format raw VFTB machine data into the RockPy data structure.

     Stores the first data block returned by the machine reader as the
     'remanence' entry of ``self._raw_data``.
     """
     data = self.machine_data.get_data()
     header = self.machine_data.header
     # self.log.debug('FORMATTING << %s >> raw_data for << VFTB >> data structure' % (self.mtype))
     self._raw_data['remanence'] = RockPyData(column_names=header,
                                              data=data[0])
Example #40
0
class Parm_Spectra(base.Measurement):
    '''
    pARM spectra measurement: partial ARMs acquired in successive AF
    windows, read from sushibar output files.
    '''

    def __init__(self, sample_obj,
                 mtype, mfile, machine,
                 mag_method='',
                 **options):
        super(Parm_Spectra, self).__init__(sample_obj,
                                           mtype, mfile, machine,
                                           **options)

    def format_sushibar(self):
        """
        Format raw sushibar data: row 0 is the AF3 (baseline) step, the
        remaining rows are the pARM windows. A 'mag' column and the window
        center ('mean_window') are appended to both tables.
        """
        data = self.machine_data.out_parm_spectra()
        self.af3 = RockPyData(column_names=data[1],
                              data=data[0][0],
                              units=data[2])
        self.af3.define_alias('m', ( 'x', 'y', 'z'))
        self.af3 = self.af3.append_columns('mag', self.af3.magnitude('m'))
        self.af3 = self.af3.append_columns(column_names='mean_window',
                                           data=np.array([self.af3['upper_window'].v + self.af3['lower_window'].v])[
                                                    0] / 2)

        self.data = RockPyData(column_names=data[1],
                               data=data[0][1:],
                               units=data[2])
        self.data.define_alias('m', ( 'x', 'y', 'z'))
        self.data = self.data.append_columns('mag', self.data.magnitude('m'))
        self.data = self.data.append_columns(column_names='mean_window',
                                             data=np.array([self.data['upper_window'].v + self.data['lower_window'].v])[
                                                      0] / 2)
        self.data.define_alias('variable', 'mean_window')

    def _get_cumulative_data(self, subtract_af3=True):
        """
        Cumulative acquisition curve built by summing the windowed
        components; optionally the AF3 baseline is subtracted first.
        """
        cumulative_data = deepcopy(self.data)

        if subtract_af3:
            cumulative_data['x'] = cumulative_data['x'].v - self.af3['x'].v
            cumulative_data['y'] = cumulative_data['y'].v - self.af3['y'].v
            cumulative_data['z'] = cumulative_data['z'].v - self.af3['z'].v
            cumulative_data['mag'] = cumulative_data.magnitude('m')

        # NOTE(review): the slice [:i] excludes the current row, so the sum
        # lags one window behind (first entry is 0) -- confirm this is
        # intended before changing to [:i + 1] / np.cumsum
        cumulative_data['x'] = [np.sum(cumulative_data['x'].v[:i]) for i, v in enumerate(cumulative_data['x'].v)]
        cumulative_data['y'] = [np.sum(cumulative_data['y'].v[:i]) for i, v in enumerate(cumulative_data['y'].v)]
        cumulative_data['z'] = [np.sum(cumulative_data['z'].v[:i]) for i, v in enumerate(cumulative_data['z'].v)]
        cumulative_data['mag'] = cumulative_data.magnitude('m')

        return cumulative_data

    def plt_parm_spectra(self, subtract_af3=True, rtn=False, norm=False, fill=False):
        """
        Plot the pARM spectra (moment per AF window vs. window center).

           subtract_af3: subtract the AF3 baseline step first
           rtn: if True, do not call plt.show() (caller composes the figure)
           norm: normalize to the maximum magnitude
           fill: draw as filled area instead of a line
        """
        plot_data = deepcopy(self.data)

        if subtract_af3:
            plot_data['x'] = plot_data['x'].v - self.af3['x'].v
            plot_data['y'] = plot_data['y'].v - self.af3['y'].v
            plot_data['z'] = plot_data['z'].v - self.af3['z'].v
            plot_data['mag'] = plot_data.magnitude('m')

        if norm:
            norm_factor = max(plot_data['mag'].v)
        else:
            norm_factor = 1

        if fill:
            plt.fill_between(plot_data['mean_window'].v, 0, plot_data['mag'].v / norm_factor,
                             alpha=0.1,
                             label='pARM spetra')
        else:
            # fix: a stray ``0`` positional argument (copied from the
            # fill_between signature) made plt.plot fail; plot x vs. mag
            plt.plot(plot_data['mean_window'].v, plot_data['mag'].v / norm_factor,
                     label='pARM spetra')
        if not rtn:
            plt.show()


    def plt_parm_acquisition(self, subtract_af3=True, rtn=False, norm=False):
        """
        Plot the cumulative pARM acquisition curve.
        """
        plot_data = self._get_cumulative_data(subtract_af3=subtract_af3)

        if norm:
            norm_factor = max(plot_data['mag'].v)
        else:
            norm_factor = 1

        plt.plot(plot_data['mean_window'].v, plot_data['mag'].v / norm_factor, label='pARM acquisition')

        if not rtn:
            plt.show()


    def plt_acq_spec(self, subtract_af3=True, norm=True):
        """
        Combined plot of pARM spectra (filled) and acquisition curve.
        """
        self.plt_parm_spectra(subtract_af3=subtract_af3, rtn=True, norm=norm, fill=True)
        self.plt_parm_acquisition(subtract_af3=subtract_af3, rtn=True, norm=norm)
        plt.xlabel('AF field [mT]')
        plt.grid()
        plt.show()
Example #41
0
class Measurement(object):
    """

    HOW TO get at stuff
    ===================
    HowTo:
    ======
       stypeS
       ++++++
        want all series types (e.g. pressure, temperature...):

        as list: measurement.stypes
           print measurement.stypes
           >>> ['pressure']
        as dictionary with corresponding series as value: measurement.tdict
           print measurement.tdict
           >>> {'pressure': <RockPy.Series> pressure, 0.60, [GPa]}
        as dictionary with itself as value: measurement._self_tdict
           >>> {'pressure': {0.6: <RockPy.Measurements.thellier.Thellier object at 0x10e2ef890>}}

       svalUES
       +++++++
       want all values for any series in a measurement

        as list: measurement.svals
        print measurement.svals
        >>> [0.6]


    """

    logger = logging.getLogger('RockPy.MEASUREMENT')

    # @classmethod
    # def _standard_parameter(cls):
    #     return {i:{} for i in cls.result_methods}

    @classmethod
    def simulate(cls, **parameter):
        """
        Pseudo abstract method; subclasses override it to build a simulated
        measurement from the given parameters. The base class simulates
        nothing and returns None.
        """
        return None

    @classmethod
    def implemented_machines(cls):
        """
        Dictionary of all implemented machines, i.e. all subclasses of
        RockPy.Readin.base.Machine keyed by their lowercased class name.
        """
        machines = RockPy.Readin.base.Machine.__subclasses__()
        return {machine.__name__.lower(): machine for machine in machines}

    @classmethod
    def inheritors(cls):
        """
        Collect all direct and indirect subclasses of this class.

        Returns
        -------
           set of classes
        """
        found = set()
        todo = [cls]
        while todo:
            current = todo.pop()
            for sub in current.__subclasses__():
                if sub not in found:
                    found.add(sub)
                    todo.append(sub)
        return found

    @classmethod
    def implemented_measurements(cls):
        """
        Dictionary of all implemented measurement classes, keyed by their
        lowercased class name.

        Returns
        -------
           dict
        """
        return {sub.__name__.lower(): sub for sub in Measurement.inheritors()}

    @classmethod
    def measurement_formatters(cls):
        """
        Map every implemented measurement to the machines it can read.

        Returns
        -------
           dict
              {measurement name: {machine name: readin class}} built from
              the ``format_<machine>`` methods defined on each measurement
              class.
        """
        # measurement formatters are important!
        # if they are not inside the measurement class, the measurement has not been implemented for this machine.
        # the following machine formatters:
        # 1. looks through all implemented measurements
        # 2. for each measurement stores the machine and the applicable readin class in a dictonary
        # note: '_'.join(i.split('_')[1:]) strips the leading 'format_' while
        # preserving underscores inside the machine name itself

        measurement_formatters = {cl.__name__.lower():
                                      {'_'.join(i.split('_')[1:]).lower():
                                           Measurement.implemented_machines()['_'.join(i.split('_')[1:]).lower()]
                                       for i in dir(cl) if i.startswith('format_')}
                                  for cl in Measurement.inheritors()}
        return measurement_formatters

    @classmethod
    def get_subclass_name(cls):
        """Name of the concrete (sub)class this method is called on."""
        return cls.__name__

    @property
    def standard_parameter(self):
        """
        Standard calculation parameters of this measurement class, or an
        empty dictionary if the class does not define any.

        :return: dict
        """
        # fix: leftover debug ``print`` statements removed -- printing
        # self.__class__._standard_parameter *before* the hasattr check
        # raised AttributeError for classes without the attribute, defeating
        # the empty-dict fallback below
        return getattr(self.__class__, '_standard_parameter', {})

    def __init__(self, sample_obj,
                 mtype, mfile, machine, mdata=None, color=None,
                 series = None,
                 **options):
        """
        Base constructor: imports raw data from the machine file (or takes
        it from mdata) and formats it into the measurement's data structure.

           sample_obj: the sample this measurement belongs to
           mtype: measurement type; lowercased for consistency
           mfile: path to the measurement data file
           machine: name of the machine the file came from; lowercased
           mdata: when mdata is set, this will be directly used as measurement data without formatting from file
           color: color used for plotting if specified
           series: optional series specification, added after initialization
           options: e.g. 'suffix'
        :return:
        """

        self.logger = logging.getLogger('RockPy.MEASURMENT.' + self.get_subclass_name())

        self.color = color
        self.has_data = True
        self._data = {}
        self._raw_data = {}
        self.is_initial_state = False
        self.is_mean = False  # flag for mean measurements

        if machine is not None:
            machine = machine.lower()  # for consistency in code

        if mtype is not None:
            mtype = mtype.lower()  # for consistency in code

        ''' initialize parameters '''
        self.machine_data = None  # returned data from Readin.machines()
        self.suffix = options.get('suffix', '')

        ''' initial state '''
        self.is_machine_data = None  # returned data from Readin.machines()
        self.initial_state = None

        ''' series '''
        self._series = []
        self._series_opt = series

        # sets up results table, calculation parameters and the info dict
        self.__initialize()

        if mtype in Measurement.measurement_formatters():
            self.logger.debug('MTYPE << %s >> implemented' % mtype)
            self.mtype = mtype  # set mtype

            if mdata is not None:  # we have mdata -> ignore mfile and just use that data directly
                self.logger.debug('mdata passed -> using as measurement data without formatting')
                self.sample_obj = sample_obj
                self._data = mdata
                return  # done
            if machine in Measurement.measurement_formatters()[mtype] or machine == 'combined':
                self.logger.debug('MACHINE << %s >> implemented' % machine)
                self.machine = machine  # set machine
                self.sample_obj = sample_obj  # set sample_obj
                if not mfile:
                    self.logger.debug('NO machine or mfile passed -> no raw_data will be generated')
                    return
                else:
                    self.mfile = mfile
                    self.import_data()
                    self.has_data = self.machine_data.has_data
                    if not self.machine_data.has_data:
                        self.logger.error('NO DATA passed: check sample name << %s >>' % sample_obj.name)
            else:
                self.logger.error('UNKNOWN MACHINE: << %s >>' % machine)
                self.logger.error(
                    'most likely cause is the \"format_%s\" method is missing in the measurement << %s >>' % (
                        machine, mtype))
        else:
            self.logger.error('UNKNOWN\t MTYPE: << %s >>' % mtype)


        # dynamic data formatting
        # checks if format_'machine_name' exists. If exists it formats self.raw_data according to format_'machine_name'
        if machine == 'combined':
            pass
        elif callable(getattr(self, 'format_' + machine)):
            if self.has_data:
                self.logger.debug('FORMATTING raw data from << %s >>' % machine)
                getattr(self, 'format_' + machine)()
            else:
                self.logger.debug('NO raw data transfered << %s >>' % machine)
        else:
            self.logger.error(
                'FORMATTING raw data from << %s >> not possible, probably not implemented, yet.' % machine)

        # add series if provied
        # has to come past __initialize()
        if self._series_opt:
            self._add_series_from_opt()

    @property
    def m_idx(self):
        """Position of this measurement in its sample's measurement list."""
        all_measurements = self.sample_obj.measurements
        return all_measurements.index(self)

    @property
    def fname(self):
        """
        Returns only filename from self.mfile

        Returns
        -------
           str: filename from full path
        """
        return os.path.basename(self.mfile)

    def __initialize(self):
        """
        Initialize function is called inside the __init__ function, it is also called when the object is reconstructed
        with pickle.

        Sets up the results table, the calculation/standard parameters and
        the info dictionary from the class's result_*/calculate_* methods.

        :return:
        """


        # dynamical creation of entries in results data. One column for each results_* method.
        # calculation_* methods are not creating columns -> if a result is calculated a result_* method
        # has to be written
        self.result_methods = [i[7:] for i in dir(self) if i.startswith('result_') if
                               not i.endswith('generic') if
                               not i.endswith('result')]  # search for implemented results methods

        # one NaN column per result method; filled once results are calculated
        self.results = RockPyData(
            column_names=self.result_methods,
            data=[np.nan for i in self.result_methods])  # dynamic entry creation for all available result methods

        # ## warning with calculation of results:
        # M.result_slope() -> 1.2
        # M.calculate_vds(t_min=300) -> ***
        # M.results['slope'] -> 1.2
        # M.result_slope(t_min=300) -> 0.9
        #
        # the results are stored for the calculation parameters that were used to calculate it.
        # This means calculating a different result with different parameters can lead to inconsistencies.
        # One has to be aware that comparing the two may not be useful

        # dynamically generating the calculation and standard parameters for each calculation method.
        # This just sets the values to non, the values have to be specified in the class itself
        self.calculation_methods = [i for i in dir(self) if i.startswith('calculate_') if not i.endswith('generic')]
        self.calculation_parameter = {i: dict() for i in self.result_methods}
        self._standard_parameter = {i[10:]: None for i in dir(self) if i.startswith('calculate_') if
                                    not i.endswith('generic')}

        # re-register existing series in the results table (relevant after
        # unpickling, when __init__ is not run again)
        if self._series:
            for t in self._series:
                self._add_sval_to_results(t)
        # self._add_sval_to_data(t)

        self.is_normalized = False  # normalized flag for visuals, so its not normalized twize
        self.norm = None  # the actual parameters

        self._info_dict = self.__create_info_dict()

    def __getstate__(self):
        '''
        Select the picklable subset of the instance dictionary.

        :return: dict that will be pickled
        '''
        keep = (
            'mtype', 'machine', 'mfile',
            'has_data', 'machine_data',
            '_raw_data', '_data',
            'initial_state', 'is_initial_state',
            'sample_obj',
            '_series_opt', '_series',
            'suffix',
        )
        return {k: v for k, v in self.__dict__.iteritems() if k in keep}

    def __setstate__(self, d):
        '''
        Restore pickled state and re-run the dynamic initialization that
        __getstate__ deliberately left out (results table, parameters, ...).

           d: unpickled state dict
        :return:
        '''
        self.__dict__.update(d)
        self.__initialize()

    def reset__data(self, recalc_mag=False):
        # hook for subclasses: rebuild self._data after corrections
        # (recalc_mag would trigger recomputation of the 'mag' column);
        # the base implementation is a no-op
        pass

    def __getattr__(self, attr):
        # fallback lookup, only invoked when normal attribute lookup fails:
        # 1) expose self._data entries as attributes (m.<dtype>)
        # 2) expose result_* methods' first value as attributes (m.<result>)
        # __getattribute__ is used to avoid recursing back into __getattr__
        # print attr, self.__dict__.keys()
        if attr in self.__getattribute__('_data').keys():
            return self._data[attr]
        if attr in self.__getattribute__('result_methods'):
            return getattr(self, 'result_' + attr)().v[0]
        raise AttributeError(attr)

    def import_data(self, rtn_raw_data=None, **options):
        '''
        Import the data from mfile using the machine's readin class.

           rtn_raw_data: if True the raw data is returned instead of being
                         stored in self.machine_data
           options: may override 'machine', 'mtype' and 'mfile'
        :return: raw data if rtn_raw_data is set, otherwise None
        '''

        self.logger.info('IMPORTING << %s , %s >> data' % (self.machine, self.mtype))

        machine = options.get('machine', self.machine)
        mtype = options.get('mtype', self.mtype)
        mfile = options.get('mfile', self.mfile)

        reader = self.measurement_formatters()[mtype][machine]
        raw_data = reader(mfile, self.sample_obj.name)

        if raw_data is None:
            self.logger.error('IMPORTING\t did not transfer data - CHECK sample name and data file')
            return
        if rtn_raw_data:
            self.logger.info('RETURNING raw_data for << %s , %s >> data' % (machine, mtype))
            return raw_data
        self.machine_data = raw_data


    def set_initial_state(self,
                          mtype, mfile, machine,  # standard
                          **options):
        """
        creates a new measurement (ISM) as initial state of base measurement (BSM).
        It dynamically calls the measurement _init_ function and assigns the created measurement to the
        self.initial_state value. It also sets a flag for the ISM to check if a measurement is a MIS.

        Parameters
        ----------
           mtype: str
              measurement type
           mfile: str
              measurement data file
           machine: str
              measurement machine
           options:

        Returns
        -------
           the created initial-state measurement, or None if mtype is unknown
        """
        mtype = mtype.lower()
        # fix: was ``machnine = machine.lower()`` -- the typo assigned to a
        # dead variable, so the machine name was never actually lowercased
        machine = machine.lower()

        self.logger.info('CREATING << %s >> initial state measurement << %s >> data' % (mtype, self.mtype))
        implemented = {i.__name__.lower(): i for i in Measurement.inheritors()}

        # can only be created if the measurement is actually implemented
        if mtype in implemented:
            self.initial_state = implemented[mtype](self.sample_obj, mtype, mfile, machine)
            self.initial_state.is_initial_state = True
            return self.initial_state
        else:
            self.logger.error('UNABLE to find measurement << %s >>' % (mtype))


    ### INFO DICTIONARY

    @property
    def info_dict(self):
        # lazily build the info dictionary and rebuild it whenever one of
        # the measurement's series is missing from it (e.g. a series was
        # added after the last build)
        if not hasattr(self,'_info_dict'):
            self._info_dict = self.__create_info_dict()
        if not all( i in self._info_dict['series'] for i in self.series):
            self._recalc_info_dict()
        return self._info_dict

    def __create_info_dict(self):
        """
        Build the empty info dictionary.

        Returns
        -------
           dict
              one empty dict per permutation of 'stype'/'sval' ('stype',
              'sval', 'stype_sval', 'sval_stype') plus an empty 'series'
              list.
        """
        parts = ['stype', 'sval']
        out = {}
        for n in range(3):
            for combo in itertools.permutations(parts, n):
                if combo:
                    out['_'.join(combo)] = {}
        out['series'] = []
        return out

    def _recalc_info_dict(self):
        """
        Re-calculates the info_dictionary from scratch for all series of the
        measurement.
        """
        self._info_dict = self.__create_info_dict()
        # explicit loop instead of map(): map is evaluated lazily under
        # Python 3, which would silently skip the side effects
        for series in self.series:
            self.add_s2_info_dict(series)

    def add_s2_info_dict(self, series):
        """
        Register *series* in the info dictionary (by stype, by sval and by
        both combinations). Already-registered series are ignored.

        Parameters
        ----------
           series: RockPy.Series
              Series to be added to the info_dictionary
        """
        if series in self._info_dict['series']:
            return

        stype = series.stype
        sval = series.value

        self._info_dict['stype'].setdefault(stype, []).append(self)
        self._info_dict['sval'].setdefault(sval, []).append(self)

        self._info_dict['sval_stype'].setdefault(sval, {}) \
            .setdefault(stype, []).append(self)
        self._info_dict['stype_sval'].setdefault(stype, {}) \
            .setdefault(sval, []).append(self)

        self._info_dict['series'].append(series)

    @property
    def stypes(self):
        """
        Sorted list of all distinct series types of this measurement.
        """
        return self.__sort_list_set([t.stype for t in self.series])

    @property
    def svals(self):
        """
        list of all series values (sorted, duplicates removed) --
        docstring fixed: it previously said 'stypes'
        """
        out = [t.value for t in self.series]
        return self.__sort_list_set(out)

    @property
    def stype_dict(self):
        """
        dictionary of {stype: series}; on duplicate stypes the last series
        wins
        """
        return {series.stype: series for series in self.series}

    @property
    def tdict(self):
        """
        dictionary of {stype: series value}
        """
        return {series.stype: series.value for series in self.series}

    @property
    def _self_tdict(self):
        """
        nested dictionary of {stype: {svalue: self}} for every series
        """
        return {series.stype: {series.value: self} for series in self.series}


    @property
    def data(self):
        # working copy of the measurement data; created lazily as a deepcopy
        # of _raw_data so corrections never modify the raw data
        if self._data == {}:
            self._data = deepcopy(self._raw_data)
        return self._data

    # ## DATA RELATED
    ### Calculation and parameters

    def result_generic(self, recalc=False):
        '''
        Generic for for result implementation. Every calculation of result should be in the self.results data structure
        before calculation.
        It should then be tested if a value for it exists, and if not it should be created by calling
        _calculate_result_(result_name).

        :return: the 'generic' column of self.results
        '''
        parameter = {}

        # calc_result decides whether a (re)calculation is needed based on
        # the parameters and the recalc flag
        self.calc_result(parameter, recalc)
        return self.results['generic']

    def calculate_generic(self, **parameter):
        '''
        Actual calculation of the generic result: simply stores 0 in the
        'generic' results column.

        :return:
        '''
        self.results['generic'] = 0

    def calculate_result(self, result, **parameter):
        """
        Helper function to dynamically call a result. Used in VisualizeV3

        Parameters
        ----------
           result: str
              name of the result (without the 'result_' prefix)
           parameter:
              passed through to the result_* method
        """

        if not self.has_result(result):
            # fix: arguments were ``('%s ... %s' % self.mtype, result)`` --
            # operator precedence left the format string with one value for
            # two %s placeholders, raising TypeError whenever this fired
            self.logger.warning('%s does not have result << %s >>' %
                                (self.mtype, result))
            return
        else:
            # todo figuer out why logger wrong when called from VisualizeV3
            self.logger = logging.getLogger('RockPy.MEASURMENT.' + self.mtype+'[%s]'%self.sample_obj.name)
            self.logger.info('CALCULATING << %s >>' % result)
            out = getattr(self, 'result_'+result)(**parameter)
        return out


    def calc_generic(self, **parameter):
        '''
        helper function: template for the actual calculation of a result,
        stores the computed value in self.results
        '''
        self.results['generic'] = 0

    def calc_result(self, parameter=None, recalc=False, force_method=None):
        '''
        Helper function:
        Calls any calculate_* function, but checks first:

            1. does this calculation method exist
            2. has it been calculated before

               NO : calculate the result

               YES: are given parameters equal to previous calculation parameters

               if YES::

                  NO : calculate result with new parameters
                  YES: return previous result

           parameter: dict
                        dictionary with parameters needed for calculation
           recalc: bool
                        force a recalculation even if a previous result exists
           force_method: str
                        use calculate_<force_method> instead of the method name
                        derived from the calling function

        :return:
        '''

        caller = '_'.join(inspect.stack()[1][3].split('_')[1:])  # get calling function #todo get rid of inspect

        if not parameter:  # todo streamline the generation of standard parameters
            try:
                parameter = self.standard_parameter[caller]
            except (AttributeError, KeyError):
                # bugfix: dict(caller={}) created a literal key named 'caller'
                # which was later forwarded as a bogus keyword argument to the
                # calculate_* method; an empty dict is what was intended
                parameter = {}

        # get the method to be used for calculation. It is either the calling method determined by inspect
        # or the method specified with force_method
        if force_method is not None:
            method = force_method  # method for calculation if any: result_CALLER_method
        else:
            method = caller  # if CALLER = METHOD

        if callable(getattr(self, 'calculate_' + method)):  # check if calculation function exists
            # check for None and replaces it with standard
            parameter = self.compare_parameters(method, parameter, recalc)

            previous = self.results[caller]
            # bugfix: `previous == np.nan` is always False (NaN never compares
            # equal); NaN must be detected with np.isnan
            is_nan = isinstance(previous, float) and np.isnan(previous)

            # if results dont exist or force recalc
            if previous is None or is_nan or recalc:
                # recalc causes a forced recalculation of the result
                if recalc:
                    self.logger.debug('FORCED recalculation of << %s >>' % method)
                else:
                    self.logger.debug('CANNOT find result << %s >> -> calculating' % method)
                getattr(self, 'calculate_' + method)(**parameter)  # calling calculation method
            else:
                self.logger.debug('FOUND previous << %s >> parameters' % method)
                if self.check_parameters(caller, parameter):  # are parameters equal to previous parameters
                    self.logger.debug('RESULT parameters different from previous calculation -> recalculating')
                    getattr(self, 'calculate_' + method)(**parameter)  # recalculating if parameters different
                else:
                    self.logger.debug('RESULT parameters equal to previous calculation')
        else:
            self.logger.error(
                'CALCULATION of << %s >> not possible, probably not implemented, yet.' % method)

    def calc_all(self, **parameter):
        """force-recalculate every implemented result and return the results structure"""
        parameter['recalc'] = True
        for name in self.result_methods:
            getattr(self, 'result_%s' % name)(**parameter)
        return self.results

    def compare_parameters(self, caller, parameter, recalc):
        """
        checks if given parameter[key] is None and replaces it with standard parameter or calculation_parameter.

        e.g. calculation_generic(A=1, B=2)
             calculation_generic() # will calculate with A=1, B=2
             calculation_generic(A=3) # will calculate with A=3, B=2
             calculation_generic(A=2, recalc=True) # will calculate with A=2 B=standard_parameter['B']

           caller: str
                     name of calling function ('result_generic' should be given as 'generic')
           parameter: dict
                        Parameters to check
           recalc: Boolean
                     True if forced recalculation, False if not
        :return: dict with all None values replaced
        """
        if not parameter:
            parameter = dict()

        # bugfix: .items() instead of .iteritems() -- works on Python 2 and 3
        # (only values are replaced, so mutating during iteration is safe)
        for key, value in parameter.items():
            if value is None:
                if self.calculation_parameter[caller] and not recalc:
                    parameter[key] = self.calculation_parameter[caller][key]
                else:
                    parameter[key] = self.standard_parameter[caller][key]
        return parameter

    def delete_dtype_var_val(self, dtype, var, val):
        """
        deletes the step of a data type whose variable is closest to the given value

           dtype: the step type to be deleted e.g. th
           var: the variable e.g. temperature
           val: the value of that step e.g. 500

        example: measurement.delete_step(step='th', var='temp', val=500) will delete the th step where the temperature is 500
        """
        index = self._get_idx_dtype_var_val(dtype=dtype, var=var, val=val)
        self.data[dtype] = self.data[dtype].filter_idx(index, invert=True)
        return self

    def check_parameters(self, caller, parameter):
        '''
        Checks whether the parameters given now differ from the ones used in
        the previous calculation.

        Parameters
        ----------
           caller: str
               name of calling function ('result_generic' should be given as 'generic')
           parameter: dict
               parameters intended for the new calculation
        Returns
        -------
           bool
              True if the parameters are NOT the same (recalculation needed)
        '''
        previous = self.calculation_parameter[caller]
        if not previous:
            return True
        # value that would be used for each key: an explicitly given parameter
        # wins, otherwise the previously used value is assumed
        requested = [parameter.get(key, previous[key]) for key in previous]
        used = [previous[key] for key in previous]
        return requested != used

    def has_result(self, result):
        """
        Checks if the measurement contains a certain result

        Parameters
        ----------
           result: str
              the result that should be found e.g. result='ms' would give True for 'hys' and 'backfield'
        Returns
        -------
           out: bool
              True if it has result, False if not
        """
        return result in self.result_methods

    ### series RELATED
    def has_series(self, stype=None, sval=None):
        """
        checks if a measurement actually has a series, optionally restricted to
        a series type and/or a series value

        Parameters
        ----------
           stype: str
              only consider series of this type
           sval: float
              only consider series with this value

        :return: bool
        """
        if not self._series:
            return False
        if stype is None and sval is None:
            return True
        # bugfix: sval used to be ignored whenever stype was not given
        return bool(self.get_series(stypes=stype, svals=sval))

    @property
    def series(self):
        """list of attached series; a placeholder 'none' series if there are none"""
        if self.has_series():
            return self._series
        return [RockPy.Series(stype='none', value=np.nan, unit='')]

    def _get_series_from_suffix(self):
        """
        takes a given suffix and extracts series data-for quick assessment. For more series control
        use add_series method.

        suffix must be given in the form of
            stype: s_value [s_unit] | next series...
        :return: (stype, svalue, sunit) tuple, or None if no suffix / no stype
        """
        if not self.suffix:
            return None
        stype = self.suffix.split(':')[0]
        if len(stype) > 1:
            svalue = float(self.suffix.split()[1])
            try:
                sunit = self.suffix.split('[')[1].strip(']')
            except IndexError:
                sunit = None
            return stype, svalue, sunit

    def _add_series_from_opt(self):
        """
        Takes series specified in options and adds them to self.series
        :return:
        """
        parsed = self._get_series_from_opt()
        for item in parsed:
            self.add_sval(stype=item[0], sval=item[1], unit=item[2])

    def _get_series_from_opt(self):
        """
        creates a list of series from the series option

        e.g. Pressure_1_GPa;Temp_200_C

        Returns
        -------
           list of [stype, svalue, sunit] lists, or None if no series option set
        """
        if not self._series_opt:
            return None
        series = self._series_opt.replace(' ', '').replace(',', '.').split(';')  # split ; for multiple series
        series = [i.split('_') for i in series]  # split _ for type, value, unit
        for i in series:
            try:
                i[1] = float(i[1])
            # bugfix: bare `except:` replaced -- only conversion/format errors
            # should be translated into the TypeError below
            except (ValueError, IndexError):
                raise TypeError('%s can not be converted to float' % i)
        return series

    def get_series(self, stypes=None, svals=None):
        """
        searches for given stypes and svals in self.series and returns them

        Parameters
        ----------
           stypes: list, str
              stype or stypes to be looked up
           svals: float
              sval or svals to be looked up

        Returns
        -------
           list of matching series
        """
        matches = self.series
        if stypes:
            wanted_types = to_list(stypes)
            matches = [s for s in matches if s.stype in wanted_types]
        if svals:
            wanted_vals = to_list(svals)
            matches = [s for s in matches if s.value in wanted_vals]
        return matches

    def add_sval(self, stype=None, sval=None, unit=None, series_obj=None, comment=''):
        """
        adds a series to measurement.series, then adds it to the data and results datastructure

        Parameters
        ----------
           stype: str
              series type to be added
           sval: float or int
              series value to be added
           unit: str
              unit to be added. can be None #todo change so it uses Pint
           series_obj: RockPy.Series
              ready-made series instance; takes precedence over stype/sval/unit
           comment: str
              adds a comment to the series

        Returns
        -------
           RockPy.Series instance
        """
        new_series = series_obj if series_obj else RockPy.Series(stype=stype, value=sval,
                                                                 unit=unit, comment=comment)
        self._series.append(new_series)
        self._add_sval_to_data(new_series)
        self._add_sval_to_results(new_series)
        self.sample_obj.add_series2_mdict(series=new_series, mobj=self)
        return new_series

    def _add_sval_to_data(self, sobj):
        """
        Adds stype as a column and adds svals to data. Only if stype != none.

        Parameter
        ---------
           sobj: series instance
        """
        if sobj.stype == 'none':
            return
        column = 'stype ' + sobj.stype
        for dtype in self._raw_data:
            if not self._raw_data[dtype]:
                continue
            if column not in self.data[dtype].column_names:
                fill = np.ones(len(self.data[dtype]['variable'].v)) * sobj.value
                self.data[dtype] = self.data[dtype].append_columns(column_names=column,
                                                                   data=fill)  # , unit=sobj.unit) #todo add units

    def _add_sval_to_results(self, sobj):
        """
        Adds the stype as a column and the value as value to the results. Only if stype != none.

        Parameter
        ---------
           sobj: series instance
        """
        if sobj.stype == 'none':
            return
        column = 'stype ' + sobj.stype
        if column not in self.results.column_names:
            self.results = self.results.append_columns(column_names=column,
                                                       data=[sobj.value])  # , unit=sobj.unit) #todo add units

    def __sort_list_set(self, values):
        """
        Return a sorted list of the unique entries in *values*.
        """
        return sorted(set(values))

    def _get_idx_dtype_var_val(self, dtype, var, val, *args):
        """
        returns the index of the data point of the given dtype whose
        variable (var) is closest to the value (val)

        option: inverse:
           returns all indices except this one
        """
        closest = np.argmin(np.abs(self.data[dtype][var].v - val))
        return [closest]

    """
    Normalize functions
    +++++++++++++++++++
    """

    def normalize(self,
                  reference='data', ref_dtype='mag', norm_dtypes='all', vval=None,
                  norm_method='max', norm_factor=None,
                  normalize_variable=False, dont_normalize=None,
                  norm_initial_state=True):
        """
        normalizes all available data to reference value, using norm_method

        Parameter
        ---------
           reference: str
              reference state, to which to normalize to e.g. 'NRM'
              also possible to normalize to mass
           ref_dtype: str
              component of the reference, if applicable. standard - 'mag'
           norm_dtypes: list
              dtype to be normalized, if dtype = 'all' all variables will be normalized
           vval: float
              variable value, if reference == value then it will search for the point closest to the vval
           norm_method: str
              how the norm_factor is generated, could be min
           norm_factor: float
              if given, used directly instead of deriving one from the data
           normalize_variable: bool
              if True, variable is also normalized
              default: False
           dont_normalize: list
              list of dtypes that will not be normalized
              default: None
           norm_initial_state: bool
              if true, initial state values are normalized in the same manner as normal data
              default: True
        """
        # todo normalize by results
        # getting normalization factor
        if not norm_factor:  # bugfix(comment): derive factor only when norm_factor is NOT specified
            norm_factor = self._get_norm_factor(reference, ref_dtype, vval, norm_method)

        norm_dtypes = _to_tuple(norm_dtypes)  # make sure its a list/tuple

        # bugfix: .items() instead of .iteritems() -- works on Python 2 and 3
        for dtype, dtype_data in self.data.items():  # cycling through all dtypes in data
            if dtype_data:  # skip dtypes whose data is None
                if 'all' in norm_dtypes:  # if all, all non stype data will be normalized
                    norm_dtypes = [i for i in dtype_data.column_names if not 'stype' in i]

                ### DO not normalize:
                # variable
                if not normalize_variable:
                    variable = dtype_data.column_names[dtype_data.column_dict['variable'][0]]
                    norm_dtypes = [i for i in norm_dtypes if not i == variable]

                if dont_normalize:
                    dont_normalize = _to_tuple(dont_normalize)
                    norm_dtypes = [i for i in norm_dtypes if not i in dont_normalize]

                for ntype in norm_dtypes:  # else use norm_dtypes specified
                    try:
                        dtype_data[ntype] = dtype_data[ntype].v / norm_factor
                    except KeyError:
                        self.logger.warning('CAN\'T normalize << %s, %s >> to %s' % (self.sample_obj.name, self.mtype, ntype))

                if 'mag' in dtype_data.column_names:
                    try:
                        self.data[dtype]['mag'] = self.data[dtype].magnitude(('x', 'y', 'z'))
                    # bugfix: bare `except:` would also swallow KeyboardInterrupt/SystemExit
                    except Exception:
                        self.logger.debug('no (x,y,z) data found keeping << mag >>')

        if self.initial_state and norm_initial_state:
            for dtype, dtype_rpd in self.initial_state.data.items():
                self.initial_state.data[dtype] = dtype_rpd / norm_factor
                if 'mag' in self.initial_state.data[dtype].column_names:
                    self.initial_state.data[dtype]['mag'] = self.initial_state.data[dtype].magnitude(('x', 'y', 'z'))
        return self

    def _get_norm_factor(self, reference, rtype, vval, norm_method):
        """
        Calculates the normalization factor from the data according to specified input

        Parameter
        ---------
           reference: str
              the type of data to be referenced. e.g. 'NRM' -> norm_factor will be calculated from self.data['NRM']
              if not given, will return 1
           rtype: str
              component used for the normalization (e.g. 'mag')
           vval: float
              variable value to normalize at, if any
           norm_method: str
              method used to condense the reference data into one number

        Returns
        -------
           normalization factor: float
        """
        factor = 1  # initialize

        if not reference:
            self.logger.warning('NO reference specified, do not know what to normalize to.')
            return factor

        if reference == 'nrm' and reference not in self.data and 'data' in self.data:
            reference = 'data'

        if reference in self.data:
            factor = self._norm_method(norm_method, vval, rtype, self.data[reference])

        if reference in ['is', 'initial', 'initial_state']:
            if self.initial_state:
                factor = self._norm_method(norm_method, vval, rtype, self.initial_state.data['data'])
            if self.is_initial_state:
                factor = self._norm_method(norm_method, vval, rtype, self.data['data'])

        if reference == 'mass':
            m = self.get_mtype_prior_to(mtype='mass')
            factor = m.data['data']['mass'].v[0]

        if isinstance(reference, (float, int)):
            factor = float(reference)

        return factor

    def _norm_method(self, norm_method, vval, rtype, data):
        """
        Condenses the reference data into a single normalization value.

        Parameters
        ----------
           norm_method: str
              'max' or 'min' -- how to condense the data when no vval is given
           vval: float
              if given, use the data point whose variable is closest to vval
           rtype: str
              component of the data to use (e.g. 'mag')
           data: RockPyData
              the reference data

        Raises
        ------
           NotImplementedError if norm_method is unknown
        """
        methods = {'max': max,
                   'min': min,
                   # 'val': self.get_val_from_data,
                   }
        if vval:
            idx = np.argmin(abs(data['variable'].v - vval))
            return data.filter_idx([idx])[rtype].v[0]

        if norm_method not in methods:
            # bugfix: NotImplemented is a value, not an exception class --
            # raising it fails; NotImplementedError is the proper exception.
            # The unreachable `return` after the raise was removed.
            raise NotImplementedError('NORMALIZATION METHOD << %s >>' % norm_method)
        return methods[norm_method](data[rtype].v)

    def get_mtype_prior_to(self, mtype, include_parameter_m=False):
        """
        search for last mtype prior to self

        Parameters
        ----------
           mtype: str
              the type of measurement that is supposed to be returned
           include_parameter_m: bool
              if True measurements from the parameter category are also normalized. e.g. mass, volume, length...

        Returns
        -------
           RockPy.Measurement, or None if no matching measurement exists
        """
        candidates = self.sample_obj.get_measurements(mtypes=mtype, )
        if not candidates:
            return None
        earlier = [m for m in candidates if m.m_idx <= self.m_idx]
        return earlier[-1]

    def _add_stype_to_results(self):
        """
        adds a column 'stype <name>' to the results for each stype in measurement.series
        :return:
        """
        if self._series:
            for t in self.series:
                if t.stype:
                    # bugfix: compare against the actual column name
                    # 'stype <name>' (matches _add_sval_to_results)
                    if 'stype ' + t.stype not in self.results.column_names:
                        # bugfix: assign the return value back, as done
                        # everywhere else append_columns is used
                        self.results = self.results.append_columns(
                            column_names='stype ' + t.stype,
                            data=t.value,
                            # unit = t.unit      # todo add units
                        )

    def get_series_labels(self):
        """build a space-separated 'value unit' label for each distinct series"""
        label = ''
        if not self.has_series():
            return label
        for s in self.series:
            chunk = str(s.value) + ' ' + s.unit
            if chunk not in label:
                label += chunk + ' '
        return label

    """
    CORRECTIONS
    """

    def correct_dtype(self, dtype='th', var='variable', val='last', initial_state=True):
        """
        corrects the remaining moment from the last th_step

           dtype: data type the correction step is taken from
           var: variable used to locate the correction step
           val: value of that variable; 'last'/'first' select the last/first data point
           initial_state: also corrects the initial state if one exists
        """

        try:
            calc_data = self.data[dtype]
        except KeyError:
            # bugfix: used self.log (undefined attribute, everywhere else it is
            # self.logger) and then fell through with calc_data unbound -> NameError
            self.logger.error('REFERENCE << %s >> can not be found ' % dtype)
            return self

        if val == 'last':
            val = calc_data[var].v[-1]
        if val == 'first':
            val = calc_data[var].v[0]

        # bugfix: keyword was 'step=' but _get_idx_dtype_var_val expects 'dtype='
        idx = self._get_idx_dtype_var_val(dtype=dtype, var=var, val=val)

        correction = self.data[dtype].filter_idx(idx)  # correction step

        for dt in self.data:
            # calculate correction
            self._data[dt]['m'] = self._data[dt]['m'].v - correction['m'].v
            # recalc mag for safety
            self.data[dt]['mag'] = self.data[dt].magnitude(('x', 'y', 'z'))
        self.reset__data()

        if self.initial_state and initial_state:
            for dt in self.initial_state.data:
                self.initial_state.data[dt]['m'] = self.initial_state.data[dt]['m'].v - correction['m'].v
                self.initial_state.data[dt]['mag'] = self.initial_state.data[dt].magnitude(('x', 'y', 'z'))
        return self


    '''' PLOTTING '''''

    @property
    def plottable(self):
        """
        returns a dictionary {name: visual} of all Visuals applicable to this measurement
        """
        return {visual.__name__: visual
                for visual in RockPy.Visualize.base.Generic.inheritors()
                if visual._required == [self.mtype]}

    def show_plots(self):
        """render every visual applicable to this measurement"""
        for name, visual_cls in self.plottable.items():
            visual_cls(self, show=True)

    def set_get_attr(self, attr, value=None):
        """
        Return the attribute ``attr`` of this object; if it does not exist yet
        it is first created with the default ``value``.
        """
        try:
            return getattr(self, attr)
        except AttributeError:
            setattr(self, attr, value)
            return value
Example #42
0
class SampleGroup(object):
    """
    Container for Samples, has special calculation methods
    """
    log = logging.getLogger(__name__)

    count = 0

    def __init__(self,
                 name=None,
                 sample_list=None,
                 sample_file=None,
                 **options):
        """
        Parameters
        ----------
           name: str
              group name; auto-generated from the instance count if None
           sample_list: list
              samples to add to the group on creation
           sample_file: str
              path to a tab separated file with sample names, masses and dimensions
        """
        SampleGroup.count += 1

        # bugfix: typo 'CRATING' -> 'CREATING' in the log message
        SampleGroup.log.info('CREATING new << samplegroup >>')

        # ## initialize
        if name is None:
            name = 'SampleGroup %04i' % self.count

        self.name = name
        self.samples = {}
        self.results = None

        self.color = None

        if sample_file:
            self.import_multiple_samples(sample_file, **options)

        self._info_dict = self.__create_info_dict()

        if sample_list:
            self.add_samples(sample_list)

    def __getstate__(self):
        '''
        returned dict will be pickled
        :return:
        '''
        state = {
            k: v
            for k, v in self.__dict__.iteritems()
            if k in ('name', 'samples', 'results')
        }

        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        # self.recalc_info_dict()

    def __repr__(self):
        # return super(SampleGroup, self).__repr__()
        return "<RockPy.SampleGroup - << %s - %i samples >> >" % (
            self.name, len(self.sample_names))

    def __getitem__(self, item):
        if item in self.sdict:
            return self.samples[item]
        try:
            return self.sample_list[item]
        except KeyError:
            raise KeyError('SampleGroup has no Sample << %s >>' % item)

    def import_multiple_samples(self,
                                sample_file,
                                length_unit='mm',
                                mass_unit='mg',
                                **options):
        """
        imports a csv file with sample_names masses and dimensions and creates the sample_objects

        :param sample_file: path to a tab separated file; rows containing '#' are skipped
        :param length_unit: unit of the height/diameter columns
        :param mass_unit: unit of the mass column
        :return:
        """
        # bugfix: the file handle was opened without ever being closed
        with open(sample_file) as handle:
            reader_object = csv.reader(handle, delimiter='\t')
            r_list = [i for i in reader_object if not '#' in i]
        header = r_list[0]
        d_dict = {
            i[0]: {header[j].lower(): float(i[j])
                   for j in range(1, len(i))}
            for i in r_list[1:]
        }
        for sample in d_dict:
            mass = d_dict[sample].get('mass', None)
            height = d_dict[sample].get('height', None)
            diameter = d_dict[sample].get('diameter', None)
            S = Sample(sample,
                       mass=mass,
                       height=height,
                       diameter=diameter,
                       mass_unit=mass_unit,
                       length_unit=length_unit)
            self.samples.update({sample: S})

    def pop_sample(self, sample_name):
        """
        remove samples from sample_group will take str(sample_name), list(sample_name)
        """
        if not isinstance(sample_name, list):
            sample_name = [sample_name]
        for sample in sample_name:
            if sample in self.samples:
                self.samples.pop(sample)
        return self

    # ### DATA properties
    @property
    def sample_list(self):
        """alias for slist: samples ordered by name"""
        return self.slist

    @property
    def sample_names(self):
        return sorted(self.samples.keys())

    def add_samples(self, s_list):
        """
        Adds a sample to the sample dictionary and adds the sample_group to sample.sample_groups

        Parameters
        ----------
           s_list: single item or list
              single items get transformed to list

        Note
        ----
           Uses _item_to_list for list conversion
        """
        samples = _to_list(s_list)
        self.samples.update(self._sdict_from_slist(s_list=samples))
        self.log.info('ADDING sample(s) %s' % [s.name for s in samples])
        for sample in samples:
            sample.sgroups.append(self)
            self.add_s2_info_dict(sample)

    def remove_samples(self, s_list):
        """
        Removes a sample from the sgroup.samples dictionary and removes the sgroup from sample.sgroups

        Parameters
        ----------
           s_list: single item or list
              single items get transformed to list

        Note
        ----
           Uses _item_to_list for list conversion
        """

        s_list = _to_list(s_list)
        for s in s_list:
            # NOTE(review): `s` is used here as a sample NAME (sdict lookup and
            # samples.pop by key), whereas add_samples works with sample OBJECTS
            # -- confirm the intended argument type
            self.sdict[s].sgroups.remove(self)
            self.samples.pop(s)

    # ## components of container
    # lists
    @property
    def slist(self):
        out = [self.samples[i] for i in sorted(self.samples.keys())]
        return out

    @property
    def sdict(self):
        return {s.name: s for s in self.sample_list}

    @property
    def mtypes(self):
        """
        sorted set of all measurement types present in the group

        NOTE(review): this property is re-defined further down in the class and
        is therefore shadowed (dead code) -- kept and fixed for reference.
        """
        # bugfix: sample.mtypes is itself a list; the nested lists must be
        # flattened, otherwise set() raises 'unhashable type: list'
        out = [mtype for sample in self.sample_list for mtype in sample.mtypes]
        return self.__sort_list_set(out)

    @property
    def stypes(self):
        """sorted set of all series types found in the group's samples"""
        collected = [stype for sample in self.sample_list for stype in sample.stypes]
        return self.__sort_list_set(collected)

    @property
    def svals(self):
        """sorted set of all series values found in the group's samples"""
        collected = [sval for sample in self.sample_list for sval in sample.svals]
        return self.__sort_list_set(collected)

    # measurement: samples
    @property
    def mtype_sdict(self):
        """mapping mtype -> list of samples having measurements of that type"""
        return {mtype: self.get_samples(mtypes=mtype) for mtype in self.mtypes}

    # mtype: stypes
    @property
    def mtype_stype_dict(self):
        """
        returns, per measurement type, the sorted set of series types found
        within measurements of that type
        """
        out = {}
        for mtype in self.mtypes:
            stypes = []
            for sample in self.get_samples(mtypes=mtype):
                stypes.extend(t.stype for t in sample.mtype_tdict[mtype])
            out[mtype] = self.__sort_list_set(stypes)
        return out

    # mtype: svals
    @property
    def mtype_svals_dict(self):
        """
        returns, per measurement type, the sorted set of series values found
        within measurements of that type
        """
        out = {}
        for mtype in self.mtypes:
            values = []
            for sample in self.get_samples(mtypes=mtype):
                values.extend(t.value for t in sample.mtype_tdict[mtype])
            out[mtype] = self.__sort_list_set(values)
        return out

    @property
    def stype_sval_dict(self):
        """mapping stype -> sorted list of all values that stype takes in the group"""
        return {stype: self._get_all_series_values(stype) for stype in self.stypes}

    @property
    def mtype_dict(self):
        """mapping mtype -> flat list of all measurements of that type in the group"""
        out = {}
        for mtype in self.mtypes:
            out[mtype] = [m for s in self.sample_list for m in s.get_measurements(mtype)]
        return out

    def _get_all_series_values(self, stype):
        """sorted unique values of all series of the given stype across the group"""
        values = set()
        for sample in self.sample_list:
            for measurement in sample.measurements:
                for series in measurement.series:
                    if series.stype == stype:
                        values.add(series.value)
        return sorted(values)

    @property
    def mtypes(self):
        """
        looks through all samples and returns measurement types
        """
        found = {m.mtype for sample in self.sample_list for m in sample.measurements}
        return sorted(found)

    @property
    def stypes(self):
        """
        looks through all samples and returns series types
        """
        found = {stype for sample in self.sample_list for stype in sample.stypes}
        return sorted(found)

    def stype_results(self, **parameter):
        """
        Group the (cached) results by series type and value.

        Returns a dict {stype: {rounded sval: filtered results}}.
        """
        if not self.results:
            self.results = self.calc_all(**parameter)
        stype_columns = [c for c in self.results.column_names if 'stype' in c]
        out = {}
        for column in stype_columns:
            stype = column.split()[1]
            out[stype] = {round(v, 2): None for v in self.results[column].v}

        for stype in out:
            for sval in out[stype]:
                column = 'stype ' + stype
                idx = np.where(self.results[column].v == sval)[0]
                out[stype][sval] = self.results.filter_idx(idx)
        return out

    def _sdict_from_slist(self, s_list):
        """
        creates a dictionary with s.name:s for each sample in a list of samples

        Parameters
        ----------
           s_list: sample or list
        Returns
        -------
           dict
              dictionary with {sample.name : sample} for each sample in s_list

        Note
        ----
           uses _to_list for item -> list conversion
        """
        return {sample.name: sample for sample in _to_list(s_list)}

    def calc_all(self, **parameter):
        """
        Runs calc_all on every sample and stacks the per-sample results
        (row-labelled with the sample name) into self.results.
        """
        for sample in self.sample_list:
            sample.calc_all(**parameter)
            results = sample.results
            rows = RockPyData(column_names=results.column_names,
                              data=results.data,
                              row_names=[sample.name for _ in results.data])
            if self.results is None:
                self.results = rows
            else:
                self.results = self.results.append_rows(rows)
        return self.results

    def average_results(self, **parameter):
        """
        makes averages of all calculations for all samples in group. Only samples with same series are averaged

        Parameters
        ----------
           parameter: calculation parameters; key 'substfunc' (default 'mean')
              selects the function used to condense rows with equal series
        """
        substfunc = parameter.pop('substfunc', 'mean')
        out = None
        stype_results = self.stype_results(**parameter)
        for stype in stype_results:
            for sval in sorted(stype_results[stype].keys()):
                aux = stype_results[stype][sval]
                aux.define_alias('variable', 'stype ' + stype)
                aux = condense(aux, substfunc=substfunc)
                # bugfix: identity comparison `is None` instead of `== None`
                if out is None:
                    out = {stype: aux}
                else:
                    # NOTE(review): assumes `stype` is already a key here; a
                    # second distinct stype would raise KeyError -- confirm
                    out[stype] = out[stype].append_rows(aux)
        return out

    def __add__(self, other):
        """Return a new SampleGroup holding the samples of both groups
        (samples from `other` win on name clashes)."""
        merged = SampleGroup(sample_list=self.sample_list)
        merged.samples.update(other.samples)
        return merged

    def _mlist_to_tdict(self, mlist):
        """
        takes a list of measurements looks for common stypes

        Parameters
        ----------
           mlist: list
              measurements, each providing an iterable ``stypes`` attribute

        Returns
        -------
           dict
              {stype: [measurements whose stypes contain that stype]}
        """
        # bug fix: the original did set([m.stypes for m in mlist]), i.e.
        # called set() on the per-measurement stype collections themselves —
        # a TypeError for list entries (unhashable) and grouping by whole
        # tuples otherwise; the individual stypes must be flattened first
        stypes = sorted(set(st for m in mlist for st in m.stypes))
        return {
            stype: [m for m in mlist if stype in m.stypes]
            for stype in stypes
        }

    def get_measurements(self,
                         snames=None,
                         mtypes=None,
                         series=None,
                         stypes=None,
                         svals=None,
                         sval_range=None,
                         mean=False,
                         invert=False,
                         **options):
        """
        Wrapper, for finding measurements, calls get_samples first and sample.get_measurements
        """
        matches = []
        candidates = self.get_samples(snames, mtypes, stypes, svals, sval_range)
        for smp in candidates:
            # best-effort: samples whose get_measurements rejects these
            # arguments (or returns a non-iterable) are silently skipped
            try:
                matches.extend(
                    smp.get_measurements(
                        mtypes=mtypes,
                        series=series,
                        stypes=stypes,
                        svals=svals,
                        sval_range=sval_range,
                        mean=mean,
                        invert=invert,
                    ))
            except TypeError:
                continue
        return matches

    def delete_measurements(self,
                            sname=None,
                            mtype=None,
                            stype=None,
                            sval=None,
                            sval_range=None):
        """
        deletes measurements according to criteria
        """
        # first locate the samples holding measurements that fit the criteria
        matching = self.get_samples(snames=sname,
                                    mtypes=mtype,
                                    stypes=stype,
                                    svals=sval,
                                    sval_range=sval_range)
        # then drop the matching measurements from each sample individually
        for smp in matching:
            smp.remove_measurements(mtypes=mtype,
                                    stypes=stype,
                                    svals=sval,
                                    sval_range=sval_range)

    def get_samples(self,
                    snames=None,
                    mtypes=None,
                    stypes=None,
                    svals=None,
                    sval_range=None):
        """
        Primary search function for all parameters

        Parameters
        ----------
           snames: list, str
              list of names or a single name of the sample to be retrieved
           mtypes: list, str
              measurement types the samples must contain
           stypes: list, str
              series types the samples must contain
           svals: list, float
              series values the samples must contain (used together with stypes)
           sval_range: list or float
              [min, max] range of accepted series values; a single value is
              treated as [0, value]

        Returns
        -------
           list
              matching samples (may be empty)

        Raises
        ------
           KeyError
              when a requested name / mtype / stype / sval_range cannot be matched
        """
        # only used for log / error messages
        t_value = np.nan if svals is None else svals

        out = []

        if snames:
            snames = _to_list(snames)
            for name in snames:
                try:
                    out.append(self.samples[name])
                except KeyError:
                    raise KeyError(
                        'RockPy.sample_group does not contain sample << %s >>'
                        % name)
            if not out:
                raise KeyError(
                    'RockPy.sample_group does not contain any samples')
        else:
            out = self.sample_list

        if mtypes:
            mtypes = _to_list(mtypes)
            out = [s for s in out for mtype in mtypes if mtype in s.mtypes]
            # bug fix: this emptiness check previously sat OUTSIDE the
            # `if mtypes:` block and raised a misleading error even when no
            # mtype filter was requested; also removed the unreachable
            # `return` statements that followed each raise
            if not out:
                raise KeyError(
                    'RockPy.sample_group does not contain sample with mtypes: << %s >>'
                    % mtypes)

        if stypes:
            stypes = _to_list(stypes)
            out = [s for s in out for stype in stypes if stype in s.stypes]
            if not out:
                raise KeyError(
                    'RockPy.sample_group does not contain sample with stypes: << %s >>'
                    % stypes)

        if svals:
            svals = _to_list(svals)
            out = [
                s for s in out for sval in svals for stype in stypes
                if sval in s.stype_sval_dict[stype]
            ]
            if not out:
                # %s instead of %.2f: t_value may be a list here
                self.log.error(
                    'RockPy.sample_group does not contain sample with (stypes, svals) pair: << %s, %s >>'
                    % (str(stypes), str(t_value)))
                return []

        if sval_range:
            if not isinstance(sval_range, list):
                sval_range = [0, sval_range]
            elif len(sval_range) == 1:
                sval_range = [0] + sval_range

            # bug fix: clause order — `stype` must be bound by its `for`
            # clause before `s.stype_sval_dict[stype]` uses it; the original
            # ordering raised NameError unless stype leaked from a prior loop
            out = [
                s for s in out for stype in stypes
                for tv in s.stype_sval_dict[stype]
                if min(sval_range) <= tv <= max(sval_range)
            ]

            if not out:
                # %s instead of %.2f: t_value may be a list or nan
                raise KeyError(
                    'RockPy.sample_group does not contain sample with (stypes, sval_range) pair: << %s, %s >>'
                    % (stypes, t_value))

        if not out:
            SampleGroup.log.error(
                'UNABLE to find sample with << %s, %s, %s, %s >>' %
                (snames, mtypes, stypes, t_value))

        return out

    def create_mean_sample(
        self,
        reference=None,
        ref_dtype='mag',
        vval=None,
        norm_dtypes='all',
        norm_method='max',
        interpolate=True,
        substfunc='mean',
    ):
        """
        Creates a mean sample out of all samples in the group.

        Collects every measurement of every sample into one new Sample,
        then, per (mtype, stype, sval) combination (excluding pure
        parameter measurements such as mass/dimensions), computes a mean
        measurement and appends it to the new sample's mean_measurements.

        Parameters
        ----------
           reference:
              normalization reference passed to measurement.normalize;
              if given (or vval), measurements are normalized first and the
              resulting mean is flagged is_normalized
           ref_dtype: str
              data type of the normalization reference (default 'mag')
           vval:
              variable value used for normalization
           norm_dtypes:
              which data types to normalize (default 'all')
           norm_method: str
              normalization method (default 'max')
           interpolate: bool
              passed through to mean_measurement
           substfunc: str
              function used to combine measurements (default 'mean')

        Returns
        -------
           Sample
              new sample named 'mean <groupname>' with is_mean = True
        """

        # create new sample_obj
        mean_sample = Sample(name='mean ' + self.name)
        # get all measurements from all samples in sample group and add to mean sample
        mean_sample.measurements = [
            m for s in self.sample_list for m in s.measurements
        ]
        mean_sample.populate_mdict()

        for mtype in sorted(mean_sample.mdict['mtype_stype_sval']):
            # skip pure parameter measurements; only real data is averaged
            if not mtype in [
                    'mass', 'diameter', 'height', 'volume', 'x_len', 'y_len',
                    'z_len'
            ]:
                for stype in sorted(
                        mean_sample.mdict['mtype_stype_sval'][mtype]):
                    for sval in sorted(mean_sample.mdict['mtype_stype_sval']
                                       [mtype][stype]):
                        series = None  # no series object unless normalization ran

                        # normalize if needed
                        if reference or vval:
                            for i, m in enumerate(
                                    mean_sample.mdict['mtype_stype_sval']
                                [mtype][stype][sval]):
                                m = m.normalize(reference=reference,
                                                ref_dtype=ref_dtype,
                                                norm_dtypes=norm_dtypes,
                                                vval=vval,
                                                norm_method=norm_method)
                            # NOTE(review): relies on `m` leaking from the loop
                            # above — the series is taken from the LAST
                            # normalized measurement only; confirm intended
                            series = m.get_series(stypes=stype, svals=sval)[0]

                        # calculating the mean of all measurements
                        M = mean_sample.mean_measurement(
                            mtype=mtype,
                            stype=stype,
                            sval=sval,
                            substfunc=substfunc,
                            interpolate=interpolate,
                            # reference=reference, ref_dtype=ref_dtype,
                            # norm_dtypes=norm_dtypes,
                            # vval=vval, norm_method=norm_method,
                        )
                        if series:
                            M.add_sval(series_obj=series)
                        # flag the mean measurement with its normalization
                        if reference or vval:
                            M.is_normalized = True
                            M.norm = [
                                reference, ref_dtype, vval, norm_method, np.nan
                            ]

                        mean_sample.mean_measurements.append(M)

        mean_sample.is_mean = True  # set is_mean flag after all measurements are created
        return mean_sample

    def __get_variable_list(self, rpdata_list):
        """Collect all 'variable' values from a list of RockPyData objects
        into one sorted, duplicate-free list."""
        collected = [val for item in rpdata_list for val in item['variable'].v]
        return self.__sort_list_set(collected)

    def __sort_list_set(self, values):
        """
        returns a sorted list of non duplicate values

        :param values: iterable of (hashable, orderable) values
        :return: sorted list with duplicates removed
        """
        # sorted() accepts any iterable, so the intermediate list is not needed
        return sorted(set(values))

    ''' INFODICT '''

    def __create_info_dict(self):
        """
        creates all info dictionaries

        Returns
        -------
           dict
              one empty dict per permutation key of 'mtype', 'stype' and
              'sval' (e.g. 'mtype', 'stype_sval', 'mtype_stype_sval', ...)
        """
        levels = ['mtype', 'stype', 'sval']
        # every non-empty permutation of the three level names, joined by '_'
        keys = [
            '_'.join(perm)
            for n in range(1, len(levels) + 1)
            for perm in itertools.permutations(levels, n)
        ]
        return {key: {} for key in keys}

    # @profile()
    def add_s2_info_dict(self, s):
        """
        Adds a sample to the infodict.

        Parameters
        ----------
           s: RockPySample
              The sample that should be added to the dictionary

        Notes
        -----
           Mirrors the nested structure of s.info_dict into self._info_dict:
           each key names its levels ('mtype', 'stype_sval', ...); the dicts
           are walked level by level and at the deepest level the sample
           itself is appended (at most once) to a list.
        """

        keys = self.info_dict.keys()  # all possible keys

        for key in keys:
            # split keys into levels
            split_keys = key.split('_')
            for i, level in enumerate(split_keys):
                # i == level number, n == maximal (deepest) level index;
                # at i == n the container is a list of samples, above it dicts
                n = len(split_keys) - 1

                # level 0
                for e0 in s.info_dict[key]:
                    # if only 1 level
                    if i == n == 0:
                        # create key with empty list
                        self._info_dict[key].setdefault(e0, list())
                        # add sample if not already in list
                        if not s in self._info_dict[key][e0]:
                            self._info_dict[key][e0].append(s)
                        continue
                    else:
                        # if not last entry generate key: dict() pair
                        self._info_dict[key].setdefault(e0, dict())

                    # level 1
                    for e1 in s.info_dict[key][e0]:
                        if i == n == 1:
                            self._info_dict[key][e0].setdefault(e1, list())
                            if not s in self._info_dict[key][e0][e1]:
                                self._info_dict[key][e0][e1].append(s)
                            continue
                        elif i > 0:
                            self._info_dict[key][e0].setdefault(e1, dict())

                            # level 2 (deepest possible with three level names)
                            for e2 in s.info_dict[key][e0][e1]:
                                if i == n == 2:
                                    self._info_dict[key][e0][e1].setdefault(
                                        e2, list())
                                    if not s in self._info_dict[key][e0][e1][
                                            e2]:
                                        self._info_dict[key][e0][e1][
                                            e2].append(s)
                                    continue
                                elif i > 1:
                                    self._info_dict[key][e0][e1].setdefault(
                                        e2, dict())

    def recalc_info_dict(self):
        """
        Recalculates the info_dictionary with information of all samples and their corresponding measurements
        """
        self._info_dict = self.__create_info_dict()
        # bug fix: map() was used purely for its side effect; under Python 3
        # map() is lazy, so add_s2_info_dict would silently never run — use
        # an explicit loop instead
        for sample in self.slist:
            self.add_s2_info_dict(sample)

    @property
    def info_dict(self):
        """
        Property for easy access of info_dict. If '_info_dict' has not been created, it will create one.
        """
        if not hasattr(self, '_info_dict'):
            # recalc_info_dict creates a fresh dict itself before filling it,
            # so the previous extra __create_info_dict call here was redundant
            self.recalc_info_dict()
        return self._info_dict
Example #43
0
class TestRockPyData(TestCase):
    """Unit tests for the RockPyData container: columns, rows, aliases,
    duplicate handling, statistics and assignment (Python 2 style prints)."""

    def setUp(self):
        # run before each test: 4x4 table, constant first column 'F'
        self.testdata = ((1, 2, 3, 4),
                         (1, 6, 7, 8),
                         (1, 2, 11, 12),
                         (1, 6, 55, 66))

        self.col_names = ('F', 'Mx', 'My', 'Mz')
        self.row_names = ('1.Zeile', '2.Zeile_A', '3.Zeile', '4.Zeile_A')
        self.units = ('T', 'mT', 'fT', 'pT')

        self.RPD = RockPyData(column_names=self.col_names, row_names=self.row_names, units=self.units,
                              data=self.testdata)

    def test_column_names(self):
        self.assertEqual(self.RPD.column_names, list(self.col_names))

    def test_column_count(self):
        self.assertEqual(self.RPD.column_count, len(self.col_names))

    def test__find_duplicate_variable_rows(self):
        # default variable is column 'F' -> all four rows share F == 1
        self.assertEqual(self.RPD._find_duplicate_variable_rows(), [(0, 1, 2, 3)])

        # redefine variabe alias to the first two columns
        self.RPD.define_alias('variable', ('F', 'Mx'))
        self.assertEqual(self.RPD._find_duplicate_variable_rows(), [(0, 2), (1, 3)])

    def test_rename_column(self):
        self.RPD.rename_column('Mx', 'M_x')
        self.assertEqual(self.RPD.column_names, ['F', 'M_x', 'My', 'Mz'])

    def test_append_rows(self):
        """Rows can be appended from nested lists, flat lists and other
        RockPyData objects (columns are matched by name)."""
        d1 = [[5, 6, 7, 8], [9, 10, 11, 12]]
        self.RPD = self.RPD.append_rows(d1, ('5.Zeile', '6.Zeile'))
        self.assertTrue(np.array_equal(self.RPD.v[-2:, :], np.array(d1)))
        d2 = [5, 6, 7, 8]
        self.RPD = self.RPD.append_rows(d2, '5.Zeile')
        self.assertTrue(np.array_equal(self.RPD.v[-1, :], np.array(d2)))
        # lets try with other RockPyData object
        rpd = copy.deepcopy(self.RPD)
        rpd.rename_column('Mx', 'M_x')
        self.RPD = self.RPD.append_rows(rpd)
        # TODO: add assert

    def test_delete_rows(self):
        self.RPD = self.RPD.delete_rows((0, 2))
        self.assertTrue(np.array_equal(self.RPD.v, np.array(self.testdata)[(1, 3), :]))

    def test_eliminate_duplicate_variable_rows(self):
        # check for one variable column: all rows share F == 1, so all
        # collapse and the table ends up empty
        self.RPD = self.RPD.eliminate_duplicate_variable_rows()
        self.assertTrue(np.array_equal(self.RPD.v, np.array([]).reshape(0, 4)))

    def test_eliminate_duplicate_variable_rows2(self):
        # check for two variable columns
        self.RPD.define_alias('variable', ('F', 'Mx'))
        rpd = self.RPD.eliminate_duplicate_variable_rows(substfunc='mean')
        self.assertTrue(np.array_equal(rpd.v, np.array([[1., 2., 7., 8.], [1., 6., 31., 37.]])))
        self.assertTrue(np.array_equal(rpd.e, np.array([[0., 0., 4., 4.], [0., 0., 24., 29.]])))
        rpd = self.RPD.eliminate_duplicate_variable_rows(substfunc='last')
        self.assertTrue(np.array_equal(rpd.v, np.array([[1., 2., 11., 12.], [1., 6., 55., 66.]])))


    def test_mean(self):
        # column means of the test data; errors are the stddevs
        self.RPD = self.RPD.mean()
        self.assertTrue(np.array_equal(self.RPD.v, np.array([[1., 4., 19., 22.5]])))
        np.testing.assert_allclose(self.RPD.e, np.array([[0., 2., 20.976, 25.273]]), atol=0.01)

    def test_max(self):
        self.RPD = self.RPD.max()
        self.assertTrue(np.array_equal(self.RPD.v, np.array([[1., 6., 55., 66.]])))

    def test_filter_row_names(self):
        self.assertEqual(self.RPD.filter_row_names(('1.Zeile', '3.Zeile')).row_names, ['1.Zeile', '3.Zeile'])

    def test_filter_match_row_names(self):
        # get all rows ending with '_A'
        self.assertEqual(self.RPD.filter_match_row_names('.*_A').row_names, ['2.Zeile_A', '4.Zeile_A'])

    def test_append_columns(self):
        cb = self.RPD.column_count
        d = (8, 7, 6, 5)
        self.RPD = self.RPD.append_columns('neue Spalte', d)
        self.assertEqual(cb + 1, self.RPD.column_count)
        self.assertTrue(np.array_equal(self.RPD['neue Spalte'].v, np.array(d)))

    def test_sort(self):
        self.assertTrue(np.array_equal(self.RPD.sort('Mx')['Mx'].v, np.array((2, 2, 6, 6))))

    #def test_interpolate(self):
    #    self.RPD.define_alias('variable', 'My')
    #    iv = (1, 11, 33, 55, 100)
    #    self.assertTrue(np.array_equal((self.RPD.interpolate(iv))['My'].v, np.array(iv)))
    #    self.assertTrue(np.array_equal((self.RPD.interpolate(iv))['Mx'].v[1:-1], np.array([2., 4., 6.])))


    def test_magnitude(self):
        self.RPD.define_alias('m', ('Mx', 'My', 'Mz'))
        self.RPD = self.RPD.append_columns('mag', self.RPD.magnitude('m'))
        np.testing.assert_allclose(self.RPD['mag'].v, np.array([5.38516481, 12.20655562, 16.40121947, 86.12200648]), atol=1e-5)


    def test_column_names_to_indices(self):
        self.assertEqual( self.RPD.column_names_to_indices(('Mx', 'Mz')), [1,3])

    # NOTE(review): method name is likely a typo for test_iteration; left
    # unchanged since unittest discovers it by the 'test' prefix either way
    def test_interation(self):
        # TODO: add proper assertion
        for l in self.RPD:
            #print l
            pass

    def test_add_errors(self):
        d = RockPyData(column_names=['A', 'B'])
        #d['A'].v = 1  # Attribute Error NoneType has no attribute, maybe initialize to np.nan?
        #d['B'] = 2
        #d['A'].e = 4
        #d['B'].e = 5
        d = d.append_rows([1, 2])
        # errors are assigned separately from values
        d.e = [[4, 5]]

        self.assertEqual(5., d['B'].e)

    def test_data_assignment(self):
        print self.RPD
        # set only values
        self.RPD['Mx'] = [1.1, 1.2, 1.3, 1.4]
        print self.RPD
        # set values and errors
        self.RPD['Mx'] = [[[1.1, 0.11]], [[1.2, 0.12]], [[1.3, 0.13]], [[1.4, 0.14]]]
        print self.RPD
Example #44
0
class Parm_Spectra(base.Measurement):
    '''
    pARM spectra measurement.

    Parses SushiBar pARM output into two RockPyData tables: ``af3`` (the
    AF3 baseline, first data block) and ``data`` (the spectra themselves),
    and provides plotting helpers for the spectra and the cumulative
    acquisition curve.
    '''
    def __init__(self,
                 sample_obj,
                 mtype,
                 mfile,
                 machine,
                 mag_method='',
                 **options):
        """
        Parameters
        ----------
           sample_obj: Sample
              sample the measurement belongs to
           mtype: str
              measurement type
           mfile: str
              path to the machine file
           machine: str
              machine identifier
           mag_method: str
              kept for interface compatibility; not used here
        """
        super(Parm_Spectra, self).__init__(sample_obj, mtype, mfile, machine,
                                           **options)

    def format_sushibar(self):
        """Build self.af3 and self.data from the machine output.

        Both tables get a 'mag' column (magnitude of x, y, z) and a
        'mean_window' column (midpoint of the AF window limits);
        'mean_window' becomes the variable of self.data.
        """
        data = self.machine_data.out_parm_spectra()
        # first row block is the AF3 baseline measurement
        self.af3 = RockPyData(column_names=data[1],
                              data=data[0][0],
                              units=data[2])
        self.af3.define_alias('m', ('x', 'y', 'z'))
        self.af3 = self.af3.append_columns('mag', self.af3.magnitude('m'))
        self.af3 = self.af3.append_columns(
            column_names='mean_window',
            data=np.array(
                [self.af3['upper_window'].v + self.af3['lower_window'].v])[0] /
            2)

        # remaining row blocks are the actual pARM spectra
        self.data = RockPyData(column_names=data[1],
                               data=data[0][1:],
                               units=data[2])
        self.data.define_alias('m', ('x', 'y', 'z'))
        self.data = self.data.append_columns('mag', self.data.magnitude('m'))
        self.data = self.data.append_columns(
            column_names='mean_window',
            data=np.array([
                self.data['upper_window'].v + self.data['lower_window'].v
            ])[0] / 2)
        self.data.define_alias('variable', 'mean_window')

    def _get_cumulative_data(self, subtract_af3=True):
        """Return a copy of self.data whose x/y/z columns are replaced by
        their running sums (pARM acquisition curve), optionally with the
        AF3 baseline subtracted first."""
        cumulative_data = deepcopy(self.data)

        if subtract_af3:
            cumulative_data['x'] = cumulative_data['x'].v - self.af3['x'].v
            cumulative_data['y'] = cumulative_data['y'].v - self.af3['y'].v
            cumulative_data['z'] = cumulative_data['z'].v - self.af3['z'].v
            cumulative_data['mag'] = cumulative_data.magnitude('m')

        # NOTE(review): the slice [:i] excludes the current row, so the first
        # cumulative value is 0 and the last row never contributes — confirm
        # this offset is intended (otherwise [:i + 1] would be the usual form)
        cumulative_data['x'] = [
            np.sum(cumulative_data['x'].v[:i])
            for i, v in enumerate(cumulative_data['x'].v)
        ]
        cumulative_data['y'] = [
            np.sum(cumulative_data['y'].v[:i])
            for i, v in enumerate(cumulative_data['y'].v)
        ]
        cumulative_data['z'] = [
            np.sum(cumulative_data['z'].v[:i])
            for i, v in enumerate(cumulative_data['z'].v)
        ]
        cumulative_data['mag'] = cumulative_data.magnitude('m')

        return cumulative_data

    def plt_parm_spectra(self,
                         subtract_af3=True,
                         rtn=False,
                         norm=False,
                         fill=False):
        """Plot the pARM spectra (mag vs. mean AF window).

        Parameters
        ----------
           subtract_af3: bool
              subtract the AF3 baseline before plotting
           rtn: bool
              if True, suppress plt.show() so a caller can compose the figure
           norm: bool
              normalize magnitudes to their maximum
           fill: bool
              draw a filled area instead of a line
        """
        plot_data = deepcopy(self.data)

        if subtract_af3:
            plot_data['x'] = plot_data['x'].v - self.af3['x'].v
            plot_data['y'] = plot_data['y'].v - self.af3['y'].v
            plot_data['z'] = plot_data['z'].v - self.af3['z'].v
            plot_data['mag'] = plot_data.magnitude('m')

        if norm:
            norm_factor = max(plot_data['mag'].v)
        else:
            norm_factor = 1

        if fill:
            plt.fill_between(plot_data['mean_window'].v,
                             0,
                             plot_data['mag'].v / norm_factor,
                             alpha=0.1,
                             label='pARM spetra')
        else:
            # bug fix: a stray 0 was passed between x and y (copied from the
            # fill_between call), which made plt.plot treat the magnitude
            # array as a format string and fail
            plt.plot(plot_data['mean_window'].v,
                     plot_data['mag'].v / norm_factor,
                     label='pARM spetra')
        if not rtn:
            plt.show()

    def plt_parm_acquisition(self, subtract_af3=True, rtn=False, norm=False):
        """Plot the cumulative pARM acquisition curve
        (see _get_cumulative_data)."""
        plot_data = self._get_cumulative_data(subtract_af3=subtract_af3)

        if norm:
            norm_factor = max(plot_data['mag'].v)
        else:
            norm_factor = 1

        plt.plot(plot_data['mean_window'].v,
                 plot_data['mag'].v / norm_factor,
                 label='pARM acquisition')

        if not rtn:
            plt.show()

    def plt_acq_spec(self, subtract_af3=True, norm=True):
        """Plot spectra (filled) and acquisition curve in a single figure."""
        self.plt_parm_spectra(subtract_af3=subtract_af3,
                              rtn=True,
                              norm=norm,
                              fill=True)
        self.plt_parm_acquisition(subtract_af3=subtract_af3,
                                  rtn=True,
                                  norm=norm)
        plt.xlabel('AF field [mT]')
        plt.grid()
        plt.show()
Example #45
0
def test():
    """Demo / tutorial walkthrough of the RockPyData API: construction,
    JSON round-trip, arithmetic, aliases, column operations, filtering,
    interpolation and condensing (Python 2 print statements)."""
    # define some data for tutorials.rst
    testdata = ((1, 2, 3, 4),
                (2, 6, 7, 8),
                (9, 10, 11, 12))

    testdata2 = ((1, 1),
                 (2, 2),
                 (19, 3))

    # create a rockpydata object with named columns and filled with testdata
    d = RockPyData(column_names=('F', 'Mx', 'My', 'Mz'), row_names=('1.Zeile', '2.Zeile', '3.Zeile'),
                   units=('T', 'mT', 'fT', 'pT'), data=testdata)

    # round-trip through JSON serialization
    d_json = numpyson.dumps(d)
    print d_json
    dd = numpyson.loads(d_json)
    print repr(dd)

    print "dd:", dd

    #d = d.eliminate_duplicate_variable_rows(substfunc='last')
    #print d._find_unique_variable_rows()
    print('d:\n%s' % d)

    # NOTE(review): four units are passed for only two columns here — verify
    e = RockPyData(column_names=('F', 'Mx'), row_names=('1.Zeile', '2.Zeile', '3.Zeile'),
                   units=('T', 'mT', 'fT', 'pT'), data=testdata2)

    print('e:\n%s' % e)

    # elementwise arithmetic between two RockPyData objects
    print('e+d:\n%s' % (e+d))
    print('e-d:\n%s' % (e-d))
    print('e/d:\n%s' % (e/d))
    print('e*d:\n%s' % (e*d))

    print('d/e:\n%s' % (d/e))
    print('d*e:\n%s' % (d*e))
    print('d+e:\n%s' % (d+e))
    print('d-e:\n%s' % (d-e))

    print d.units
    # define as many aliases as you want
    d.define_alias('M', ('Mx', 'My', 'Mz'))
    d.define_alias('Mzx', ('Mz', 'Mx'))

    # show some data
    # aliases 'all', 'variable' and 'dep_var' are predefined
    print('all:\n%s' % d['all'])
    print('Mzx:\n%s' % d['Mzx'])

    # lets alter some data
    d['Mx'] = np.array((13, 24, 35))

    # show M with modified Mx component
    print('M:\n%s' % d['M'])
    # show Mx
    print('Mx:\n%s' % d['Mx'])
    # we can also alter several columns at once
    d['M'] = ((2, 3, 4),
              (18, 88, 98),
              (39, 89, 99))
    print('M:\n%s' % d['M'])

    # some math fun
    # calculate magnitude of vector 'M' and save it as new column 'magM'
    d = d.append_columns('magM', d.magnitude('M'))

    # calculate values of 'magM' normalized to 100
    #d.append_columns('normM', d.normalize('magM', 100))

    # we can also add arbitrary data in a new column
    d = d.append_columns(("T",), np.array((1, 2, 3)))

    # we can also add an empty column
    d = d.append_columns(("empty",))

    # renaming a column
    d.rename_column('T', 'temp')

    # show all data again, now including magM and T as the last two columns
    print d

    # do a plot of F vs magM
    # plt.plot(d['F'], d['magM'])
    # plt.show()

    # fancy filtering of data
    tf_array = (d['Mx'].v > 10) & (d['Mx'].v < 20)
    print 'filtering:'
    filtered_d = d.filter(tf_array)
    print filtered_d['Mx']

    # arithmetic operations
    e = deepcopy(d)
    # multiply one column by a value
    e['Mx'].v *= 2
    # calculate difference of two rockpydata objects
    #c = e - d
    #print c

    #c = e + d
    #print c

    #c = e / d
    #print c

    #c = e * d
    #print c

    #print repr(c)

    # test single line object
    l = RockPyData(column_names=('A', 'B', 'C', 'D'), row_names=('1.Zeile',),
                   units=('T', 'mT', 'fT', 'pT'), data=((1, 2, 3, 4),))
    l = l.append_columns('X', (5,))
    print l

    print l['X']

    #print d.mean()
    print d
    print d + (1, 2, 3, 4, 5, 6)

    print d.interpolate(np.arange(2, 10, .5), includesourcedata=True)
    #d.define_alias('variable', 'Mx')
    #print d.interpolate(np.arange(0, 10, .5))

    print "condense:"
    print condense([d, d*2, d*3], substfunc='median')
Example #46
0
 def format_pmd(self):
     """Parse AF demagnetization output from the machine file into the
     'data' entry of _raw_data, adding a 'mag' magnitude column."""
     table = RockPyData(column_names=['field', 'x', 'y', 'z'],
                        data=self.machine_data.out_afdemag())
     table.define_alias('m', ('x', 'y', 'z'))
     self._raw_data['data'] = table.append_columns('mag', table.magnitude('m'))
Example #47
0
class SampleGroup(object):
    """
    Container for Samples, has special calculation methods
    """
    log = logging.getLogger(__name__)

    count = 0

    def __init__(self, name=None, sample_list=None, sample_file=None, **options):
        SampleGroup.count += 1

        SampleGroup.log.info('CRATING new << samplegroup >>')

        # ## initialize
        if name is None:
            name = 'SampleGroup %04i' % (self.count)

        self.name = name
        self.samples = {}
        self.results = None

        self.color = None

        if sample_file:
            self.import_multiple_samples(sample_file, **options)

        self._info_dict = self.__create_info_dict()

        if sample_list:
            self.add_samples(sample_list)

    def __getstate__(self):
        '''
        returned dict will be pickled
        :return:
        '''
        state = {k: v for k, v in self.__dict__.iteritems() if k in
                 (
                     'name',
                     'samples',
                     'results'
                 )
                 }

        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        # self.recalc_info_dict()

    def __repr__(self):
        # return super(SampleGroup, self).__repr__()
        return "<RockPy.SampleGroup - << %s - %i samples >> >" % (self.name, len(self.sample_names))

    def __getitem__(self, item):
        if item in self.sdict:
            return self.samples[item]
        try:
            return self.sample_list[item]
        except KeyError:
            raise KeyError('SampleGroup has no Sample << %s >>' % item)

    def import_multiple_samples(self, sample_file, length_unit='mm', mass_unit='mg', **options):
        """
        Import a tab separated file with sample names, masses and dimensions
        and create one Sample object per data row, stored in self.samples.

        :param sample_file: path to the tab separated file; first kept row is the header
        :param length_unit: unit of the height/diameter columns (default 'mm')
        :param mass_unit: unit of the mass column (default 'mg')
        :return: None
        """
        # NOTE(review): the file handle is never closed -- consider a 'with' block
        reader_object = csv.reader(open(sample_file), delimiter='\t')
        # NOTE(review): "'#' in i" tests whether any *field* of the row equals
        # '#', not whether the line starts with '#' -- confirm intended filter
        r_list = [i for i in reader_object if not '#' in i]
        header = r_list[0]
        # {name: {column_name: float value}} for every data row after the header
        d_dict = {i[0]: {header[j].lower(): float(i[j]) for j in range(1, len(i))} for i in r_list[1:]}
        for sample in d_dict:
            mass = d_dict[sample].get('mass', None)
            height = d_dict[sample].get('height', None)
            diameter = d_dict[sample].get('diameter', None)
            S = Sample(sample, mass=mass, height=height, diameter=diameter, mass_unit=mass_unit,
                       length_unit=length_unit)
            self.samples.update({sample: S})

    def pop_sample(self, sample_name):
        """
        remove samples from sample_group will take str(sample_name), list(sample_name)
        """
        if not isinstance(sample_name, list):
            sample_name = [sample_name]
        for sample in sample_name:
            if sample in self.samples:
                self.samples.pop(sample)
        return self

    # ### DATA properties
    @property
    def sample_list(self):
        return self.slist

    @property
    def sample_names(self):
        return sorted(self.samples.keys())

    def add_samples(self, s_list):
        """
        Add sample(s) to the group and register the group on each sample.

        Parameters
        ----------
           s_list: single item or list
              single items get transformed to list

        Note
        ----
           Uses _to_list for list conversion
        """
        samples = _to_list(s_list)
        self.samples.update(self._sdict_from_slist(s_list=samples))
        self.log.info('ADDING sample(s) %s' % [s.name for s in samples])
        for sample in samples:
            # each sample keeps a back-reference to the groups it belongs to
            sample.sgroups.append(self)
            self.add_s2_info_dict(sample)

    def remove_samples(self, s_list):
        """
        Remove sample(s) by name from the group and de-register the group
        from each removed sample's sgroups list.

        Parameters
        ----------
           s_list: single name or list of names
              single items get transformed to list

        Note
        ----
           Uses _to_list for list conversion
        """
        for name in _to_list(s_list):
            self.sdict[name].sgroups.remove(self)
            self.samples.pop(name)

    # ## components of container
    # lists
    @property
    def slist(self):
        out = [self.samples[i] for i in sorted(self.samples.keys())]
        return out

    @property
    def sdict(self):
        return {s.name: s for s in self.sample_list}

    @property
    def mtypes(self):
        # NOTE(review): dead code -- shadowed by the mtypes property defined
        # further down in this class, so this implementation is never used.
        # It also builds a list of per-sample *lists*, which __sort_list_set
        # could not hash. Confirm and remove.
        out = [sample.mtypes for sample in self.sample_list]
        return self.__sort_list_set(out)

    @property
    def stypes(self):
        # NOTE(review): dead code -- shadowed by the stypes property defined
        # further down in this class, so this implementation is never used.
        # Confirm and remove.
        out = []
        for sample in self.sample_list:
            out.extend(sample.stypes)
        return self.__sort_list_set(out)

    @property
    def svals(self):
        out = []
        for sample in self.sample_list:
            out.extend(sample.svals)
        return self.__sort_list_set(out)

    # measurement: samples
    @property
    def mtype_sdict(self):
        """Mapping mtype -> samples that have measurements of that mtype."""
        out = {mtype: self.get_samples(mtypes=mtype) for mtype in self.mtypes}
        return out

    # mtype: stypes
    @property
    def mtype_stype_dict(self):
        """
        Mapping mtype -> sorted unique series (treatment) types found on
        measurements of that mtype.
        """
        out = {}
        for mtype in self.mtypes:
            aux = []
            for s in self.get_samples(mtypes=mtype):
                for t in s.mtype_tdict[mtype]:
                    aux.extend([t.stype])
            out.update({mtype: self.__sort_list_set(aux)})
        return out

    # mtype: svals
    @property
    def mtype_svals_dict(self):
        """
        Mapping mtype -> sorted unique series values found on measurements
        of that mtype.
        """
        out = {}
        for mtype in self.mtypes:
            aux = []
            for s in self.get_samples(mtypes=mtype):
                for t in s.mtype_tdict[mtype]:
                    aux.extend([t.value])
            out.update({mtype: self.__sort_list_set(aux)})
        return out

    @property
    def stype_sval_dict(self):
        stype_sval_dict = {i: self._get_all_series_values(i) for i in self.stypes}
        return stype_sval_dict

    @property
    def mtype_dict(self):
        m_dict = {i: [m for s in self.sample_list for m in s.get_measurements(i)] for i in self.mtypes}
        return m_dict

    def _get_all_series_values(self, stype):
        return sorted(list(set([n.value for j in self.sample_list for i in j.measurements for n in i.series
                                if n.stype == stype])))

    @property
    def mtypes(self):
        """
        looks through all samples and returns measurement types
        """
        return sorted(list(set([i.mtype for j in self.sample_list for i in j.measurements])))

    @property
    def stypes(self):
        """
        looks through all samples and returns measurement types
        """
        return sorted(list(set([t for sample in self.sample_list for t in sample.stypes])))

    def stype_results(self, **parameter):
        """
        Group the (cached) results table by series type and value.

        Runs calc_all first if no results are cached.  Returns
        {stype: {rounded sval: RockPyData subset of matching rows}}.
        """
        if not self.results:
            self.results = self.calc_all(**parameter)
        # result columns named 'stype <name>' hold the series value per row
        stypes = [i for i in self.results.column_names if 'stype' in i]
        out = {i.split()[1]: {round(j, 2): None for j in self.results[i].v} for i in stypes}

        for stype in out:
            for sval in out[stype]:
                key = 'stype ' + stype
                # NOTE(review): svals were rounded to 2 digits above but are
                # compared unrounded here -- confirm this cannot miss rows
                idx = np.where(self.results[key].v == sval)[0]
                out[stype][sval] = self.results.filter_idx(idx)
        return out

    def _sdict_from_slist(self, s_list):
        """
        Build a {sample.name: sample} dictionary from a sample or list of samples.

        Parameters
        ----------
           s_list: sample or list
        Returns
        -------
           dict
              {sample.name : sample} for each sample in s_list

        Note
        ----
           uses _to_list for item -> list conversion
        """
        return {sample.name: sample for sample in _to_list(s_list)}

    def calc_all(self, **parameter):
        """
        Run calc_all on every sample and collect all sample results into one
        group-level RockPyData table (row-named by sample name).

        :param parameter: calculation parameters passed to sample.calc_all
        :return: the accumulated RockPyData results table
        """
        for sample in self.sample_list:
            label = sample.name
            sample.calc_all(**parameter)
            results = sample.results
            if self.results is None:
                # first sample initializes the group results table
                self.results = RockPyData(column_names=results.column_names,
                                          data=results.data, row_names=[label for i in results.data])
            else:
                # subsequent samples are appended as extra rows
                rpdata = RockPyData(column_names=results.column_names,
                                    data=results.data, row_names=[label for i in results.data])
                self.results = self.results.append_rows(rpdata)
        return self.results

    def average_results(self, **parameter):
        """
        makes averages of all calculations for all samples in group. Only samples with same series are averaged

        :param parameter: calculation parameters passed to stype_results;
           'substfunc' (default 'mean') selects the condensation function
        :return: dict stype -> condensed RockPyData table
        """
        substfunc = parameter.pop('substfunc', 'mean')
        out = None
        stype_results = self.stype_results(**parameter)
        for stype in stype_results:
            for sval in sorted(stype_results[stype].keys()):
                aux = stype_results[stype][sval]
                aux.define_alias('variable', 'stype ' + stype)
                aux = condense(aux, substfunc=substfunc)
                if out is None:  # identity test instead of '== None' (PEP 8)
                    out = {stype: aux}
                else:
                    # NOTE(review): if a second stype is reached while out is
                    # already initialized, out[stype] raises KeyError -- confirm
                    # whether out.setdefault(stype, ...) was intended
                    out[stype] = out[stype].append_rows(aux)
        return out

    def __add__(self, other):
        """Union of two groups: a new SampleGroup holding the samples of both."""
        merged = SampleGroup(sample_list=self.sample_list)
        merged.samples.update(other.samples)
        return merged

    def _mlist_to_tdict(self, mlist):
        """
        takes a list of measurements looks for common stypes
        """
        stypes = sorted(list(set([m.stypes for m in mlist])))
        return {stype: [m for m in mlist if stype in m.stypes] for stype in stypes}

    def get_measurements(self,
                         snames=None,
                         mtypes=None,
                         series=None,
                         stypes=None, svals=None, sval_range=None,
                         mean=False,
                         invert=False,
                         **options):
        """
        Wrapper, for finding measurements, calls get_samples first and sample.get_measurements

        Returns the concatenated measurement lists of all matching samples.
        """
        samples = self.get_samples(snames, mtypes, stypes, svals, sval_range)
        out = []
        for sample in samples:
            try:
                out.extend(sample.get_measurements(mtypes=mtypes,
                                                   series=series,
                                                   stypes=stypes, svals=svals, sval_range=sval_range,
                                                   mean=mean,
                                                   invert=invert,
                                                   ))
            # NOTE(review): silently swallowing TypeError can hide real bugs in
            # sample.get_measurements -- confirm this is intentional
            except TypeError:
                pass
        return out

    def delete_measurements(self, sname=None, mtype=None, stype=None, sval=None, sval_range=None):
        """
        deletes measurements according to criteria
        """
        samples = self.get_samples(snames=sname, mtypes=mtype, stypes=stype, svals=sval,
                                   sval_range=sval_range)  # search for samples with measurement fitting criteria
        for sample in samples:
            sample.remove_measurements(mtypes=mtype, stypes=stype, svals=sval,
                                       sval_range=sval_range)  # individually delete measurements from samples

    def get_samples(self, snames=None, mtypes=None, stypes=None, svals=None, sval_range=None):
        """
        Primary search function for all parameters

        Parameters
        ----------
           snames: list, str
              list of names or a single name of the sample to be retrieved
           mtypes: list, str
              measurement types the samples must have
           stypes: list, str
              series types the samples must have
           svals: list, float
              series values the samples must have (evaluated per stype)
           sval_range: list or number
              [min, max] series value range; a bare number means [0, number]
        """
        if svals is None:
            t_value = np.nan
        else:
            t_value = svals

        out = []

        if snames:
            snames = _to_list(snames)
            for s in snames:
                try:
                    out.append(self.samples[s])
                except KeyError:
                    raise KeyError('RockPy.sample_group does not contain sample << %s >>' % s)
            # NOTE(review): unreachable -- the loop above either appended or
            # raised; the 'return' after 'raise' here (and below) is dead code
            if len(out) == 0:
                raise KeyError('RockPy.sample_group does not contain any samples')
                return

        else:
            out = self.sample_list

        if mtypes:
            mtypes = _to_list(mtypes)
            out = [s for s in out for mtype in mtypes if mtype in s.mtypes]

        # NOTE(review): this check is outside the 'if mtypes:' guard, so an
        # empty group raises an mtypes error even when no mtypes filter was
        # requested -- confirm intended
        if len(out) == 0:
            raise KeyError('RockPy.sample_group does not contain sample with mtypes: << %s >>' % mtypes)
            return

        if stypes:
            stypes = _to_list(stypes)
            out = [s for s in out for stype in stypes if stype in s.stypes]
            if len(out) == 0:
                raise KeyError('RockPy.sample_group does not contain sample with stypes: << %s >>' % stypes)
                return

        if svals:
            svals = _to_list(svals)
            out = [s for s in out for sval in svals for stype in stypes if sval in s.stype_sval_dict[stype]]
            if len(out) == 0:
                self.log.error(
                    'RockPy.sample_group does not contain sample with (stypes, svals) pair: << %s, %s >>' % (
                        str(stypes), str(t_value)))
                return []

        if sval_range:
            if not isinstance(sval_range, list):
                sval_range = [0, sval_range]
            else:
                if len(sval_range) == 1:
                    sval_range = [0] + sval_range

            # NOTE(review): 'stype' is used in the first generator clause but
            # only bound by the later 'for stype in stypes' clause; this only
            # worked on python 2 via the loop variable leaked from the earlier
            # comprehension -- under python 3 it raises NameError. Confirm and
            # swap the two 'for' clauses.
            out = [s for s in out for tv in s.stype_sval_dict[stype] for stype in stypes
                   if tv <= max(sval_range)
                   if tv >= min(sval_range)]

            if len(out) == 0:
                raise KeyError(
                    'RockPy.sample_group does not contain sample with (stypes, sval_range) pair: << %s, %.2f >>' % (
                        stypes, t_value))
                return

        if len(out) == 0:
            SampleGroup.log.error(
                'UNABLE to find sample with << %s, %s, %s, %.2f >>' % (snames, mtypes, stypes, t_value))

        return out

    def create_mean_sample(self,
                           reference=None,
                           ref_dtype='mag', vval=None,
                           norm_dtypes='all',
                           norm_method='max',
                           interpolate=True,
                           substfunc='mean',
                           ):
        """
        Creates a mean sample out of all samples

        :param reference: normalization reference passed to measurement.normalize
        :param ref_dtype: data type of the normalization reference (default 'mag')
        :param vval: variable value to normalize at
        :param norm_dtypes: data types to normalize (default 'all')
        :param norm_method: normalization method (default 'max')
        :param interpolate: interpolate measurements before averaging
        :param substfunc: function used for averaging (default 'mean')
        :return: Sample with mean_measurements filled and is_mean set
        """

        # create new sample_obj
        mean_sample = Sample(name='mean ' + self.name)
        # get all measurements from all samples in sample group and add to mean sample
        mean_sample.measurements = [m for s in self.sample_list for m in s.measurements]
        mean_sample.populate_mdict()

        for mtype in sorted(mean_sample.mdict['mtype_stype_sval']):
            # parameter-like mtypes (mass, dimensions) are not averaged
            if not mtype in ['mass', 'diameter', 'height', 'volume', 'x_len', 'y_len', 'z_len']:
                for stype in sorted(mean_sample.mdict['mtype_stype_sval'][mtype]):
                    for sval in sorted(mean_sample.mdict['mtype_stype_sval'][mtype][stype]):
                        series = None #initialize

                        # normalize if needed
                        if reference or vval:
                            for i, m in enumerate(mean_sample.mdict['mtype_stype_sval'][mtype][stype][sval]):
                                m = m.normalize(
                                    reference=reference, ref_dtype=ref_dtype,
                                    norm_dtypes=norm_dtypes,
                                    vval=vval, norm_method=norm_method)
                            # NOTE(review): 'm' here is the last measurement of
                            # the loop above (relies on the loop variable
                            # leaking) -- confirm that any measurement's series
                            # is representative for the whole sval group
                            series = m.get_series(stypes=stype, svals=sval)[0]
                            # print m, m.series, stype, sval

                        # calculating the mean of all measurements
                        M = mean_sample.mean_measurement(mtype=mtype, stype=stype, sval=sval,
                                                         substfunc=substfunc,
                                                         interpolate=interpolate,
                                                         # reference=reference, ref_dtype=ref_dtype,
                                                         # norm_dtypes=norm_dtypes,
                                                         # vval=vval, norm_method=norm_method,
                                                         )
                        if series:
                            M.add_sval(series_obj=series)
                        # print M.th
                        if reference or vval:
                            M.is_normalized = True
                            M.norm = [reference, ref_dtype, vval, norm_method, np.nan]

                        mean_sample.mean_measurements.append(M)

        mean_sample.is_mean = True  # set is_mean flag after all measurements are created
        return mean_sample

    def __get_variable_list(self, rpdata_list):
        out = []
        for rp in rpdata_list:
            out.extend(rp['variable'].v)
        return self.__sort_list_set(out)

    def __sort_list_set(self, values):
        """
        returns a sorted list of non duplicate values
        :param values:
        :return:
        """
        return sorted(list(set(values)))

    ''' INFODICT '''

    def __create_info_dict(self):
        """
        creates all info dictionaries

        Returns
        -------
           dict
              Dictionary with a permutation of sample ,type, stype and sval.
        """
        d = ['mtype', 'stype', 'sval']
        keys = ['_'.join(i) for n in range(5) for i in itertools.permutations(d, n) if not len(i) == 0]
        out = {i: {} for i in keys}
        return out

    # @profile()
    def add_s2_info_dict(self, s):
        """
        Adds a sample to the infodict.

        Walks every key of the info dict (each key is an '_'-joined permutation
        of mtype/stype/sval levels) and inserts the sample into the nested
        dict structure, with a sample list at the deepest level.

        Parameters
        ----------
           s: RockPySample
              The sample that should be added to the dictionary
        """

        keys = self.info_dict.keys()  # all possible keys

        for key in keys:
            # split keys into levels
            split_keys = key.split('_')
            for i, level in enumerate(split_keys):
                # i == level number, n == maximal level
                # if i == n _> last level -> list instead of dict
                n = len(split_keys) - 1

                # level 0
                for e0 in s.info_dict[key]:
                    # if only 1 level
                    if i == n == 0:
                        # create key with empty list
                        self._info_dict[key].setdefault(e0, list())
                        # add sample if not already in list
                        if not s in self._info_dict[key][e0]:
                            self._info_dict[key][e0].append(s)
                        continue
                    else:
                        # if not last entry generate key: dict() pair
                        self._info_dict[key].setdefault(e0, dict())

                    # level 1
                    for e1 in s.info_dict[key][e0]:
                        if i == n == 1:
                            self._info_dict[key][e0].setdefault(e1, list())
                            if not s in self._info_dict[key][e0][e1]:
                                self._info_dict[key][e0][e1].append(s)
                            continue
                        elif i > 0:
                            self._info_dict[key][e0].setdefault(e1, dict())

                            # level 2
                            for e2 in s.info_dict[key][e0][e1]:
                                if i == n == 2:
                                    self._info_dict[key][e0][e1].setdefault(e2, list())
                                    if not s in self._info_dict[key][e0][e1][e2]:
                                        self._info_dict[key][e0][e1][e2].append(s)
                                    continue
                                elif i > 1:
                                    self._info_dict[key][e0][e1].setdefault(e2, dict())

    def recalc_info_dict(self):
        """
        Recalculates the info_dictionary with information of all samples and their corresponding measurements

        """
        self._info_dict = self.__create_info_dict()
        map(self.add_s2_info_dict, self.slist)

    @property
    def info_dict(self):
        """
        Property for easy access of info_dict. If '_info_dict' has not been created, it will create one.
        """
        if not hasattr(self, '_info_dict'):
            self._info_dict = self.__create_info_dict()
            self.recalc_info_dict()
        return self._info_dict