Example #1
0
 def getDatasets(self, fields):
     """Validate the field names and build the single output dataset.

     Returns a list of Dataset objects (here a single Dataset1D, kept on
     self.ds_out); raises DatasetPluginException on invalid field values.
     """
     x_name = fields['ds_x']
     y_name = fields['ds_y']
     out_name = fields['ds_out']
     # Reject degenerate or colliding dataset name combinations.
     if x_name == y_name:
         raise plugins.DatasetPluginException(
             'X and Y datasets cannot be the same')
     if out_name in (x_name, y_name, ''):
         raise plugins.DatasetPluginException(
             'Input and output datasets cannot be the same.')
     # Create the named output dataset and hand it back to Veusz.
     self.ds_out = plugins.Dataset1D(out_name)
     return [self.ds_out]
Example #2
0
    def updateDatasets(self, fields, helper):
        """Smooth a slice of the input dataset and update self.ds_out.

        fields: dict with 'ds_in', 'ds_out', 'window' (int width),
            'method' (window type), 'start_index'/'end_index' slice bounds
            (end_index == 0 means "to the end of the data").
        helper: Veusz helper giving access to other document datasets.
        Raises plugins.DatasetPluginException on invalid parameters.
        """
        # get the input dataset - helper provides methods for getting other
        # datasets from Veusz
        ds_in = helper.getDataset(fields['ds_in'])
        window = fields['window']
        method = fields['method']
        start_index = fields['start_index']
        end_index = fields['end_index']

        if end_index == 0:
            end_index = len(ds_in.data)

        # Only the [start_index:end_index] slice is smoothed.
        x = numpy.array(ds_in.data)
        x = x[start_index:end_index]

        if ds_in == helper.getDataset(fields['ds_out']):
            raise plugins.DatasetPluginException(
                "Input and output datasets should differ.")
        if x.ndim != 1:
            raise plugins.DatasetPluginException(
                "smooth only accepts 1 dimension arrays.")
        if x.size < window:
            raise plugins.DatasetPluginException(
                "Input vector needs to be bigger than window size.")
        if window < 3:
            raise plugins.DatasetPluginException("Window is too small.")
        # FIX: idiomatic `not in`; corrected the "Mehtod" typo in the
        # user-facing error message and made it grammatical.
        if method not in (
                'flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
            raise plugins.DatasetPluginException(
                "Method must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'"
            )
        # Smooth the slice, then splice it back between the untouched ends.
        y = smooth(x, window, method)
        y = numpy.concatenate(
            (ds_in.data[0:start_index], y, ds_in.data[end_index:]))

        # update output dataset with smoothed data, keeping input errorbars
        self.ds_out.update(data=y,
                           serr=ds_in.serr,
                           perr=ds_in.perr,
                           nerr=ds_in.nerr)
        return [self.ds_out]
 def getDatasets(self, fields):
     """Build the main output dataset plus its automatic companions.

     Returns three Dataset1D objects: the main output (self.ds_out), a
     temperature dataset ('<name>_T') and an equilibrium-length dataset
     ('<name>_L'). Raises DatasetPluginException on bad field values.
     """
     out_name = fields['ds_out']
     if out_name == '':
         raise plugins.DatasetPluginException('Invalid output dataset name')
     # The output name must not collide with any of the three inputs.
     if out_name in [fields['ds0'], fields['ds1'], fields['ds2']]:
         raise plugins.DatasetPluginException(
             'Input and output datasets cannot be the same.')
     # Main output dataset plus the two derived companion datasets.
     self.ds_out = plugins.Dataset1D(out_name)
     self.ds_T = plugins.Dataset1D(out_name + '_T')
     self.ds_len = plugins.Dataset1D(out_name + '_L')
     return [self.ds_out, self.ds_T, self.ds_len]
Example #4
0
 def getDatasets(self, fields):
     """Returns single output dataset (self.ds_out).

     Validates that curve A and curve B each use distinct X/Y datasets
     and that the output name does not collide with any input dataset.
     Raises plugins.DatasetPluginException on invalid field values.
     """
     if fields['ax'] == fields['ay']:
         raise plugins.DatasetPluginException(
             'Curve A (X,Y) values must differ')
     if fields['bx'] == fields['by']:
         raise plugins.DatasetPluginException(
             'Curve B (X,Y) value must differ')
     # FIX: fields['ay'] was missing from the collision check, so the
     # output dataset could silently overwrite curve A's Y input.
     if fields['ds_out'] in (fields['ax'], fields['ay'], fields['bx'],
                             fields['by'], ''):
         raise plugins.DatasetPluginException(
             'Input and output datasets cannot be the same.')
     # make a new dataset with name in fields['ds_out']
     self.ds_out = plugins.Dataset1D(fields['ds_out'])
     self.error = 0
     # return list of datasets
     return [self.ds_out]
Example #5
0
 def getDatasets(self, fields):
     """Create and return the single output dataset (self.ds_out).

     Raises DatasetPluginException if the output name is empty.
     """
     out_name = fields['ds_out']
     # An empty string is not a usable dataset name.
     if out_name == '':
         raise plugins.DatasetPluginException('Invalid output dataset name')
     logging.debug('DSOUT', fields)
     self.ds_out = plugins.Dataset1D(out_name)
     return [self.ds_out]
Example #6
0
def get_from_unit(ds, to_unit):
    """Get starting conversion unit for dataset `ds0` to convert it to `to_unit`.

    Returns (from_unit, to_unit, from_group, to_group); raises
    DatasetPluginException when the dataset carries no unit or the
    conversion crosses incompatible dimension groups.
    """
    from_unit = getattr(ds, 'unit', False)
    # Both ends must be real units: missing source unit or an
    # empty/placeholder target is rejected.
    if not from_unit or to_unit in ['None', '', None, False]:
        raise plugins.DatasetPluginException(
            'Selected dataset does not have a measurement unit.')
    # Implicit To-From percentage conversion
    from_group = known_units[from_unit]
    to_group = known_units[to_unit]
    if from_group != to_group:
        # Only percentage ('part') conversions may cross dimension groups.
        if 'part' not in (from_group, to_group):
            raise plugins.DatasetPluginException(
                'Incompatible conversion: from {} to {}'.format(
                    from_unit, to_unit))
        if to_group == 'part':
            from_unit = 'percent'
        elif from_group == 'part':
            # Guess a default unit for the destination dimension,
            # preferring the unit the dataset had before conversion.
            from_unit = getattr(ds, 'old_unit', user_defaults[to_group])

    return from_unit, to_unit, from_group, to_group
Example #7
0
def percent_func(ds, action='To Absolute', auto=True):
    """Returns the function used to convert dataset `ds` to percent or back to absolute.

    ds: dataset carrying `data` and optionally `m_initialDimension`/`unit`.
    action: 'To Absolute' or 'To Percent'.
    auto: when True and no initial dimension is set, derive one
        automatically (mean of the first 5 data points) for 'To Percent'.
    Raises plugins.DatasetPluginException on missing initial dimension or
    unknown action.
    """
    ini = getattr(ds, 'm_initialDimension', False)
    # Auto initial dimension
    if not ini:
        if not auto or action != 'To Percent':
            raise plugins.DatasetPluginException(
                'Selected dataset does not have an initial dimension set. \
        Please first run "Initial dimension..." tool. {}{}'.format(action, ds))
        ds.m_initialDimension = np.array(ds.data[:5]).mean()

    ini = ds.m_initialDimension
    if action == 'To Absolute':
        # If current dataset unit is not percent, convert to
        u = getattr(ds, 'unit', 'percent')
        convert_func = Converter.convert_func(u, 'percent')
        func = lambda out: convert_func(out * ini / 100.)
    elif action == 'To Percent':
        func = lambda out: 100. * out / ini
    else:
        # FIX: previously an unrecognized action fell through and the
        # final `return func` raised an opaque UnboundLocalError.
        raise plugins.DatasetPluginException(
            'Unknown percent action: {}'.format(action))
    return func
Example #8
0
def do(fields, helper):
    """Actually do the CurveOperationPlugin calculation.

    Evaluates the numexpr expression fields['operation'] over curves
    A (ax, ay) and B (bx, by). When the curves share an X dataset the
    expression is applied directly; otherwise B is resampled onto A's
    X grid via a least-squares spline. Returns (out, error) where
    `error` is the spline residual (0 for the direct path).

    NOTE(review): when fields['tolerance'] <= 0 the rectification block
    is skipped but `rax`/`rbx` are still used below -> NameError.
    Confirm whether tolerance is guaranteed positive by the caller.
    """
    op = fields['operation'].lower()
    xtol = fields['tolerance']
    # get the input dataset - helper provides methods for getting other
    # datasets from Veusz
    ay = np.array(helper.getDataset(fields['ay']).data)
    ax = np.array(helper.getDataset(fields['ax']).data)
    N = len(ax)
    if len(ay) != N:
        raise plugins.DatasetPluginException(
            'Curve A X,Y datasets must have same length')
    by = np.array(helper.getDataset(fields['by']).data)
    bx = np.array(helper.getDataset(fields['bx']).data)
    Nb = len(bx)
    if len(by) != Nb:
        raise plugins.DatasetPluginException(
            'Curve B X,Y datasets must have same length')
    error = 0

    # Relativize: shift curve B so both curves start from the same Y value
    if fields['relative']:
        d = by[0] - ay[0]
        by -= d
        logging.debug('relative correction', d)

    # If the two curves share the same X dataset, directly operate
    # (no interpolation needed; residual error is reported as 0)
    if fields['bx'] == fields['ax']:
        out = numexpr.evaluate(op, local_dict={'a': ay, 'b': by})
        return out, 0

    # Smooth x data
    if fields['smooth']:
        ax = utils.smooth(ax)
        bx = utils.smooth(bx)

    # Rectify x datas so they can be used for interpolation
    # (spline fitting below requires strictly increasing x values)
    if xtol > 0:
        rax, dax, erra = utils.rectify(ax)
        rbx, dbx, errb = utils.rectify(bx)
        logging.debug('rectification errors', erra, errb)
        if erra > xtol or errb > xtol:
            raise plugins.DatasetPluginException(
                'X Datasets are not comparable in the required tolerance.')
    # TODO: manage extrapolation!
    # Get rectified B(x) spline for B(y)
    logging.debug('rbx', rbx[-1] - bx[-1], rbx)
    logging.debug('by', by)
    # Knot placement: skip a ~10% margin at each end, then space knots
    # so roughly 100 of them cover the interior.
    N = len(rbx)
    margin = 1 + int(N / 10)
    step = 2 + int((N - 2 * margin) / 100)
    logging.debug( 'interpolating', len(rbx), len(by), margin, step)
    bsp = interpolate.LSQUnivariateSpline(rbx, by, rbx[margin:-margin:step]) #ext='const' scipy>=0.15
    error = bsp.get_residual()
    # Evaluate B(y) spline with rectified A(x) array
    b = bsp(rax)
    logging.debug('rax', rax[-1] - ax[-1], rax)
    logging.debug('a', ay)
    logging.debug('b', b, b[1000:1010])
#   np.save('/tmp/misura/rbx',rbx)
#   np.save('/tmp/misura/by',by)
#   np.save('/tmp/misura/rax',rax)
#   np.save('/tmp/misura/ay',ay)
    # Perform the operation using numexpr
    out = numexpr.evaluate(op, local_dict={'a': ay, 'b': b})
    logging.debug('out', out)
    return out, error
Example #9
0
    def apply(self, interface, fields):
        """Do the work of the plugin.
        interface: veusz command line interface object (exporting commands)
        fields: dict mapping field names to values

        Converts dataset fields['ds'] to/from percentage, updates
        dependent datapoints, and (if fields['propagate']) applies the
        same conversion to every dataset plotted on the same Y axis,
        then fixes the axis labels' unit symbols.
        NOTE(review): uses dict.iteritems() -> Python 2 only.
        """
        self.ops = []
        self.doc = interface.document
        # raise DatasetPluginException if there are errors
        ds = interface.document.data.get(fields['ds'], False)
        if not ds:
            raise plugins.DatasetPluginException('Dataset not found' +
                                                 fields['ds'])
        # Derived (plugin-generated) datasets cannot be converted in place.
        if isinstance(ds, document.datasets.Dataset1DPlugin):
            logging.error(
                'Cannot convert to percent a derived dataset. Please convert the source.'
            )
            return False

        # Resolve the concrete action and build the converted dataset.
        action = units.percent_action(ds, fields['action'])
        ds1 = units.percent_conversion(ds, action, fields['auto'])
        ds = ds1
        self.ops.append(document.OperationDatasetSet(fields['ds'], ds))
        #self.ops.append(document.OperationDatasetSetVal(fields['ds'], 'data',slice(None,None),ds1.data[:]))

        self.apply_ops()
        logging.debug('Converted %s %s using initial dimension %.2f.' %
                      (fields['ds'], fields['action'], ds.m_initialDimension))
        # 		QtGui.QMessageBox.information(None,'Percentage output',
        # 				'Converted %s %s using initial dimension %.2f.' % (fields['ds'], msg, ds.m_initialDimension))

        # updating all dependent datapoints
        convert_func = units.percent_func(ds, action, fields['auto'])
        utils.convert_datapoint_units(convert_func, fields['ds'], self.doc)

        if not fields['propagate']:
            return
        # Find all datasets plotted with the same Y axis
        cvt = []
        tree = get_plotted_tree(self.doc.basewidget)
        upax = []
        for axp, dslist in tree['axis'].iteritems():
            if not fields['ds'] in dslist:
                continue
            logging.debug('Propagating to', cvt)
            cvt += dslist
            upax.append(axp)
        # De-duplicate and exclude the dataset we already converted.
        cvt = list(set(cvt))
        if fields['ds'] in cvt:
            cvt.remove(fields['ds'])
        act = 'To Percent' if ds.m_percent else 'To Absolute'
        # Create a non-propagating percentage operation for each dataset found
        for nds in cvt:
            # Skip datasets already in the target percent state.
            ncur = getattr(self.doc.data[nds], 'm_percent', None)
            if ncur == ds.m_percent:
                continue
            logging.debug('Really propagating percentage to', nds)
            # NOTE(review): rebinding `fields` clobbers the outer dict
            # for the rest of the method; works because only these keys
            # are read afterwards, but fragile.
            fields = {
                'ds': nds,
                'propagate': False,
                'action': act,
                'auto': True
            }
            self.ops.append(
                document.OperationToolsPlugin(PercentPlugin(), fields))
        # Update axis labels: swap the old unit symbol for the new one.
        old = units.symbols.get(ds.old_unit, False)
        new = units.symbols.get(ds.unit, False)
        if old and new:
            for ax in upax:
                ax = self.doc.resolveFullWidgetPath(ax)
                lbl = ax.settings.label.replace(old, new)
                self.toset(ax, 'label', lbl)
        # Apply everything
        self.apply_ops('Percentage: Propagate')
    def apply(self, interface, fields):
        """Do the work of the plugin.
        interface: veusz command line interface object (exporting commands)
        fields: dict mapping field names to values

        Sets (or recomputes) the initial dimension m_initialDimension of
        dataset fields['ds'], optionally deriving it automatically from
        the first fields['num'] points (mean or linear regression), and
        re-scales percentage data accordingly. May show confirmation
        dialogs unless fields['suppress_messageboxes'] is set.
        """
        self.ops = []
        self.doc = interface.document
        # raise DatasetPluginException if there are errors
        ds = self.doc.data.get(fields['ds'], False)
        if not ds:
            raise plugins.DatasetPluginException('Dataset not found' +
                                                 fields['ds'])
        out = numpy.array(ds.data)
        # If data was converted to percentage, convert back to real numbers
        percent = getattr(ds, 'm_percent', False)
        if percent:
            out = out * ds.m_initialDimension / 100.
        # Calculate automatic initial value
        ini = fields['ini']
        n = fields['num']
        start = fields['start']
        if fields['auto']:
            # Refuse to average more than half of the dataset.
            if n > len(out) / 2:
                raise plugins.DatasetPluginException(
                    'Too many points used for calculation: %i/%i' %
                    (n, len(out)))
            # Optional X dataset (e.g. temperature) used to locate the
            # starting point and for linear regression.
            x = interface.document.data.get(fields['ds_x'], False)
            if x is not False:
                x = numpy.array(x.data)
            i = 0
            # Cut from start T: begin at the x value closest to `start`
            if start != -1 and x is not False:
                diff = abs(x - start)
                i = numpy.where(diff == min(diff))[0][0]
                x = x[i:]
            ini = out[i:i + n]
            if fields['method'] == 'mean':
                ini = ini.mean()
            elif fields['method'] == 'linear-regression':
                # Regression needs the X dataset to fit against.
                if x is False:
                    raise plugins.DatasetPluginException('Dataset not found' +
                                                         fields['ds_x'])

                (slope, const) = scipy.polyfit(x[:n], ini, 1)
                ini = x[0] * slope + const

        # Convert back to percent if needed
        ds1 = copy(ds)
        if percent:
            out = 100. * out / ini
            ds1.data = plugins.numpyCopyOrNone(out)
        # Ask for confirmation before overwriting a differing value.
        orig = getattr(ds, 'm_initialDimension', False)
        if orig and orig != ini and not fields['suppress_messageboxes']:
            repl = QtGui.QMessageBox.warning(
                None,
                'Initial dimension',
                'Changing initial dimension from %.2f to %.2f. Confirm?' %
                (orig, ini),
                QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel,
                defaultButton=QtGui.QMessageBox.Ok)
            if repl != QtGui.QMessageBox.Ok:
                QtGui.QMessageBox.information(None, 'Initial dimension',
                                              'Change cancelled')
                return
        ds1.m_initialDimension = ini
        self.ops.append(document.OperationDatasetSet(fields['ds'], ds1))
        self.apply_ops()
        if not fields['suppress_messageboxes']:
            QtGui.QMessageBox.information(
                None, 'Initial dimension output',
                'Initial dimension configured to %.2f' % ini)
Example #11
0
    def apply(self, interface, fields):
        """Do the work of the plugin.
        interface: veusz command line interface object (exporting commands)
        fields: dict mapping field names to values

        Converts dataset fields['ds'] to the unit fields['convert'],
        updates dependent datapoints and the file-wise time unit, and
        (if fields['propagate']) applies the same conversion to every
        dataset sharing a Y axis (or, for time datasets, every other
        time dataset), then fixes axis-label unit symbols.
        NOTE(review): uses dict.iteritems() -> Python 2 only.
        """
        self.ops = []
        self.doc = interface.document

        ds = interface.document.data.get(fields['ds'], False)
        if not ds:
            raise plugins.DatasetPluginException('Dataset not found' +
                                                 fields['ds'])

        # Build the converted dataset and register the replacement op.
        ds1 = units.convert(ds, fields['convert'])
        self.ops.append(document.OperationDatasetSet(fields['ds'], ds1))
        self.apply_ops()

        # Update DataPoints
        convert_func = units.convert_func(ds, fields['convert'])
        utils.convert_datapoint_units(convert_func, fields['ds'], self.doc)

        # Update file-wise time unit if ds is the time dataset
        if ds.linked and fields['ds'] == ds.linked.prefix + 't':
            ds.linked.params.time_unit = ds1.unit

        ####
        # PROPAGATION
        if not fields['propagate']:
            return
        # Find all datasets plotted with the same Y axis
        cvt = []
        tree = get_plotted_tree(self.doc.basewidget)
        upax = []
        for axp, dslist in tree['axis'].iteritems():
            if not fields['ds'] in dslist:
                continue
            logging.debug('Propagating to', cvt)
            cvt += dslist
            upax.append(axp)
        # If time dataset, propagate to all time datasets
        if ds.m_var == 't':
            for k, nds in self.doc.data.iteritems():
                if k == fields['ds']:
                    continue
                if getattr(nds, 'm_var', False) != 't':
                    continue
                cvt.append(k)
        cvt = list(set(cvt))
        # Create a non-propagating unit conversion operation for each dataset
        # found
        for nds in cvt:
            if nds == fields['ds']:
                continue
            # Datasets without a unit cannot be converted; skip them.
            ncur = getattr(self.doc.data[nds], 'unit', False)
            if not ncur:
                continue
            logging.debug('Really propagating unit conversion to', nds)
            # NOTE(review): rebinding `fields` clobbers the outer dict;
            # later iterations read 'convert' from the rebound dict,
            # which still carries the same value — works, but fragile.
            fields = {
                'ds': nds,
                'propagate': False,
                'convert': fields['convert']
            }
            self.ops.append(
                document.OperationToolsPlugin(UnitsConverterTool(), fields))
        # Update axis labels: swap the old unit symbol for the new one.
        old = units.symbols.get(ds.unit, False)
        new = units.symbols.get(fields['convert'], False)
        if old and new:
            for ax in upax:
                ax = self.doc.resolveFullWidgetPath(ax)
                lbl = ax.settings.label.replace(old, new)
                self.toset(ax, 'label', lbl)

        # Apply everything
        self.apply_ops('UnitsConverterTool: Propagate')