Example #1
def check_datalenuniform(dataset, minimum_points=1):
    if len(dataset.curves_data.all()) == 0:
        return
    ptnr = len(dataset.curves_data.all()[0].yVector)
    for cd in dataset.curves_data.all():
        if len(cd.yVector) < minimum_points:
            raise VoltPyFailed('Data needs to have at least %i data points.' %
                               minimum_points)
        elif len(cd.yVector) != ptnr:
            raise VoltPyFailed(
                'Each curve needs to have the same number of data points.')
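A minimal sketch of how check_datalenuniform might be exercised outside Django; VoltPyFailed, CurveStub, ManagerStub, and DatasetStub are hypothetical stand-ins for the real voltPy models and managers, assuming only the interface used above (curves_data.all() and yVector).

class VoltPyFailed(Exception):
    """Stand-in for voltPy's VoltPyFailed exception."""

class CurveStub:
    def __init__(self, yVector):
        self.yVector = yVector

class ManagerStub:
    def __init__(self, items):
        self._items = items

    def all(self):
        return self._items

class DatasetStub:
    def __init__(self, curves):
        self.curves_data = ManagerStub(curves)

ds = DatasetStub([CurveStub([0.1, 0.2, 0.3]), CurveStub([0.1, 0.2])])
try:
    check_datalenuniform(ds)
except VoltPyFailed as exc:
    print(exc)  # Each curve needs to have the same number of data points.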
Example #2
def check_sampling(dataset, same_sampling=True):
    if len(dataset.curves_data.all()) == 0:
        return
    ptnr = len(dataset.curves_data.all()[0].current_samples)
    if ptnr == 0:
        raise VoltPyFailed(
            'Data has to include multi-sampling (non-averaged).')
    if same_sampling:
        for cd in dataset.curves_data.all():
            if len(cd.current_samples) != ptnr:
                raise VoltPyFailed(
                    'Each curve needs to have the same sampling length.')
Example #3
    def finalize(self, user):
        xvalues = []
        yvalues = []
        selRange = SelectRange.getData(self.model)
        try:
            analyte = self.model.analytes.get(
                id=int(SelectAnalyte.getData(self.model)))
        except Exception:
            raise VoltPyFailed('Wrong analyte selected.')
        self.model.custom_data['analyte'] = analyte.name
        unitsTrans = dict(mmodels.Dataset.CONC_UNITS)
        self.model.custom_data['units'] = unitsTrans[
            self.model.dataset.analytes_conc_unit[analyte.id]]
        for cd in self.model.dataset.curves_data.all():
            startIndex = cd.xValue2Index(selRange[0])
            endIndex = cd.xValue2Index(selRange[1])
            if endIndex < startIndex:
                endIndex, startIndex = startIndex, endIndex
            yvalues.append(
                max(cd.yVector[startIndex:endIndex]) -
                min(cd.yVector[startIndex:endIndex]))
            xvalues.append(
                self.model.dataset.analytes_conc.get(analyte.id,
                                                     {}).get(cd.id, 0))

        if 0 not in xvalues:
            raise VoltPyFailed(
                'The method requires a signal value for concentration 0 %s' %
                self.model.custom_data['units'])
        data = [[float(b) for b in xvalues], [float(b) for b in yvalues]]
        self.model.custom_data['matrix'] = data
        p = calc_normal_equation_fit(data[0], data[1])
        sx0, sslope, sintercept = calc_sx0(p['slope'], p['intercept'], data[0],
                                           data[1])
        if p['slope'] != 0:
            self.model.custom_data['fitEquation'] = p
            self.model.custom_data['slopeStdDev'] = sslope
            self.model.custom_data['interceptStdDev'] = sintercept
            self.model.custom_data['result'] = p['intercept'] / p['slope']
            self.model.custom_data['resultStdDev'] = sx0
            self.model.custom_data['corrCoef'] = np.corrcoef(data[0],
                                                             data[1])[0, 1]
        else:
            self.model.custom_data['fitEquation'] = p
            self.model.custom_data['result'] = None
            self.model.custom_data['resultStdDev'] = None
            self.model.custom_data['corrCoef'] = None
        self.model.completed = True
        self.model.step = 0
        self.model.save()
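calc_normal_equation_fit is project code that is not shown in these examples. Assuming it fits a straight line by solving the normal equations, a self-contained sketch (normal_equation_fit is a hypothetical stand-in) could look like this:

import numpy as np

def normal_equation_fit(xvec, yvec):
    # Solve (A^T A) p = A^T y for p = [slope, intercept].
    A = np.column_stack([xvec, np.ones(len(xvec))])
    slope, intercept = np.linalg.solve(A.T @ A, A.T @ np.asarray(yvec, float))
    return {'slope': slope, 'intercept': intercept}

p = normal_equation_fit([0.0, 1.0, 2.0, 3.0], [1.1, 2.9, 5.2, 7.1])
print(p['slope'], p['intercept'])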
Example #4
        def best_fit_factor(SamplingPred, PotentialPred, ConcentrationPred):
            capac_r2 = 0
            capac_index = None
            for i, sp in enumerate(SamplingPred.T):
                x = np.array(range(sp.shape[0] - 1))
                if sp[1] > 0:
                    yvec = sp[1:]
                else:
                    yvec = np.dot(sp[1:], -1)
                capac_fit, capac_cov = fit_capacitive_eq(xvec=x,
                                                         yvec=yvec,
                                                         dE=dE)
                yc_pred = calc_capacitive(x, dE, *capac_fit)
                r2c = np.power(np.corrcoef(yc_pred, yvec)[0, 1], 2)

                if r2c > capac_r2:
                    capac_r2 = r2c
                    capac_index = i

            if capac_index is None:
                raise VoltPyFailed(
                    'Could not determine the capacitive factor.')

            chosen = {}
            chosen['x'] = SamplingPred[:, capac_index]
            chosen['y'] = PotentialPred[:, capac_index]
            chosen['z'] = ConcentrationPred[:, capac_index]
            return chosen
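fit_capacitive_eq and calc_capacitive are project helpers not shown here. The selection idea, keeping the factor whose sampling profile is best explained by a capacitive (exponential-like) decay, can be sketched with SciPy's curve_fit as a stand-in; the decay model and data below are illustrative assumptions, not the project's equations.

import numpy as np
from scipy.optimize import curve_fit

def decay(x, a, tau):
    return a * np.exp(-x / tau)

x = np.arange(20, dtype=float)
candidates = [np.sin(x / 3.0) + 2.0,      # not capacitive-like
              3.0 * np.exp(-x / 5.0)]     # capacitive-like
best_r2, best_i = 0.0, None
for i, y in enumerate(candidates):
    try:
        popt, _ = curve_fit(decay, x, y, p0=(1.0, 1.0))
    except RuntimeError:
        continue  # fit did not converge; skip this factor
    r2 = np.corrcoef(decay(x, *popt), y)[0, 1] ** 2
    if r2 > best_r2:
        best_r2, best_i = r2, i
print(best_i)  # expected: 1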
Example #5
 def apply(self, user, dataset):
     """
     This procedure cannot be applied to other data.
     """
     raise VoltPyFailed(
         'Self Referencing Background Correction does not support the apply function.'
     )
Example #6
def calc_sx0(slope, intercept, xvec, yvec):
    """
    Computes standard deviation of x0, slope, and intercept
    for slope, intercept and points [xvec, yvec]
    """
    yevec = np.polyval((slope, intercept),
                       xvec)  # [slope*x+intercept for x in xvec]
    x0index = -1
    minx = np.min(xvec)
    if minx == 0:
        x0index = np.argmin(xvec)
    else:
        raise VoltPyFailed(
            'Requires a data point at concentration 0.')
    xmean = np.average(xvec)
    sr = np.sqrt(1 / (len(xvec) - 2) * np.sum(
        [(yi - ye)**2 for yi, ye in zip(yvec, yevec)]))
    sxx = np.sum(np.power(np.subtract(xvec, xmean), 2))
    sx0 = np.multiply(
        (sr / slope),
        np.sqrt(1 + 1 / len(xvec) + (yvec[x0index] - np.average(yvec))**2 /
                (slope**2 * np.sum([(xi - xmean)**2 for xi in xvec]))))
    sSlope = np.divide(sr, np.sqrt(sxx))
    sIntercept = sr * np.sqrt(np.sum(np.power(xvec, 2)) / (len(xvec) * sxx))
    return sx0, sSlope, sIntercept
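A hedged usage sketch for calc_sx0 on synthetic calibration data (the values are illustrative; the function above and VoltPyFailed must be in scope, and the data must include the required concentration-0 point):

import numpy as np

xvec = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # includes concentration 0
yvec = 2.0 * xvec + 1.0 + np.array([0.02, -0.01, 0.03, -0.02, 0.01])
slope, intercept = np.polyfit(xvec, yvec, 1)
sx0, s_slope, s_intercept = calc_sx0(slope, intercept, xvec, yvec)
print(sx0, s_slope, s_intercept)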
Example #7
def check_dataset_integrity(dataset: mmodels.Dataset,
                            params_to_check: List[int]) -> None:
    if len(dataset.curves_data.all()) < 2:
        return
    cd1 = dataset.curves_data.all()[0]
    for cd in dataset.curves_data.all():
        for p in params_to_check:
            if cd.curve.params[p] != cd1.curve.params[p]:
                raise VoltPyFailed('All curves in dataset have to be similar.')
Example #8
    def _best_fit_factor(self, dE, concs, SamplingPred, PotentialPred,
                         ConcentrationPred):
        capac_index = -1
        capac_r2 = 0
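        # Note: the capacitive pre-screening below is disabled ('if False'),
        # so capac_index stays -1 and no factor is excluded from the search.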
        if False:
            for i, sp in enumerate(SamplingPred.T):
                if sp[1] > 0:
                    yvec = sp
                else:
                    yvec = np.dot(sp, -1)
                x = np.arange(0, len(yvec))

                capac_fit, capac_cov = fit_capacitive_eq(xvec=x,
                                                         yvec=yvec,
                                                         dE=dE)
                yc_pred = calc_capacitive(x, dE, *capac_fit)
                r2c = np.power(np.corrcoef(yc_pred, yvec)[0, 1], 2)
                """
                import matplotlib.pyplot as plt
                print(i, ':', r2c)
                plt.plot(sp, 'g')
                plt.plot(yc_pred, 'r')
                plt.show()
                """
                """
                farad_fit, farad_cov = fit_faradaic_eq(
                    xvec=x,
                    yvec=yvec
                )
                yf_pred = calc_faradaic(x, *farad_fit)
                r2f = np.power(np.corrcoef(yf_pred, yvec)[0, 1], 2)
                """

                if r2c > capac_r2:
                    capac_r2 = r2c
                    capac_index = i

        tobeat = 0
        best_factor = -1
        for i, cp in enumerate(ConcentrationPred.T):
            if i == capac_index:
                continue
            rr = np.corrcoef(cp, concs)
            if np.abs(rr[0, 1]) > tobeat:
                best_factor = i
                tobeat = np.abs(rr[0, 1])

        if best_factor == -1:
            raise VoltPyFailed(
                'Decomposed factors do not meet the requirements.')

        chosen = {}
        chosen['x'] = SamplingPred[:, best_factor]
        chosen['y'] = PotentialPred[:, best_factor]
        chosen['z'] = ConcentrationPred[:, best_factor]
        return chosen
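The second loop reduces to: pick the column of ConcentrationPred whose profile tracks the known concentrations most closely by absolute Pearson correlation. A self-contained sketch with synthetic data (shapes and values are illustrative):

import numpy as np

concs = np.array([1.0, 2.0, 4.0, 8.0])
ConcentrationPred = np.column_stack([
    np.random.randn(4),     # unrelated factor
    0.5 * concs + 0.1,      # tracks the analyte linearly
])
best_factor, tobeat = -1, 0.0
for i, cp in enumerate(ConcentrationPred.T):
    r = np.abs(np.corrcoef(cp, concs)[0, 1])
    if r > tobeat:
        best_factor, tobeat = i, r
print(best_factor)  # 1, since that column correlates perfectly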
Example #9
 def process(self, user, dataset):
     self._checkDataset(dataset)
     if self.type == 'processing':
         mname = self.cleaned_data.get('method', None)
         if mname in self.methods:
             if self.methods[mname].errors:
                 errors = '\n'.join([str(x) for x in self.methods[mname].errors])
                 raise VoltPyFailed('Data does not meet the requirements of the selected method: %s' % errors)
             a = mmodels.Processing(
                 owner=user,
                 dataset=dataset,
                 method=mname,
                 method_display_name=self.methods[mname].__str__(),
                 name='',
                 active_step_num=0,
                 deleted=False,
                 completed=False
             )
             a.save()
             dataset.prepareUndo(processing_instance=a)
             return a.id
         return None
     elif self.type == 'analysis':
         mname = self.cleaned_data.get('method', None)
         if mname in self.methods:
             if self.methods[mname].errors:
                 errors = '\n'.join([str(x) for x in self.methods[mname].errors])
                 raise VoltPyFailed('Data does not meet the requirements of the selected method: %s' % errors)
             a = mmodels.Analysis(
                 owner=user,
                 dataset=dataset,
                 method=mname,
                 method_display_name=self.methods[mname].__str__(),
                 name='',
                 active_step_num=0,
                 deleted=False,
                 completed=False
             )
             a.save()
             dataset.save()
             return a.id
     return None
Example #10
 def finalize(self, user):
     settings = Settings.getData(self.model)
     try:
         self.model.custom_data['iterations'] = int(settings['Iterations'])
         self.model.custom_data['degree'] = int(settings['Degree'])
     except ValueError:
         raise VoltPyFailed('Wrong values for degree or iterations.')
     self.__perform(self.model.dataset)
     self.model.step = None
     self.model.completed = True
     self.model.save()
Example #11
 def finalize(self, user):
     try:
         self.model.custom_data['PeakMaximum'] = float(
             SelectPoint.getData(self.model))
         self.model.custom_data['PeakSpan'] = SelectRange.getData(
             self.model)
     except ValueError:
         raise VoltPyFailed('No values selected.')
     self.__perform(self.model.dataset)
     self.model.step = None
     self.model.completed = True
     self.model.save()
Example #12
def _parseGetCFID(cfile, details, user):
    """
    Upload file and return File id.
    """
    ext = cfile.name.rsplit('.', 1)[-1]
    parserClass = _getParserClass(ext)
    try:
        parserObj = parserClass(cfile, details)
        cf_id = parserObj.saveModels(user)
    except Exception:
        raise VoltPyFailed('Could not parse file %s' % cfile.name)
    return cf_id
Example #13
File: SGSmooth.py Project: efce/voltPy
 def finalize(self, user):
     settings = Settings.getData(self.model)
     try:
         self.model.custom_data['WindowSpan'] = int(settings['Window Span'])
         self.model.custom_data['Degree'] = int(settings['Degree'])
     except ValueError:
         raise VoltPyFailed('Wrong values for span or degree.')
     self.__perform(self.model.dataset)
     self.model.step = None
     self.model.completed = True
     self.model.save()
     return True
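The __perform step is not shown, but given the settings names it plausibly applies Savitzky-Golay smoothing. SciPy's savgol_filter illustrates the operation; mapping 'Window Span' to window_length and 'Degree' to polyorder is an assumption based on the names alone.

import numpy as np
from scipy.signal import savgol_filter

y = np.sin(np.linspace(0.0, 3.0, 101)) + 0.05 * np.random.randn(101)
smoothed = savgol_filter(y, window_length=11, polyorder=3)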
Example #14
 def apply(self, user, dataset):
     an = self.model.getCopy()
     an.dataset = dataset
     an.appliesModel = self.model
     an.save()
     self.model = an
     try:
         self.finalize(user)
     except Exception:
         an.deleted = True
         an.save()
         raise VoltPyFailed('Could not apply model.')
     return an.id
Example #15
 def __chooseTw(self, tptw):
     if tptw < 10:
         raise VoltPyFailed(
             'Could not select tp and tw, too few samples per point.')
     elif tptw < 20:
         return [tptw - 3, tptw - 6, tptw - 9]
     elif tptw < 32:
         return [tptw - 4, tptw - 10, tptw - 15]
     else:
         n = int(np.floor(np.log2(tptw))) - 1
         dist = [(x**1.7 + 4) for x in range(n)]
         expmax = tptw - 5
         maxdist = np.max(dist)
         ok_dist = np.floor(np.multiply(dist, np.divide(expmax, maxdist)))
         ret_dist = [int(x) for x in ok_dist]
         return ret_dist
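To see what the final branch of __chooseTw produces, the same arithmetic can be re-run standalone (illustrative only, here for tptw=64):

import numpy as np

tptw = 64
n = int(np.floor(np.log2(tptw))) - 1
dist = [(x**1.7 + 4) for x in range(n)]
scaled = np.floor(np.multiply(dist, (tptw - 5) / np.max(dist)))
print([int(x) for x in scaled])  # increasing windows, capped at tptw - 5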
Example #16
 def finalize(self, user):
     Y = []
     CONC = []
     SENS = []
     RANGES = []
     analyte = self.model.dataset.analytes.all()[0]
     self.model.custom_data['analyte'] = analyte.name
     unitsTrans = dict(mmodels.Dataset.CONC_UNITS)
     self.model.custom_data['units'] = unitsTrans[
         self.model.dataset.analytes_conc_unit[analyte.id]]
     tags = TagCurves.getData(self.model)
     if len(set(tags.keys())) <= 2:
         raise VoltPyFailed('Not enough sensitivities to analyze the data.')
     for name, cds in tags.items():
         for cid in cds:
             SENS.append(name)
             cd = self.model.dataset.curves_data.get(id=cid)
             Y.append(cd.yVector)
             CONC.append(
                 self.model.dataset.analytes_conc.get(analyte.id,
                                                      {}).get(cd.id, 0))
             rng = [
                 cd.xValue2Index(SelectRange.getData(self.model)[0]),
                 cd.xValue2Index(SelectRange.getData(self.model)[1])
             ]
             RANGES.append(rng)
     result = sbcm.selfReferencingBackgroundCorrection(
         Y, CONC, SENS, RANGES)
     self.model.custom_data['result'] = result.get('__AVG__', None)
     self.model.custom_data['resultStdDev'] = result.get('__STD__', None)
     self.model.custom_data['fitEquations'] = {}
     for k, v in result.items():
         if isinstance(k, str):
             if k.startswith('_'):
                 continue
         self.model.custom_data['fitEquations'][k] = v
     self.model.save()
     return True
Example #17
 def process(self, user, request):
     ret = super(PolynomialBackgroundFit, self).process(user, request)
     self.model.custom_data['fitCoeff'] = []
     if self.model.active_step_num == 2:
         for cd in self.model.dataset.curves_data.all():
             ranges = SelectTwoRanges.getData(self.model)
             v = []
             v.append(cd.xValue2Index(ranges[0]))
             v.append(cd.xValue2Index(ranges[1]))
             v.append(cd.xValue2Index(ranges[2]))
             v.append(cd.xValue2Index(ranges[3]))
             v.sort()
             (st1, en1, st2, en2) = (v[0], v[1], v[2], v[3])
             xvec = np.append(cd.xVector[st1:en1], cd.xVector[st2:en2])
             yvec = np.append(cd.yVector[st1:en1], cd.yVector[st2:en2])
             try:
                 degree = int(Settings.getData(self.model)['Degree'])
             except ValueError:
                 raise VoltPyFailed('Wrong degree of polynomial')
             p = np.polyfit(xvec, yvec, degree)
             self.model.custom_data['fitCoeff'].append(p)
             self.model.save()
     return ret
Example #18
 def exportableData(self):
     if not self.model.completed:
         raise VoltPyFailed('Incomplete data')
     return np.matrix(self.model.custom_data['matrix']).T
Example #19
def prepareDataForASD(dataset: Dataset, start_index: int, end_index: int,
                      tptw: int, method_type: int, centering: bool):
    cds = dataset.curves_data.all()
    if len(cds) == 0:
        raise VoltPyFailed('Dataset error.')
    cd1 = cds[0]
    Param = mmodels.Curve.Param
    if all([
            cd1.curve.params[Param.method] != Param.method_dpv,
            cd1.curve.params[Param.method] != Param.method_sqw
    ]):
        raise VoltPyFailed('Method works only for DP/SQW data.')

    needSame = [
        Param.tp, Param.tw, Param.ptnr, Param.nonaveragedsampling, Param.Ep,
        Param.Ek, Param.Estep
    ]
    for cd in cds:
        for p in needSame:
            if cd.curve.params[p] != cd1.curve.params[p]:
                raise VoltPyFailed('All curves in dataset have to be similar.')

    if method_type == TYPE_SEPARATE:
        main_data_1 = np.zeros(
            (tptw, int(len(cd1.current_samples) / tptw / 2), len(cds)))
        main_data_2 = np.zeros(
            (tptw, int(len(cd1.current_samples) / tptw / 2), len(cds)))
        for cnum, cd in enumerate(cds):
            pos = 0
            for i in np.arange(0, len(cd1.current_samples), 2 * tptw):
                pos = int(i / (2 * tptw))
                main_data_1[:, pos, cnum] = cd.current_samples[i:(i + tptw)]
                main_data_2[:, pos, cnum] = \
                    cd.current_samples[(i + tptw):(i + 2 * tptw)]
        main_data_1 = main_data_1[:, start_index:end_index, :]
        main_data_2 = main_data_2[:, start_index:end_index, :]

    elif method_type == TYPE_TOGETHER:
        main_data_1 = np.zeros(
            (tptw, int(len(cd1.current_samples) / tptw), len(cds)))
        for cnum, cd in enumerate(cds):
            pos = 0
            for i in np.arange(0, len(cd1.current_samples), tptw):
                pos = int(i / tptw)
                main_data_1[:, pos, cnum] = cd.current_samples[i:(i + tptw)]
        main_data_1 = main_data_1[:, 2 * start_index:2 * end_index, :]

    elif method_type == TYPE_COMBINED:
        main_data_1 = np.zeros(
            (2 * tptw, int(len(cd1.current_samples) / tptw / 2), len(cds)))
        for cnum, cd in enumerate(cds):
            pos = 0
            for i in np.arange(0, len(cd1.current_samples), 2 * tptw):
                pos = int(i / (2 * tptw))
                main_data_1[:, pos, cnum] = \
                    cd.current_samples[i:(i + 2 * tptw)]
        main_data_1 = main_data_1[:, start_index:end_index, :]

    if centering:
        main_mean = np.mean(main_data_1, axis=0)
        for i in range(main_data_1.shape[1]):
            for ii in range(main_data_1.shape[2]):
                main_data_1[:, i, ii] -= main_mean[i, ii]
        if method_type == TYPE_SEPARATE:
            main_mean2 = np.mean(main_data_2, axis=0)
            for i in range(main_data_2.shape[1]):
                for ii in range(main_data_2.shape[2]):
                    main_data_2[:, i, ii] -= main_mean2[i, ii]

    if method_type == TYPE_SEPARATE:
        return (main_data_1, main_data_2)
    return (main_data_1, None)
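The per-curve loops in the TYPE_TOGETHER branch are equivalent to reshaping each curve's flat sample vector into columns of length tptw; a small sketch with assumed shapes:

import numpy as np

tptw, n_pulses = 4, 6
samples = np.arange(tptw * n_pulses, dtype=float)  # one curve, flat samples
by_point = samples.reshape(n_pulses, tptw).T       # shape (tptw, n_pulses)
assert by_point[:, 1].tolist() == [4.0, 5.0, 6.0, 7.0]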
Example #20
 def apply(self, user, dataset):
     """
     This does not support appliyng existing
     """
     raise VoltPyFailed('Average curve does not support apply function.')
Example #21
def check_datalen(dataset, minimum_points=1):
    for cd in dataset.curves_data.all():
        if len(cd.yVector) < minimum_points:
            raise VoltPyFailed('Data needs to have at least %i data points.' %
                               minimum_points)
Example #22
def paginate(request: HttpRequest, queryset: QuerySet, sortable_by: List[str],
             current_page: int) -> dict:
    page_size = 15
    path = request.path
    txt_sort = ''
    search_string = ''
    if request.method == 'POST':
        search_string = request.POST.get('search', '')
    if request.method == 'GET':
        search_string = request.GET.get('search', '')
    if search_string:
        dbquery = Q(name__icontains=search_string)
        if 'filename' in sortable_by:
            dbquery |= Q(filename=search_string)
        if 'dataset' in sortable_by:
            dbquery |= Q(dataset__name__icontains=search_string)
        if 'analytes' in sortable_by:
            dbquery |= Q(analytes__name__icontains=search_string)
        if 'method' in sortable_by:
            dbquery |= Q(method_display_name__icontains=search_string)
        queryset = queryset.filter(dbquery)
    if request.method in ['GET', 'POST']:
        if request.GET.get('sort', False):
            sort_by = request.GET.get('sort')
            if sort_by in sortable_by:
                if sort_by == 'analytes':
                    from django.db.models import Min
                    order_by = sort_by
                    txt_sort = '?sort=%s' % sort_by
                    queryset = queryset.annotate(
                        an_name=Min('analytes__name')).order_by('an_name')
                elif sort_by == 'dataset':
                    order_by = sort_by
                    txt_sort = '?sort=%s' % sort_by
                    queryset = queryset.order_by('dataset__name')
                else:
                    order_by = sort_by
                    txt_sort = '?sort=%s' % sort_by
                    queryset = queryset.order_by(order_by)
        else:
            queryset = queryset.order_by('-id')
    else:
        raise VoltPyFailed('Unsupported HTTP method.')

    splpath = path.split('/')
    if is_number(splpath[-2]):
        path = '/'.join(splpath[:-2])
        path += '/'
    ret = {}
    elements = len(queryset)
    ret['number_of_pages'] = int(np.ceil(elements / page_size))
    if current_page <= 0 or current_page > ret['number_of_pages']:
        # TODO: Log wrong page number
        current_page = 1
    start = (current_page - 1) * page_size
    end = start + page_size
    ret['search_append'] = ''
    if search_string != '':
        from urllib.parse import quote
        sanitize_search = quote(search_string)
        ret['search_append'] = '&search=' + sanitize_search
        if not txt_sort:
            txt_sort = '?search=%s' % sanitize_search
        else:
            txt_sort += '&search=%s' % sanitize_search
    items_count = len(queryset)
    ret['current_page_content'] = queryset[start:end:1]
    search_url = request.get_full_path()
    if 'search=' in search_url:
        try:
            search_url = search_url[:search_url.index('&search=')]
        except ValueError:
            search_url = search_url[:search_url.index('search=')]
    ret['search_url'] = search_url
    ret['paginator'] = ''.join([
        '<div class="paginator">',
        '<a href="%s1/%s">[&lt;&lt;]</a>&nbsp;' % (path, txt_sort),
        '<a href="%s%s/%s">[&lt;]</a>&nbsp;' %
        (path, str(current_page - 1) if (current_page > 1) else "1", txt_sort),
    ])
    for i in range(ret['number_of_pages']):
        p = str(i + 1)
        if int(p) == current_page:
            ret['paginator'] += '[{num}]&nbsp;'.format(num=p)
        else:
            ret['paginator'] += '<a href="{path}{num}/{sort}">[{num}]</a>&nbsp;'.format(
                path=path, num=p, sort=txt_sort)
    search_string = search_string.replace('<', '&lt;').replace('>', '&gt;')
    ret['search_results_for'] = ((
        '<span class="css_search">Search results for&nbsp;<i>%s</i>:</span><br />'
        % search_string) if search_string else '')
    ret['paginator'] += ''.join([
        '<a href="%s%s/%s">[&gt;]</a>&nbsp;' %
        (path, str(current_page + 1) if (current_page < ret['number_of_pages'])
         else str(ret['number_of_pages']), txt_sort),
        '<a href="%s%s/%s">[&gt;&gt;]</a>' %
        (path, str(ret['number_of_pages']), txt_sort),
        '&nbsp; %d items out of %s ' %
        (len(ret['current_page_content']), items_count), '</div>'
    ])
    return ret
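The paging-window arithmetic used above, in isolation (the numbers are illustrative):

import numpy as np

page_size = 15
elements = 47
number_of_pages = int(np.ceil(elements / page_size))  # 4
current_page = 2
start = (current_page - 1) * page_size                # 15
end = start + page_size                               # 30; items 15..29 shown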
Example #23
 def exportableData(self):
     if not self.model.completed:
         raise VoltPyFailed('Incomplete data for export.')
     arrexp = np.array(self.model.custom_data['matrix'])
     return arrexp
Example #24
    def __perform(self, dataset: mmodels.Dataset):
        method_type = self.model.custom_data['MethodType']
        centering = self.model.custom_data['Centering']
        if method_type not in self._allowed_types:
            raise VoltPyFailed('Not allowed type.')

        Param = mmodels.Curve.Param

        cd1 = dataset.curves_data.all()[0]
        self.model.custom_data['tp'] = cd1.curve.params[Param.tp]
        self.model.custom_data['tw'] = cd1.curve.params[Param.tw]
        tptw = self.model.custom_data['tp'] + self.model.custom_data['tw']
        dE = cd1.curve.params[Param.dE]

        dec_start = cd1.xValue2Index(
            self.model.custom_data['DecomposeRange'][0])
        dec_end = cd1.xValue2Index(self.model.custom_data['DecomposeRange'][1])
        if dec_start > dec_end:
            dec_start, dec_end = dec_end, dec_start

        an_selected = self.model.analytes.all()[0]
        concs_different = dataset.getUncorrelatedConcs()
        an_selected_conc = dataset.getConc(an_selected.id)
        self.model.custom_data['analyte'] = an_selected.name

        if not an_selected_conc:
            raise VoltPyFailed('Wrong analyte selected.')

        an_num = len(concs_different)
        factors = an_num + 2

        main_data_1, main_data_2 = prepare.prepareDataForASD(
            dataset=dataset,
            start_index=dec_start,
            end_index=dec_end,
            tptw=tptw,
            method_type=method_type,
            centering=centering)

        if any([
                main_data_1.shape[0] < factors,
                main_data_1.shape[1] < factors,
                main_data_1.shape[2] < factors,
        ]):
            factors = np.min(main_data_1.shape)

        X0 = np.random.rand(factors, main_data_1.shape[0])
        Y0 = np.random.rand(factors, main_data_1.shape[1])

        # TODO: fit one combined array (step and pulse), check correlation for all analytes before selection.
        SamplingPred1, PotentialPred1, ConcentrationPred1, errflag1, iter_num1, cnv1 = asd.asd(
            main_data_1, X0, Y0, main_data_1.shape[0], main_data_1.shape[1],
            main_data_1.shape[2], factors, 1, 0.000001, 100)
        """
        plt.subplot(311)
        plt.plot(SamplingPred1)
        plt.subplot(312)
        plt.plot(PotentialPred1)
        plt.subplot(313)
        plt.plot(ConcentrationPred1)
        plt.show()
        """
        if method_type == prepare.TYPE_SEPARATE:
            X0 = SamplingPred1.T
            Y0 = PotentialPred1.T
            SamplingPred2, PotentialPred2, ConcentrationPred2, errflag2, iter_num2, cnv2 = asd.asd(
                main_data_2, X0, Y0, main_data_1.shape[0],
                main_data_1.shape[1], main_data_1.shape[2], factors, 1,
                0.000001, 100)

        def recompose(bfd, method_type):
            if method_type != prepare.TYPE_COMBINED:
                mult = np.mean(bfd['x'][self.model.custom_data['tw']:])
                yvecs = np.dot(np.matrix(bfd['y']).T, np.matrix(bfd['z']))
                yvecs = np.dot(yvecs, mult)
            else:
                tw = self.model.custom_data['tw']
                tp = self.model.custom_data['tp']
                mult1 = np.mean(bfd['x'][tw:tw + tp])
                mult2 = np.mean(bfd['x'][(2 * tw + tp):])
                yvecs1 = np.dot(np.matrix(bfd['y']).T,
                                np.matrix(bfd['z'])).dot(mult1)
                yvecs2 = np.dot(np.matrix(bfd['y']).T,
                                np.matrix(bfd['z'])).dot(mult2)
                yvecs = np.zeros((yvecs1.shape[0] * 2, yvecs1.shape[1]))
                yvecs[0::2] = yvecs1
                yvecs[1::2] = yvecs2
            return yvecs

        if method_type == prepare.TYPE_SEPARATE:
            bfd0 = self._best_fit_factor(dE=dE,
                                         concs=an_selected_conc,
                                         SamplingPred=SamplingPred1,
                                         PotentialPred=PotentialPred1,
                                         ConcentrationPred=ConcentrationPred1)
            bfd1 = self._best_fit_factor(dE=dE,
                                         concs=an_selected_conc,
                                         SamplingPred=SamplingPred2,
                                         PotentialPred=PotentialPred2,
                                         ConcentrationPred=ConcentrationPred2)
            yv0 = recompose(bfd0, method_type)
            yv1 = recompose(bfd1, method_type)
            yvecs2 = np.subtract(yv1, yv0)

        elif method_type == prepare.TYPE_TOGETHER:
            bfd0 = self._best_fit_factor(dE=dE,
                                         concs=an_selected_conc,
                                         SamplingPred=SamplingPred1,
                                         PotentialPred=PotentialPred1,
                                         ConcentrationPred=ConcentrationPred1)
            yv0 = recompose(bfd0, method_type)
            yvecs2 = np.subtract(yv0[1::2], yv0[0::2])

        elif method_type == prepare.TYPE_COMBINED:
            bfd0 = self._best_fit_factor(dE=dE,
                                         concs=an_selected_conc,
                                         SamplingPred=SamplingPred1,
                                         PotentialPred=PotentialPred1,
                                         ConcentrationPred=ConcentrationPred1)
            yv0 = recompose(bfd0, method_type)
            yvecs2 = np.subtract(yv0[1::2], yv0[0::2])

        if yvecs2.shape[1] == dataset.curves_data.all().count():
            for i, cd in enumerate(dataset.curves_data.all()):
                newcd = cd.getCopy()
                newcd.setCrop(dec_start, dec_end)
                newcdConc = dataset.getCurveConcDict(cd)
                # change to array to remove the matrix dimension
                newy = np.array(yvecs2[:, i].T).squeeze()
                newcd.yVector = newy
                newcd.date = timezone.now()
                newcd.save()
                dataset.removeCurve(cd)
                dataset.addCurve(newcd, newcdConc)
            dataset.save()
        else:
            raise VoltPyFailed('Computation error.')
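The differential signal built for TYPE_TOGETHER and TYPE_COMBINED reduces to subtracting alternating rows (step vs. pulse); in isolation:

import numpy as np

yv0 = np.arange(12.0).reshape(6, 2)          # rows alternate step, pulse, ...
signal = np.subtract(yv0[1::2], yv0[0::2])   # pulse minus step, shape (3, 2)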
Example #25
def check_analyte(dataset, minimum_number=1):
    if len(dataset.analytes.all()) < minimum_number:
        raise VoltPyFailed(
            'Data needs to have at least %i analyte(s) defined.' %
            minimum_number)
Example #26
    def __perform(self, dataset):
        if dataset.curves_data.all().count() == 0:
            raise VoltPyFailed('Dataset error.')
        Param = mmodels.Curve.Param

        cd1 = dataset.curves_data.all()[0]
        self.model.custom_data['tp'] = cd1.curve.params[Param.tp]
        self.model.custom_data['tw'] = cd1.curve.params[Param.tw]

        tptw = cd1.curve.params[Param.tp] + cd1.curve.params[Param.tw]
        dE = cd1.curve.params[Param.dE]

        an_num = dataset.analytes.all().count()
        factors = (an_num + 1) if an_num > 0 else 2

        main_data_1, main_data_2 = prepare.prepareDataForASD(
            dataset=dataset,
            start_index=0,
            end_index=len(cd1.xVector),
            tptw=tptw,
            method_type=prepare.TYPE_TOGETHER,
            centering=False,
        )
        if any([
                main_data_1.shape[0] < factors,
                main_data_1.shape[1] < factors,
                main_data_1.shape[2] < factors,
        ]):
            factors = np.min(main_data_1.shape)

        X0 = np.random.rand(factors, main_data_1.shape[0])
        Y0 = np.random.rand(factors, main_data_1.shape[1])

        SamplingPred1, PotentialPred1, ConcentrationPred1, errflag1, iter_num1, cnv1 = asd.asd(
            main_data_1, X0, Y0, main_data_1.shape[0], main_data_1.shape[1],
            main_data_1.shape[2], factors, 1, 0.000001, 100)
        """
        X0 = SamplingPred1.T
        Y0 = PotentialPred1.T
        SamplingPred2, PotentialPred2, ConcentrationPred2, errflag2, iter_num2, cnv2 = asd.asd(
            main_data_2,
            X0,
            Y0,
            main_data_1.shape[0],
            main_data_1.shape[1],
            main_data_1.shape[2],
            factors,
            1,
            0.000001,
            100
        )
        """
        def best_fit_factor(SamplingPred, PotentialPred, ConcentrationPred):
            capac_r2 = 0
            capac_index = None
            for i, sp in enumerate(SamplingPred.T):
                x = np.array(range(sp.shape[0] - 1))
                if sp[1] > 0:
                    yvec = sp[1:]
                else:
                    yvec = np.dot(sp[1:], -1)
                capac_fit, capac_cov = fit_capacitive_eq(xvec=x,
                                                         yvec=yvec,
                                                         dE=dE)
                yc_pred = calc_capacitive(x, dE, *capac_fit)
                r2c = np.power(np.corrcoef(yc_pred, yvec)[0, 1], 2)

                if r2c > capac_r2:
                    capac_r2 = r2c
                    capac_index = i

            if capac_index is None:
                raise VoltPyFailed(
                    'Could not determine the capacitive factor.')

            chosen = {}
            chosen['x'] = SamplingPred[:, capac_index]
            chosen['y'] = PotentialPred[:, capac_index]
            chosen['z'] = ConcentrationPred[:, capac_index]
            return chosen

        bfd0 = best_fit_factor(SamplingPred1, PotentialPred1,
                               ConcentrationPred1)
        # bfd1 = best_fit_factor(SamplingPred2, PotentialPred2, ConcentrationPred2)
        randnum = np.random.randint(0, len(bfd0['y']), size=10)

        # Calculate tau in 10 random points for both best factors and all curves:
        cfits = []
        for bfd in [bfd0]:  # (bfd0, bfd1):
            xv = np.array(range(len(bfd['x'])))
            for ri in randnum:
                for cp in bfd['z']:
                    sampling_recovered = np.dot(bfd['x'], cp).dot(bfd['y'][ri])
                    if sampling_recovered[1] > 0:
                        yvec = sampling_recovered
                    else:
                        yvec = np.dot(sampling_recovered, -1)
                    capac_fit, capac_cov = fit_capacitive_eq(xvec=xv,
                                                             yvec=yvec,
                                                             dE=dE)
                    cfits.append(capac_fit)
        cfits = np.matrix(cfits)
        fit_mean = np.mean(cfits, axis=0)
        fit_std = np.std(cfits, axis=0)
        self.model.custom_data['Tau'] = fit_mean[0, 2]
        self.model.custom_data['TauStdDev'] = fit_std[0, 2]
        self.model.custom_data['Romega'] = fit_mean[0, 0]
        self.model.custom_data['RomegaStdDev'] = fit_std[0, 0]
        self.model.custom_data['BestFitData'] = {}
        self.model.custom_data['BestFitData'][0] = bfd0
        # self.model.custom_data['BestFitData'][1] = bfd1
        self.model.save()
Example #27
 def exportableData(self):
     if not self.model.completed:
         raise VoltPyFailed("Data incomplete")
     raise NotImplementedError("Export is not implemented for this method.")
Example #28
 def apply(self, user, dataset):
     """
     This procedure cannot be applied to other data.
     """
     raise VoltPyFailed(
         'Slope Standard Addition does not support the apply function.')
Example #29
def generate_plot(request: HttpRequest,
                  user: User,
                  to_plot: Optional[str] = None,
                  plot_type: Optional[str] = None,
                  value_id: Optional[int] = None,
                  **kwargs) -> List:
    assert (to_plot is not None
            and plot_type is None) or (to_plot is None
                                       and plot_type is not None)
    if to_plot is not None:
        if isinstance(to_plot, mmodels.File):
            plot_type = 'file'
        elif isinstance(to_plot, mmodels.Dataset):
            plot_type = 'dataset'
        elif isinstance(to_plot, mmodels.Fileset):
            plot_type = 'fileset'
        elif isinstance(to_plot, mmodels.Analysis):
            plot_type = 'analysis'
        else:
            raise VoltPyFailed('Could not plot')

    allowedTypes = [
        'file',
        'analysis',
        'dataset',
        'fileset',
    ]
    if plot_type not in allowedTypes:
        raise VoltPyNotAllowed('Operation not allowed.')
    vtype = kwargs.get('vtype', plot_type)
    vid = kwargs.get('vid', value_id)
    addTo = kwargs.get('add', None)

    pm = mpm.PlotManager()
    data = []
    if plot_type == 'file':
        if to_plot is None:
            cf = mmodels.File.objects.get(id=value_id)
        else:
            cf = to_plot
        data = pm.datasetHelper(user, cf)
        pm.xlabel = pm.xLabelHelper(user)
        pm.include_x_switch = True
    elif plot_type == 'dataset':
        if to_plot is None:
            cs = mmodels.Dataset.objects.get(id=value_id)
        else:
            cs = to_plot
        data = pm.datasetHelper(user, cs)
        pm.xlabel = pm.xLabelHelper(user)
        pm.include_x_switch = True
    elif plot_type == 'analysis':
        if to_plot is None:
            data = pm.analysisHelper(user, value_id)
        else:
            data = to_plot
        pm.xlabel = pm.xLabelHelper(user)
        pm.include_x_switch = False
    elif plot_type == 'fileset':
        if to_plot is None:
            fs = mmodels.Fileset.objects.get(id=value_id)
        else:
            fs = to_plot
        data = []
        for f in fs.files.all():
            data.extend(pm.datasetHelper(user, f))
        pm.xlabel = pm.xLabelHelper(user)
        pm.include_x_switch = True

    pm.ylabel = 'i / µA'
    pm.setInteraction(kwargs.get('interactionName', 'none'))

    for d in data:
        pm.add(**d)

    if addTo:
        for a in addTo:
            pm.add(**a)

    return pm.getEmbeded(request, user, vtype, vid)