Example 1
    def getResponsible(self):
        """ Return all manager info of responsible departments """
        managers = {}
        departments = []
        for analysis in self.objectValues('Analysis'):
            department = analysis.getService().getDepartment()
            if department is None:
                continue
            department_id = department.getId()
            if department_id in departments:
                continue
            departments.append(department_id)
            manager = department.getManager()
            if manager is None:
                continue
            manager_id = manager.getId()
            if manager_id not in managers:
                managers[manager_id] = {}
                managers[manager_id]['name'] = to_unicode(manager.getFullname())
                managers[manager_id]['email'] = to_unicode(manager.getEmailAddress())
                managers[manager_id]['phone'] = to_unicode(manager.getBusinessPhone())
                managers[manager_id]['signature'] = '%s/Signature' % manager.absolute_url()
                managers[manager_id]['dept'] = ''
            mngr_dept = managers[manager_id]['dept']
            if mngr_dept:
                mngr_dept += ', '
            mngr_dept += department.Title()
            managers[manager_id]['dept'] = to_unicode(mngr_dept)
        mngr_keys = managers.keys()
        mngr_info = {}
        mngr_info['ids'] = mngr_keys
        mngr_info['dict'] = managers

        return mngr_info
Example 2
    def parse_headerline(self, line):
        #Process incoming header line
        """11/03/2014 14:46:46
        PANalytical
        Results quantitative - Omnian 2013,

        Selected archive:,Omnian 2013
        Number of results selected:,4
        """

        # Save each known header field and its value in the dict
        if line.startswith('Results quantitative'):
            line = to_unicode(line)
            if len(self._header) == 0:
                self.err("Unexpected header format", numline=self._numline)
                return -1

            line = line.replace(',', "")
            splitted = line.split(' - ')
            self._header['Quantitative'] = splitted[1]
            return 1

        if line.startswith('Selected archive'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Archive'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)
            return 0

        if line.startswith('Number of'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['NumResults'] = splitted[1].replace('"',
                                                                 '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)
            return 0

        if line.startswith('Seq.'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1
            #Grab column names
            self._columns = line.split(',')
            self._end_header = True
            return 1

        else:
            self._header['Date'] = line
            return 1
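
Assuming splitLine() is essentially a comma split (an assumption about the parser base class, which is not shown here), the sample header quoted in the docstring would leave _header roughly as follows. Note that 'Date' simply receives any line that matches no other branch, so its final value depends on the surrounding lines.

    # Illustrative only; keys come from the code above, values from the docstring sample
    {
        'Quantitative': 'Omnian 2013',   # from "Results quantitative - Omnian 2013,"
        'Archive': 'Omnian 2013',        # from "Selected archive:,Omnian 2013"
        'NumResults': '4',               # from "Number of results selected:,4"
    }
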
Example 3
    def parse_headerline(self, line):
        #Process incoming header line
        """11/03/2014 14:46:46
        PANalytical
        Results quantitative - Omnian 2013,

        Selected archive:,Omnian 2013
        Number of results selected:,4
        """
        
        # Save each known header field and its value in the dict
        if line.startswith('Results quantitative'):
            line = to_unicode(line)
            if len(self._header) == 0:
                self.err("Unexpected header format", numline=self._numline)
                return -1

            line = line.replace(',', "")
            splitted = line.split(' - ')
            self._header['Quantitative'] = splitted[1]
            return 1

        if line.startswith('Selected archive'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Archive'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)
            return 0

        if line.startswith('Number of'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['NumResults'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)
            return 0

        if line.startswith('Seq.'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1
            #Grab column names
            self._columns = line.split(',')
            self._end_header = True
            return 1

        else:
            self._header['Date'] = line
            return 1
Example 4
def reset_auth_key(portal):
    """Resets a new key for the encryption on user auto-authentication

    This key is used to generate an encrypted token (symmetric encryption) for
    the authentication of requests sent by queue clients and workers to the
    Queue's server API. Must be 32 url-safe base64-encoded bytes
    """
    # Create and store the key
    registry_id = "senaite.queue.auth_key"
    key = base64.urlsafe_b64encode(os.urandom(32))
    ploneapi.portal.set_registry_record(registry_id, to_unicode(key))
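
The "32 url-safe base64-encoded bytes" requirement matches the key format expected by cryptography's Fernet (symmetric encryption). Whether senaite.queue actually consumes the key through Fernet is an assumption here; the sketch below only shows how a key of that shape could be used to encrypt and verify a token.

    import base64
    import os

    from cryptography.fernet import Fernet

    # Same shape as the value stored in the registry above
    key = base64.urlsafe_b64encode(os.urandom(32))
    f = Fernet(key)
    token = f.encrypt(b"queue client payload")
    assert f.decrypt(token) == b"queue client payload"
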
Example 5
    def __call__(self):
        plone.protect.CheckAuthenticator(self.request)
        uid = self.request.get('UID', '')
        title = self.request.get('Title', '')
        ret = {
            'UID': '',
            'Title': '',
            'Prefix': '',
            'Hazardous': '',
            'SampleMatrixUID': '',
            'SampleMatrixTitle': '',
            'MinimumVolume': '',
            'ContainerTypeUID': '',
            'ContainerTypeTitle': '',
            'SamplePoints': ('', ),
            'StorageLocations': ('', ),
        }
        proxies = None
        if uid:
            try:
                bsc = getToolByName(self.context, 'bika_setup_catalog')
                proxies = bsc(UID=uid)
            except ParseError:
                pass
        elif title:
            try:
                bsc = getToolByName(self.context, 'bika_setup_catalog')
                proxies = bsc(portal_type='SampleType',
                              title=to_unicode(title))
            except ParseError:
                pass

        if proxies and len(proxies) == 1:
            st = proxies[0].getObject()
            ret = {
               'UID': st.UID(),
               'Title': st.Title(),
               'Prefix': st.getPrefix(),
               'Hazardous': st.getHazardous(),
               'SampleMatrixUID': st.getSampleMatrix() and \
                                  st.getSampleMatrix().UID() or '',
               'SampleMatrixTitle': st.getSampleMatrix() and \
                                  st.getSampleMatrix().Title() or '',
               'MinimumVolume':  st.getMinimumVolume(),
               'ContainerTypeUID': st.getContainerType() and \
                                   st.getContainerType().UID() or '',
               'ContainerTypeTitle': st.getContainerType() and \
                                     st.getContainerType().Title() or '',
               'SamplePoints': dict((sp.UID(),sp.Title()) for sp in st.getSamplePoints()),
               'StorageLocations': dict((sp.UID(),sp.Title()) for sp in st.getStorageLocations()),
               }

        return json.dumps(ret)
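
When exactly one SampleType matches, the view serializes a dictionary shaped like the second ret above; note that SamplePoints and StorageLocations switch from the ('', ) placeholder to UID -> Title mappings. The values below are purely illustrative, not taken from any real instance.

    ret = {
        'UID': '<sampletype-uid>',
        'Title': 'Water',
        'Prefix': 'H2O',
        'Hazardous': False,
        'SampleMatrixUID': '',
        'SampleMatrixTitle': '',
        'MinimumVolume': '100 mL',
        'ContainerTypeUID': '<containertype-uid>',
        'ContainerTypeTitle': 'Glass bottle',
        'SamplePoints': {'<samplepoint-uid>': 'Borehole 12'},
        'StorageLocations': {},
    }
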
Example 6
def translate_i18n(i18n_msg):
    """Safely translate and convert to UTF8, any zope i18n msgid returned from
    senaite health's message factory
    """
    text = to_unicode(i18n_msg)
    try:
        request = api.get_request()
        domain = getattr(i18n_msg, "domain", "senaite.health")
        text = translate(text, domain=domain, context=request)
    except UnicodeDecodeError:
        logger.warn("{} couldn't be translated".format(text))
    return to_utf8(text)
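
For reference, a minimal Python 2 sketch of what the to_unicode / to_utf8 helpers used throughout these examples typically do; the real implementations live in bika.lims / senaite and may differ in detail.

    def to_unicode(value, encoding="utf-8"):
        """Return value as a unicode string, decoding byte strings if needed."""
        if isinstance(value, unicode):
            return value
        if isinstance(value, str):
            return value.decode(encoding, "replace")
        return unicode(value)

    def to_utf8(value):
        """Return value as a UTF-8 encoded byte string."""
        if isinstance(value, unicode):
            return value.encode("utf-8")
        return str(value)
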
Example 7
    def __call__(self):
        plone.protect.CheckAuthenticator(self.request)
        uid = self.request.get('UID', '')
        title = self.request.get('Title', '')
        ret = {
               'UID': '',
               'Title': '',
               'Prefix': '',
               'Hazardous': '',
               'SampleMatrixUID': '',
               'SampleMatrixTitle': '',
               'MinimumVolume':  '',
               'ContainerTypeUID': '',
               'ContainerTypeTitle': '',
               'SamplePoints': ('',),
               'StorageLocations': ('',),
               }
        proxies = None
        if uid:
            try:
                bsc = getToolByName(self.context, 'bika_setup_catalog')
                proxies = bsc(UID=uid)
            except ParseError:
                pass
        elif title:
            try:
                bsc = getToolByName(self.context, 'bika_setup_catalog')
                proxies = bsc(portal_type='SampleType', title=to_unicode(title))
            except ParseError:
                pass

        if proxies and len(proxies) == 1:
            st = proxies[0].getObject()
            ret = {
               'UID': st.UID(),
               'Title': st.Title(),
               'Prefix': st.getPrefix(),
               'Hazardous': st.getHazardous(),
               'SampleMatrixUID': st.getSampleMatrix() and \
                                  st.getSampleMatrix().UID() or '',
               'SampleMatrixTitle': st.getSampleMatrix() and \
                                  st.getSampleMatrix().Title() or '',
               'MinimumVolume':  st.getMinimumVolume(),
               'ContainerTypeUID': st.getContainerType() and \
                                   st.getContainerType().UID() or '',
               'ContainerTypeTitle': st.getContainerType() and \
                                     st.getContainerType().Title() or '',
               'SamplePoints': dict((sp.UID(),sp.Title()) for sp in st.getSamplePoints()),
               'StorageLocations': dict((sp.UID(),sp.Title()) for sp in st.getStorageLocations()),
               }

        return json.dumps(ret)
Example 8
    def getResponsible(self):
        """ Return all manager info of responsible departments """
        managers = {}
        departments = []
        for analysis in self.objectValues('Analysis'):
            department = analysis.getService().getDepartment()
            if department is None:
                continue
            department_id = department.getId()
            if department_id in departments:
                continue
            departments.append(department_id)
            manager = department.getManager()
            if manager is None:
                continue
            manager_id = manager.getId()
            if manager_id not in managers:
                managers[manager_id] = {}
                managers[manager_id]['name'] = to_unicode(
                    manager.getFullname())
                managers[manager_id]['email'] = to_unicode(
                    manager.getEmailAddress())
                managers[manager_id]['phone'] = to_unicode(
                    manager.getBusinessPhone())
                managers[manager_id][
                    'signature'] = '%s/Signature' % manager.absolute_url()
                managers[manager_id]['dept'] = ''
            mngr_dept = managers[manager_id]['dept']
            if mngr_dept:
                mngr_dept += ', '
            mngr_dept += department.Title()
            managers[manager_id]['dept'] = to_unicode(mngr_dept)
        mngr_keys = managers.keys()
        mngr_info = {}
        mngr_info['ids'] = mngr_keys
        mngr_info['dict'] = managers

        return mngr_info
Example 9
    def setSamplePoint(self, value, **kw):
        """ Accept Object, Title or UID, and convert a SamplePoint title to
        its UID before saving.
        """
        if hasattr(value, "portal_type") and value.portal_type == "SamplePoint":
            pass
        else:
            bsc = getToolByName(self, 'bika_setup_catalog')
            sample_points = bsc(portal_type='SamplePoint',
                                title=to_unicode(value))
            if sample_points:
                value = sample_points[0].UID
            else:
                sample_points = bsc(portal_type='SamplePoint', UID=value)
                if sample_points:
                    value = sample_points[0].UID
                else:
                    value = None
        for ar in self.getAnalysisRequests():
            ar.Schema()['SamplePoint'].set(ar, value)
        return self.Schema()['SamplePoint'].set(self, value)
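
The setter accepts any of the three forms mentioned in the docstring; a hypothetical usage sketch (the object, title and UID values below are illustrative):

    sample.setSamplePoint(sample_point)        # a SamplePoint object
    sample.setSamplePoint(u"Borehole 12")      # a SamplePoint title
    sample.setSamplePoint("c0ffee00deadbeef")  # a SamplePoint UID
    # Values that resolve to neither a title nor a UID are stored as None
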
Example 10
    def getRows(self, uid=None, title=None, searchTerm=None):
        """ Searches for objects matching the specified params and returns a
            list of dictionaries. The dictionary structure depends on each
            implementation.
        """
        rows = []
        brains = []
        if uid:
            brains = self.bika_setup_catalog(portal_type=self.portal_type,
                                             UID=uid)
        elif title:
            brains = self.bika_setup_catalog(portal_type=self.portal_type,
                                             title=to_unicode(title))
        else:
            brains = self.bika_setup_catalog(portal_type=self.portal_type,
                                             inactive_state='active')
            if brains and searchTerm:
                brains = [brain for brain in brains
                          if brain.Title.lower().find(searchTerm) > -1
                          or brain.Description.lower().find(searchTerm) > -1]
        for brain in brains:
            brain = brain.getObject()
            rows.append(self.convertToDictionary(brain))
        return rows
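
With the comprehension fixed, the searchTerm branch reads more easily as a standalone filter. A sketch with hypothetical names; like getRows() above, it assumes the caller passes an already lower-cased term:

    def filter_brains(brains, search_term):
        """Keep brains whose Title or Description contains search_term."""
        return [brain for brain in brains
                if search_term in brain.Title.lower()
                or search_term in brain.Description.lower()]
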
Example 11
    def parse_headerline(self, line):
        #Process incoming header line
        """
        29/11/2013 10:15:44
        PANalytical
        "Quantification of sample ESFERA CINZA - 1g H3BO3 -  1:0,5 - NO PPC",

        R.M.S.:,"0,035"
        Result status:,
        Sum before normalization:,"119,5 %"
        Normalised to:,"100,0 %"
        Sample type:,Pressed powder
        Initial sample weight (g):,"2,000"
        Weight after pressing (g):,"3,000"
        Correction applied for medium:,No
        Correction applied for film:,No
        Used Compound list:,Oxides
        Results database:,omnian 2013
        Results database in:,c:\panalytical\superq\userdata
        """

        if line.startswith('"Quantification of sample') or line.startswith(
                'Quantification of sample'):
            line = to_unicode(line)
            if len(self._header) == 0:
                self.warn('Unexpected header format', numline=self._numline)
                return -1
            # Remove the unneeded prefix and the double quotes to obtain
            # the bare sample name
            line = line.replace("Quantification of sample ", "")
            line = line.replace('"', "")
            splitted = line.split(' - ')

            # Maybe we don't need this; it could all be the sample's identifier
            if len(splitted) > 3:
                self._header['Sample'] = splitted[0].strip(' ')
                self._header['Quantity'] = splitted[1]
                # At present we don't know what this field contains
                self._header['????'] = splitted[2]
                self._header['PPC'] = splitted[3]

            elif len(splitted) == 1:
                self._header['Sample'] = splitted[0].replace(
                    'Quantification of sample', '').strip(' ')

            else:
                self.warn('Unexpected header format', numline=self._numline)
            return 1
        # Save each header field (that we know) and its own value in the dict
        if line.startswith('R.M.S.'):

            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['R.M.S.'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)
            return 0

        if line.startswith('Result status'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Result status'] = splitted[1].replace(
                    '"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Sum before normalization'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Sum'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Normalised to'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Normalized'] = splitted[1].replace('"',
                                                                 '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Sample type'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Sample type'] = splitted[1].strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Initial sample weight (g)'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Initial sample weight'] = splitted[1].replace(
                    '"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Weight after pressing (g)'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Weight after pressing'] = splitted[1].replace(
                    '"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Correction applied for medium'):
            if len(self._header) == 0:
                self.warn('Unexpected header format', numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Correction medium'] = splitted[1].replace(
                    '"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Correction applied for film'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Correction film'] = splitted[1].replace(
                    '"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Used Compound list'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Used compound'] = splitted[1].replace(
                    '"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0
        if line.startswith('Results database:'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Result database'] = splitted[1].replace(
                    '"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if self.columns_name:
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            #Grab column names
            self._end_header = True
            self._columns = self.splitLine(line)
            return 1

        if line.startswith('Results database in'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Database path'] = splitted[1] + splitted[2]
                self.columns_name = True
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 1

        else:
            self._header['Date'] = line
            return 1
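
For the sample line shown in the docstring, the 'Quantification of sample' branch works out roughly as follows (illustrative; the quoting and trailing comma in the real file may shift the exact values):

    line = 'Quantification of sample ESFERA CINZA - 1g H3BO3 -  1:0,5 - NO PPC'
    line = line.replace("Quantification of sample ", "").replace('"', "")
    splitted = line.split(' - ')
    # -> ['ESFERA CINZA', '1g H3BO3', ' 1:0,5', 'NO PPC']
    # len(splitted) > 3, so:
    #   Sample   = 'ESFERA CINZA'
    #   Quantity = '1g H3BO3'
    #   '????'   = ' 1:0,5'   (meaning unknown, as the comment in the code says)
    #   PPC      = 'NO PPC'
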
Example 12
    def __call__(self):
        plone.protect.CheckAuthenticator(self.request)
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        uc = getToolByName(self.context, 'uid_catalog')

        service_title = self.request.get('service_title', '').strip()
        if not service_title:
            return ''

        analysis = uc(UID=self.request.get('analysis_uid', None))
        if analysis:
            analysis = analysis[0].getObject()
            self.request['ajax_load'] = 1
            tmp = LogView(analysis, self.request)
            self.log = tmp.folderitems()
            self.log.reverse()
        else:
            self.log = []

        brains = bsc(portal_type="AnalysisService",
                     title=to_unicode(service_title))
        if not brains:
            return ''

        self.service = brains[0].getObject()

        self.calc = self.service.getCalculation()

        self.partsetup = self.service.getPartitionSetup()

        # convert uids to comma-separated list of display titles
        for i,ps in enumerate(self.partsetup):

            self.partsetup[i]['separate'] = \
                _('Yes') if 'separate' in ps else _('No')

            if type(ps['sampletype']) == str:
                ps['sampletype'] = [ps['sampletype'],]
            sampletypes = []
            for st in ps['sampletype']:
                res = bsc(UID=st)
                sampletypes.append(res and res[0].Title or st)
            self.partsetup[i]['sampletype'] = ", ".join(sampletypes)

            if 'container' in ps:
                if type(ps['container']) == str:
                    self.partsetup[i]['container'] = [ps['container'],]
                try:
                    containers = [bsc(UID=c)[0].Title for c in ps['container']]
                except IndexError:
                    containers = [c for c in ps['container']]
                self.partsetup[i]['container'] = ", ".join(containers)
            else:
                self.partsetup[i]['container'] = ''

            if 'preservation' in ps:
                if type(ps['preservation']) == str:
                    ps['preservation'] = [ps['preservation'],]
                try:
                    preservations = [bsc(UID=c)[0].Title for c in ps['preservation']]
                except IndexError:
                    preservations = [c for c in ps['preservation']]
                self.partsetup[i]['preservation'] = ", ".join(preservations)
            else:
                self.partsetup[i]['preservation'] = ''

        return self.template()
Example 13
    def __call__(self):
        plone.protect.CheckAuthenticator(self.request)
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        uc = getToolByName(self.context, 'uid_catalog')

        service_title = self.request.get('service_title', '').strip()
        if not service_title:
            return ''

        analysis = uc(UID=self.request.get('analysis_uid', None))
        if analysis:
            analysis = analysis[0].getObject()
            self.request['ajax_load'] = 1
            tmp = LogView(analysis, self.request)
            self.log = tmp.folderitems()
            self.log.reverse()
        else:
            self.log = []

        brains = bsc(portal_type="AnalysisService",
                     title=to_unicode(service_title))
        if not brains:
            return ''

        self.service = brains[0].getObject()

        self.calc = self.service.getCalculation()

        self.partsetup = self.service.getPartitionSetup()

        # convert uids to comma-separated list of display titles
        for i, ps in enumerate(self.partsetup):

            self.partsetup[i]['separate'] = \
                _('Yes') if 'separate' in ps else _('No')

            if type(ps['sampletype']) == str:
                ps['sampletype'] = [
                    ps['sampletype'],
                ]
            sampletypes = []
            for st in ps['sampletype']:
                res = bsc(UID=st)
                sampletypes.append(res and res[0].Title or st)
            self.partsetup[i]['sampletype'] = ", ".join(sampletypes)

            if 'container' in ps:
                if type(ps['container']) == str:
                    self.partsetup[i]['container'] = [
                        ps['container'],
                    ]
                try:
                    containers = [bsc(UID=c)[0].Title for c in ps['container']]
                except IndexError:
                    containers = [c for c in ps['container']]
                self.partsetup[i]['container'] = ", ".join(containers)
            else:
                self.partsetup[i]['container'] = ''

            if 'preservation' in ps:
                if type(ps['preservation']) == str:
                    ps['preservation'] = [
                        ps['preservation'],
                    ]
                try:
                    preservations = [
                        bsc(UID=c)[0].Title for c in ps['preservation']
                    ]
                except IndexError:
                    preservations = [c for c in ps['preservation']]
                self.partsetup[i]['preservation'] = ", ".join(preservations)
            else:
                self.partsetup[i]['preservation'] = ''

        return self.template()
Example 14
def get_method_instrument_constraints(context, uids):
    """
        Returns a dictionary with the constraints and rules for
        methods, instruments and results to be applied to each of the
        analyses specified in the param uids (an array of uids).
        See docs/imm_results_entry_behaviour.png for further details
    """
    constraints = {}
    uc = getToolByName(context, 'uid_catalog')
    analyses = uc(portal_type=['Analysis', 'ReferenceAnalysis'],
                  UID=uids)
    cached_servs = {}
    for analysis in analyses:
        if not analysis:
            continue
        analysis = analysis.getObject()
        auid = analysis.UID()
        suid = analysis.getServiceUID()
        refan = analysis.portal_type == 'ReferenceAnalysis'
        cachedkey = "qc" if refan else "re"
        if suid in cached_servs.get(cachedkey, []):
            constraints[auid] = cached_servs[cachedkey][suid]
            continue

        if not cached_servs.get(cachedkey, None):
            cached_servs[cachedkey] = {suid: {}}
        else:
            cached_servs[cachedkey][suid] = {}
        constraints[auid] = {}

        allowed_instruments = analysis.getAllowedInstruments()

        # Analysis allows manual/instrument entry?
        s_mentry = analysis.getManualEntryOfResults()
        s_ientry = analysis.getInstrumentEntryOfResults()
        s_instrums = allowed_instruments if s_ientry else []
        s_instrums = [instr.UID() for instr in s_instrums]
        a_dinstrum = analysis.getInstrument() if s_ientry else None
        s_methods = analysis.getAllowedMethods()
        s_dmethod = analysis.getMethod()
        dmuid = s_dmethod.UID() if s_dmethod else ''
        diuid = a_dinstrum.UID() if a_dinstrum else ''

        # Account for Analysis Services with no default method assigned, or
        # with a default instrument that has no associated method.
        if s_mentry or not s_dmethod:
            s_methods += [None]

        for method in s_methods:
            # Method manual entry?
            m_mentry = method.isManualEntryOfResults() if method else True

            instrs = []
            if method:
                # Instruments available for this method and analysis?
                instrs = [i for i in method.getInstruments()
                          if i.UID() in s_instrums]
            else:
                # What about instruments without a method assigned?
                instrs = [i for i in allowed_instruments
                          if i.UID() in s_instrums and not i.getMethods()]

            instuids = [i.UID() for i in instrs]
            v_instrobjs = [i for i in instrs if i.isValid()]
            v_instrs = [i.UID() for i in v_instrobjs]
            muid = method.UID() if method else ''

            # PREMISES
            # p1: Analysis allows manual entry?
            # p2: Analysis allows instrument entry?
            # p3: Method selected and non empty?
            # p4: Method allows manual entry?
            # p5: At least one instrument available for this method?
            # p6: Valid instruments available?
            # p7: All instruments valid?
            # p8: Method allows the service's default instrument?
            # p9: Default instrument valid?
            premises = [
                "R" if not refan else 'Q',
                "Y" if s_mentry else "N",
                "Y" if s_ientry else "N",
                "Y" if method else "N",
                "Y" if m_mentry else "N",
                "Y" if instrs else "N",
                "Y" if v_instrs or not instrs else "N",
                "Y" if len(v_instrs) == len(instrs) else "N",
                "Y" if diuid in instuids else "N",
                "Y" if a_dinstrum and a_dinstrum.isValid() else "N",
            ]
            tprem = ''.join(premises)

            fiuid = v_instrs[0] if v_instrs else ''
            instrtitle = to_unicode(a_dinstrum.Title()) if a_dinstrum else ''
            iinstrs = ', '.join([to_unicode(i.Title()) for i in instrs
                                 if i.UID() not in v_instrs])
            dmeth = to_unicode(method.Title()) if method else ''
            m1 = _("Invalid instruments are not displayed: %s") % iinstrs
            m2 = _("Default instrument %s is not valid") % instrtitle
            m3 = _("No valid instruments available: %s ") % iinstrs
            m4 = _("Manual entry of results for method %s is not allowed "
                   "and no valid instruments found: %s") % (dmeth, iinstrs)
            m5 = _("The method %s is not valid: no manual entry allowed "
                   "and no instrument assigned") % dmeth
            m6 = _("The method %s is not valid: only instrument entry for "
                   "this analysis is allowed, but the method has no "
                   "instrument assigned") % dmeth
            m7 = _("Only instrument entry for this analysis is allowed, "
                   "but there is no instrument assigned")

            """
            Matrix dict keys char positions: (True: Y, False: N)
              0: (R)egular analysis or (Q)C analysis
              1: Analysis allows manual entry?
              2: Analysis allows instrument entry?
              3: Method is not None?
              4: Method allows manual entry?
              5: At least one instrument available for the method?
              6: Valid instruments available?
              7: All instruments valid?
              8: Method allows the service's default instrument?
              9: Default instrument valid?

            Matrix dict values array indexes:
              0: Method list visible? YES:1, NO:0, YES(a):2, YES(r):3
              1: Add "None" in methods list? YES:1, NO:0, NO(g):2
              2: Instr. list visible? YES:1, NO:0
              3: Add "None" in instrums list? YES: 1, NO:0
              4: UID of the selected instrument or '' if None
              5: Results field editable? YES: 1, NO:0
              6: Alert message string

            See docs/imm_results_entry_behaviour.png for further details
            """
            matrix = {
                # Regular analyses
                'RYYYYYYYY': [1, 1, 1, 1, diuid, 1, ''],  # B1
                'RYYYYYYYN': [1, 1, 1, 1, '', 1, ''],  # B2
                'RYYYYYYNYY': [1, 1, 1, 1, diuid, 1, m1],  # B3
                'RYYYYYYNYN': [1, 1, 1, 1, '', 1, m2],  # B4
                'RYYYYYYNN': [1, 1, 1, 1, '', 1, m1],  # B5
                'RYYYYYN': [1, 1, 1, 1, '', 1, m3],  # B6
                'RYYYYN': [1, 1, 1, 1, '', 1, ''],  # B7
                'RYYYNYYYY': [1, 1, 1, 0, diuid, 1, ''],  # B8
                'RYYYNYYYN': [1, 1, 1, 0, fiuid, 1, ''],  # B9
                'RYYYNYYNYY': [1, 1, 1, 0, diuid, 1, m1],  # B10
                'RYYYNYYNYN': [1, 1, 1, 1, '', 0, m2],  # B11
                'RYYYNYYNN': [1, 1, 1, 0, fiuid, 1, m1],  # B12
                'RYYYNYN': [1, 1, 1, 1, '', 0, m4],  # B13
                'RYYYNN': [1, 1, 1, 1, '', 0, m5],  # B14
                'RYYNYYYYY': [1, 1, 1, 1, diuid, 1, ''],  # B15
                'RYYNYYYYN': [1, 1, 1, 1, '', 1, ''],  # B16
                'RYYNYYYNYY': [1, 1, 1, 1, diuid, 1, m1],  # B17
                'RYYNYYYNYN': [1, 1, 1, 1, '', 1, m2],  # B18
                'RYYNYYYNN': [1, 1, 1, 1, '', 1, m1],  # B19
                'RYYNYYN': [1, 1, 1, 1, '', 1, m3],  # B20
                'RYYNYN': [1, 1, 1, 1, '', 1, ''],  # B21
                'RYNY': [2, 0, 0, 0, '', 1, ''],  # B22
                'RYNN': [0, 0, 0, 0, '', 1, ''],  # B23
                'RNYYYYYYY': [3, 2, 1, 1, diuid, 1, ''],  # B24
                'RNYYYYYYN': [3, 2, 1, 1, '', 1, ''],  # B25
                'RNYYYYYNYY': [3, 2, 1, 1, diuid, 1, m1],  # B26
                'RNYYYYYNYN': [3, 2, 1, 1, '', 1, m2],  # B27
                'RNYYYYYNN': [3, 2, 1, 1, '', 1, m1],  # B28
                'RNYYYYN': [3, 2, 1, 1, '', 1, m3],  # B29
                'RNYYYN': [3, 2, 1, 1, '', 0, m6],  # B30
                'RNYYNYYYY': [3, 2, 1, 0, diuid, 1, ''],  # B31
                'RNYYNYYYN': [3, 2, 1, 0, fiuid, 1, ''],  # B32
                'RNYYNYYNYY': [3, 2, 1, 0, diuid, 1, m1],  # B33
                'RNYYNYYNYN': [3, 2, 1, 1, '', 0, m2],  # B34
                'RNYYNYYNN': [3, 2, 1, 0, fiuid, 1, m1],  # B35
                'RNYYNYN': [3, 2, 1, 1, '', 0, m3],  # B36
                'RNYYNN': [3, 2, 1, 1, '', 0, m6],  # B37
                'RNYNYYYYY': [3, 1, 1, 0, diuid, 1, ''],  # B38
                'RNYNYYYYN': [3, 1, 1, 0, fiuid, 1, ''],  # B39
                'RNYNYYYNYY': [3, 1, 1, 0, diuid, 1, m1],  # B40
                'RNYNYYYNYN': [3, 1, 1, 1, '', 0, m2],  # B41
                'RNYNYYYNN': [3, 1, 1, 0, fiuid, 1, m1],  # B42
                'RNYNYYN': [3, 1, 1, 0, '', 0, m3],  # B43
                'RNYNYN': [3, 1, 1, 0, '', 0, m7],  # B44
                # QC Analyses
                'QYYYYYYYY': [1, 1, 1, 1, diuid, 1, ''],  # C1
                'QYYYYYYYN': [1, 1, 1, 1, '', 1, ''],  # C2
                'QYYYYYYNYY': [1, 1, 1, 1, diuid, 1, ''],  # C3
                'QYYYYYYNYN': [1, 1, 1, 1, diuid, 1, ''],  # C4
                'QYYYYYYNN': [1, 1, 1, 1, '', 1, ''],  # C5
                'QYYYYYN': [1, 1, 1, 1, '', 1, ''],  # C6
                'QYYYYN': [1, 1, 1, 1, '', 1, ''],  # C7
                'QYYYNYYYY': [1, 1, 1, 0, diuid, 1, ''],  # C8
                'QYYYNYYYN': [1, 1, 1, 0, fiuid, 1, ''],  # C9
                'QYYYNYYNYY': [1, 1, 1, 0, diuid, 1, ''],  # C10
                'QYYYNYYNYN': [1, 1, 1, 0, diuid, 1, ''],  # C11
                'QYYYNYYNN': [1, 1, 1, 0, fiuid, 1, ''],  # C12
                'QYYYNYN': [1, 1, 1, 0, fiuid, 1, ''],  # C13
                'QYYYNN': [1, 1, 1, 1, '', 0, m5],  # C14
                'QYYNYYYYY': [1, 1, 1, 1, diuid, 1, ''],  # C15
                'QYYNYYYYN': [1, 1, 1, 1, '', 1, ''],  # C16
                'QYYNYYYNYY': [1, 1, 1, 1, diuid, 1, ''],  # C17
                'QYYNYYYNYN': [1, 1, 1, 1, diuid, 1, ''],  # C18
                'QYYNYYYNN': [1, 1, 1, 1, fiuid, 1, ''],  # C19
                'QYYNYYN': [1, 1, 1, 1, diuid, 1, ''],  # C20
                'QYYNYN': [1, 1, 1, 1, '', 1, ''],  # C21
                'QYNY': [2, 0, 0, 0, '', 1, ''],  # C22
                'QYNN': [0, 0, 0, 0, '', 1, ''],  # C23
                'QNYYYYYYY': [3, 2, 1, 1, diuid, 1, ''],  # C24
                'QNYYYYYYN': [3, 2, 1, 1, '', 1, ''],  # C25
                'QNYYYYYNYY': [3, 2, 1, 1, diuid, 1, ''],  # C26
                'QNYYYYYNYN': [3, 2, 1, 1, diuid, 1, ''],  # C27
                'QNYYYYYNN': [3, 2, 1, 1, '', 1, ''],  # C28
                'QNYYYYN': [3, 2, 1, 1, '', 1, ''],  # C29
                'QNYYYN': [3, 2, 1, 1, '', 0, m6],  # C30
                'QNYYNYYYY': [3, 2, 1, 0, diuid, 1, ''],  # C31
                'QNYYNYYYN': [3, 2, 1, 0, fiuid, 1, ''],  # C32
                'QNYYNYYNYY': [3, 2, 1, 0, diuid, 1, ''],  # C33
                'QNYYNYYNYN': [3, 2, 1, 0, diuid, 1, ''],  # C34
                'QNYYNYYNN': [3, 2, 1, 0, fiuid, 1, ''],  # C35
                'QNYYNYN': [3, 2, 1, 0, fiuid, 1, ''],  # C36
                'QNYYNN': [3, 2, 1, 1, '', 0, m5],  # C37
                'QNYNYYYYY': [3, 1, 1, 0, diuid, 1, ''],  # C38
                'QNYNYYYYN': [3, 1, 1, 0, fiuid, 1, ''],  # C39
                'QNYNYYYNYY': [3, 1, 1, 0, diuid, 1, ''],  # C40
                'QNYNYYYNYN': [3, 1, 1, 0, diuid, 1, ''],  # C41
                'QNYNYYYNN': [3, 1, 1, 0, fiuid, 1, ''],  # C42
                'QNYNYYN': [3, 1, 1, 0, fiuid, 1, ''],  # C43
                'QNYNYN': [3, 1, 1, 1, '', 0, m7],  # C44
            }
            targ = [v for k, v in matrix.items() if tprem.startswith(k)]
            if not targ:
                targ = [[1, 1, 1, 1, '', 0, 'Key not found: %s' % tprem], ]
            targ = targ[0]
            atitle = analysis.Title() if analysis else "None"
            mtitle = method.Title() if method else "None"
            instdi = {}
            if refan and instrs:
                instdi = {i.UID(): i.Title() for i in instrs}
            elif not refan and v_instrobjs:
                instdi = {i.UID(): i.Title() for i in v_instrobjs}
            targ += [instdi, mtitle, atitle, tprem]
            constraints[auid][muid] = targ
            cached_servs[cachedkey][suid][muid] = targ
    return constraints
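
The matrix lookup relies on prefix matching: tprem is always ten characters long, while shorter keys deliberately ignore the premises that stop mattering once an earlier one is 'N'. A minimal, self-contained illustration with hypothetical values:

    matrix = {'RYNY': 'B22', 'RYNN': 'B23'}   # trimmed-down stand-in for the real matrix
    tprem = 'RYNYNNNYNN'                      # hypothetical premise string
    targ = [v for k, v in matrix.items() if tprem.startswith(k)]
    # -> ['B22']: a regular, manual-entry-only analysis with a method selected
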
Example 15
def get_method_instrument_constraints(context, uids):
    """
        Returns a dictionary with the constraints and rules for
        methods, instruments and results to be applied to each of the
        analyses specified in the param uids (an array of uids).
        See docs/imm_results_entry_behaviour.png for further details
    """
    constraints = {}
    uc = getToolByName(context, 'uid_catalog')
    analyses = uc(portal_type=['Analysis', 'ReferenceAnalysis'], UID=uids)
    cached_servs = {}
    for analysis in analyses:
        if not analysis:
            continue
        analysis = analysis.getObject()
        auid = analysis.UID()
        suid = analysis.getServiceUID()
        refan = analysis.portal_type == 'ReferenceAnalysis'
        cachedkey = "qc" if refan else "re"
        if suid in cached_servs.get(cachedkey, []):
            constraints[auid] = cached_servs[cachedkey][suid]
            continue

        if not cached_servs.get(cachedkey, None):
            cached_servs[cachedkey] = {suid: {}}
        else:
            cached_servs[cachedkey][suid] = {}
        constraints[auid] = {}

        allowed_instruments = analysis.getAllowedInstruments()

        # Analysis allows manual/instrument entry?
        s_mentry = analysis.getManualEntryOfResults()
        s_ientry = analysis.getInstrumentEntryOfResults()
        s_instrums = allowed_instruments if s_ientry else []
        s_instrums = [instr.UID() for instr in s_instrums]
        a_dinstrum = analysis.getInstrument() if s_ientry else None
        s_methods = analysis.getAllowedMethods()
        s_dmethod = analysis.getMethod()
        dmuid = s_dmethod.UID() if s_dmethod else ''
        diuid = a_dinstrum.UID() if a_dinstrum else ''

        # Account for Analysis Services with no default method assigned, or
        # with a default instrument that has no associated method.
        if s_mentry or not s_dmethod:
            s_methods += [None]

        for method in s_methods:
            # Method manual entry?
            m_mentry = method.isManualEntryOfResults() if method else True

            instrs = []
            if method:
                # Instruments available for this method and analysis?
                instrs = [
                    i for i in method.getInstruments() if i.UID() in s_instrums
                ]
            else:
                # What about instruments without a method assigned?
                instrs = [
                    i for i in allowed_instruments
                    if i.UID() in s_instrums and not i.getMethods()
                ]

            instuids = [i.UID() for i in instrs]
            v_instrobjs = [i for i in instrs if i.isValid()]
            v_instrs = [i.UID() for i in v_instrobjs]
            muid = method.UID() if method else ''

            # PREMISES
            # p1: Analysis allows manual entry?
            # p2: Analysis allows instrument entry?
            # p3: Method selected and non empty?
            # p4: Method allows manual entry?
            # p5: At least one instrument available for this method?
            # p6: Valid instruments available?
            # p7: All instruments valid?
            # p8: Method allows the service's default instrument?
            # p9: Default instrument valid?
            premises = [
                "R" if not refan else 'Q',
                "Y" if s_mentry else "N",
                "Y" if s_ientry else "N",
                "Y" if method else "N",
                "Y" if m_mentry else "N",
                "Y" if instrs else "N",
                "Y" if v_instrs or not instrs else "N",
                "Y" if len(v_instrs) == len(instrs) else "N",
                "Y" if diuid in instuids else "N",
                "Y" if a_dinstrum and a_dinstrum.isValid() else "N",
            ]
            tprem = ''.join(premises)

            fiuid = v_instrs[0] if v_instrs else ''
            instrtitle = to_unicode(a_dinstrum.Title()) if a_dinstrum else ''
            iinstrs = ', '.join([
                to_unicode(i.Title()) for i in instrs
                if i.UID() not in v_instrs
            ])
            dmeth = to_unicode(method.Title()) if method else ''
            m1 = _("Invalid instruments are not displayed: %s") % iinstrs
            m2 = _("Default instrument %s is not valid") % instrtitle
            m3 = _("No valid instruments available: %s ") % iinstrs
            m4 = _("Manual entry of results for method %s is not allowed "
                   "and no valid instruments found: %s") % (dmeth, iinstrs)
            m5 = _("The method %s is not valid: no manual entry allowed "
                   "and no instrument assigned") % dmeth
            m6 = _("The method %s is not valid: only instrument entry for "
                   "this analysis is allowed, but the method has no "
                   "instrument assigned") % dmeth
            m7 = _("Only instrument entry for this analysis is allowed, "
                   "but there is no instrument assigned")
            """
            Matrix dict keys char positions: (True: Y, False: N)
              0: (R)egular analysis or (Q)C analysis
              1: Analysis allows manual entry?
              2: Analysis allows instrument entry?
              3: Method is not None?
              4: Method allows manual entry?
              5: At least one instrument available for the method?
              6: Valid instruments available?
              7: All instruments valid?
              8: Method allows the service's default instrument?
              9: Default instrument valid?

            Matrix dict values array indexes:
              0: Method list visible? YES:1, NO:0, YES(a):2, YES(r):3
              1: Add "None" in methods list? YES:1, NO:0, NO(g):2
              2: Instr. list visible? YES:1, NO:0
              3: Add "None" in instrums list? YES: 1, NO:0
              4: UID of the selected instrument or '' if None
              5: Results field editable? YES: 1, NO:0
              6: Alert message string

            See docs/imm_results_entry_behaviour.png for further details
            """
            matrix = {
                # Regular analyses
                'RYYYYYYYY': [1, 1, 1, 1, diuid, 1, ''],  # B1
                'RYYYYYYYN': [1, 1, 1, 1, '', 1, ''],  # B2
                'RYYYYYYNYY': [1, 1, 1, 1, diuid, 1, m1],  # B3
                'RYYYYYYNYN': [1, 1, 1, 1, '', 1, m2],  # B4
                'RYYYYYYNN': [1, 1, 1, 1, '', 1, m1],  # B5
                'RYYYYYN': [1, 1, 1, 1, '', 1, m3],  # B6
                'RYYYYN': [1, 1, 1, 1, '', 1, ''],  # B7
                'RYYYNYYYY': [1, 1, 1, 0, diuid, 1, ''],  # B8
                'RYYYNYYYN': [1, 1, 1, 0, fiuid, 1, ''],  # B9
                'RYYYNYYNYY': [1, 1, 1, 0, diuid, 1, m1],  # B10
                'RYYYNYYNYN': [1, 1, 1, 1, '', 0, m2],  # B11
                'RYYYNYYNN': [1, 1, 1, 0, fiuid, 1, m1],  # B12
                'RYYYNYN': [1, 1, 1, 1, '', 0, m4],  # B13
                'RYYYNN': [1, 1, 1, 1, '', 0, m5],  # B14
                'RYYNYYYYY': [1, 1, 1, 1, diuid, 1, ''],  # B15
                'RYYNYYYYN': [1, 1, 1, 1, '', 1, ''],  # B16
                'RYYNYYYNYY': [1, 1, 1, 1, diuid, 1, m1],  # B17
                'RYYNYYYNYN': [1, 1, 1, 1, '', 1, m2],  # B18
                'RYYNYYYNN': [1, 1, 1, 1, '', 1, m1],  # B19
                'RYYNYYN': [1, 1, 1, 1, '', 1, m3],  # B20
                'RYYNYN': [1, 1, 1, 1, '', 1, ''],  # B21
                'RYNY': [2, 0, 0, 0, '', 1, ''],  # B22
                'RYNN': [0, 0, 0, 0, '', 1, ''],  # B23
                'RNYYYYYYY': [3, 2, 1, 1, diuid, 1, ''],  # B24
                'RNYYYYYYN': [3, 2, 1, 1, '', 1, ''],  # B25
                'RNYYYYYNYY': [3, 2, 1, 1, diuid, 1, m1],  # B26
                'RNYYYYYNYN': [3, 2, 1, 1, '', 1, m2],  # B27
                'RNYYYYYNN': [3, 2, 1, 1, '', 1, m1],  # B28
                'RNYYYYN': [3, 2, 1, 1, '', 1, m3],  # B29
                'RNYYYN': [3, 2, 1, 1, '', 0, m6],  # B30
                'RNYYNYYYY': [3, 2, 1, 0, diuid, 1, ''],  # B31
                'RNYYNYYYN': [3, 2, 1, 0, fiuid, 1, ''],  # B32
                'RNYYNYYNYY': [3, 2, 1, 0, diuid, 1, m1],  # B33
                'RNYYNYYNYN': [3, 2, 1, 1, '', 0, m2],  # B34
                'RNYYNYYNN': [3, 2, 1, 0, fiuid, 1, m1],  # B35
                'RNYYNYN': [3, 2, 1, 1, '', 0, m3],  # B36
                'RNYYNN': [3, 2, 1, 1, '', 0, m6],  # B37
                'RNYNYYYYY': [3, 1, 1, 0, diuid, 1, ''],  # B38
                'RNYNYYYYN': [3, 1, 1, 0, fiuid, 1, ''],  # B39
                'RNYNYYYNYY': [3, 1, 1, 0, diuid, 1, m1],  # B40
                'RNYNYYYNYN': [3, 1, 1, 1, '', 0, m2],  # B41
                'RNYNYYYNN': [3, 1, 1, 0, fiuid, 1, m1],  # B42
                'RNYNYYN': [3, 1, 1, 0, '', 0, m3],  # B43
                'RNYNYN': [3, 1, 1, 0, '', 0, m7],  # B44
                # QC Analyses
                'QYYYYYYYY': [1, 1, 1, 1, diuid, 1, ''],  # C1
                'QYYYYYYYN': [1, 1, 1, 1, '', 1, ''],  # C2
                'QYYYYYYNYY': [1, 1, 1, 1, diuid, 1, ''],  # C3
                'QYYYYYYNYN': [1, 1, 1, 1, diuid, 1, ''],  # C4
                'QYYYYYYNN': [1, 1, 1, 1, '', 1, ''],  # C5
                'QYYYYYN': [1, 1, 1, 1, '', 1, ''],  # C6
                'QYYYYN': [1, 1, 1, 1, '', 1, ''],  # C7
                'QYYYNYYYY': [1, 1, 1, 0, diuid, 1, ''],  # C8
                'QYYYNYYYN': [1, 1, 1, 0, fiuid, 1, ''],  # C9
                'QYYYNYYNYY': [1, 1, 1, 0, diuid, 1, ''],  # C10
                'QYYYNYYNYN': [1, 1, 1, 0, diuid, 1, ''],  # C11
                'QYYYNYYNN': [1, 1, 1, 0, fiuid, 1, ''],  # C12
                'QYYYNYN': [1, 1, 1, 0, fiuid, 1, ''],  # C13
                'QYYYNN': [1, 1, 1, 1, '', 0, m5],  # C14
                'QYYNYYYYY': [1, 1, 1, 1, diuid, 1, ''],  # C15
                'QYYNYYYYN': [1, 1, 1, 1, '', 1, ''],  # C16
                'QYYNYYYNYY': [1, 1, 1, 1, diuid, 1, ''],  # C17
                'QYYNYYYNYN': [1, 1, 1, 1, diuid, 1, ''],  # C18
                'QYYNYYYNN': [1, 1, 1, 1, fiuid, 1, ''],  # C19
                'QYYNYYN': [1, 1, 1, 1, diuid, 1, ''],  # C20
                'QYYNYN': [1, 1, 1, 1, '', 1, ''],  # C21
                'QYNY': [2, 0, 0, 0, '', 1, ''],  # C22
                'QYNN': [0, 0, 0, 0, '', 1, ''],  # C23
                'QNYYYYYYY': [3, 2, 1, 1, diuid, 1, ''],  # C24
                'QNYYYYYYN': [3, 2, 1, 1, '', 1, ''],  # C25
                'QNYYYYYNYY': [3, 2, 1, 1, diuid, 1, ''],  # C26
                'QNYYYYYNYN': [3, 2, 1, 1, diuid, 1, ''],  # C27
                'QNYYYYYNN': [3, 2, 1, 1, '', 1, ''],  # C28
                'QNYYYYN': [3, 2, 1, 1, '', 1, ''],  # C29
                'QNYYYN': [3, 2, 1, 1, '', 0, m6],  # C30
                'QNYYNYYYY': [3, 2, 1, 0, diuid, 1, ''],  # C31
                'QNYYNYYYN': [3, 2, 1, 0, fiuid, 1, ''],  # C32
                'QNYYNYYNYY': [3, 2, 1, 0, diuid, 1, ''],  # C33
                'QNYYNYYNYN': [3, 2, 1, 0, diuid, 1, ''],  # C34
                'QNYYNYYNN': [3, 2, 1, 0, fiuid, 1, ''],  # C35
                'QNYYNYN': [3, 2, 1, 0, fiuid, 1, ''],  # C36
                'QNYYNN': [3, 2, 1, 1, '', 0, m5],  # C37
                'QNYNYYYYY': [3, 1, 1, 0, diuid, 1, ''],  # C38
                'QNYNYYYYN': [3, 1, 1, 0, fiuid, 1, ''],  # C39
                'QNYNYYYNYY': [3, 1, 1, 0, diuid, 1, ''],  # C40
                'QNYNYYYNYN': [3, 1, 1, 0, diuid, 1, ''],  # C41
                'QNYNYYYNN': [3, 1, 1, 0, fiuid, 1, ''],  # C42
                'QNYNYYN': [3, 1, 1, 0, fiuid, 1, ''],  # C43
                'QNYNYN': [3, 1, 1, 1, '', 0, m7],  # C44
            }
            targ = [v for k, v in matrix.items() if tprem.startswith(k)]
            if not targ:
                targ = [
                    [1, 1, 1, 1, '', 0,
                     'Key not found: %s' % tprem],
                ]
            targ = targ[0]
            atitle = analysis.Title() if analysis else "None"
            mtitle = method.Title() if method else "None"
            instdi = {}
            if refan and instrs:
                instdi = {i.UID(): i.Title() for i in instrs}
            elif not refan and v_instrobjs:
                instdi = {i.UID(): i.Title() for i in v_instrobjs}
            targ += [instdi, mtitle, atitle, tprem]
            constraints[auid][muid] = targ
            cached_servs[cachedkey][suid][muid] = targ
    return constraints
Example 16
    def parse_headerline(self, line):
        #Process incoming header line
        """
        29/11/2013 10:15:44
        PANalytical
        "Quantification of sample ESFERA CINZA - 1g H3BO3 -  1:0,5 - NO PPC",

        R.M.S.:,"0,035"
        Result status:,
        Sum before normalization:,"119,5 %"
        Normalised to:,"100,0 %"
        Sample type:,Pressed powder
        Initial sample weight (g):,"2,000"
        Weight after pressing (g):,"3,000"
        Correction applied for medium:,No
        Correction applied for film:,No
        Used Compound list:,Oxides
        Results database:,omnian 2013
        Results database in:,c:\panalytical\superq\userdata
        """

        if line.startswith('"Quantification of sample') or line.startswith('Quantification of sample'):
            line = to_unicode(line)
            if len(self._header) == 0:
                self.warn('Unexpected header format', numline=self._numline)
                return -1
            # Remove the unneeded prefix and the double quotes to obtain
            # the bare sample name
            line = line.replace("Quantification of sample ", "")
            line = line.replace('"', "")
            splitted = line.split(' - ')

            # Maybe we don't need this; it could all be the sample's identifier
            if len(splitted) > 3:
                self._header['Sample'] = splitted[0].strip(' ')
                self._header['Quantity'] = splitted[1]
                # At present we don't know what this field contains
                self._header['????'] = splitted[2]
                self._header['PPC'] = splitted[3]

            elif len(splitted) == 1:
                self._header['Sample'] = splitted[0].replace(
                    'Quantification of sample', '').strip(' ')

            else:
                self.warn('Unexpected header format', numline=self._numline)
            return 1
        # Save each header field (that we know) and its own value in the dict
        if line.startswith('R.M.S.'):

            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['R.M.S.'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)
            return 0

        if line.startswith('Result status'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Result status'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Sum before normalization'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Sum'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Normalised to'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Normalized'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Sample type'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Sample type'] = splitted[1].strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Initial sample weight (g)'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Initial sample weight'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Weight after pressing (g)'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Weight after pressing'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Correction applied for medium'):
            if len(self._header) == 0:
                self.warn('Unexpected header format', numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Correction medium'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Correction applied for film'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Correction film'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

        if line.startswith('Used Compound list'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Used compound'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0
        if line.startswith('Results database:'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Result database'] = splitted[1].replace('"', '').strip()
            else:
                self.warn('Unexpected header format', numline=self._numline)

            return 0

       
        if self.columns_name:
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1

            #Grab column names
            self._end_header = True
            self._columns = self.splitLine(line)
            return 1

        if line.startswith('Results database in'):
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1
            
            splitted = self.splitLine(line)
            if len(splitted) > 1:
                self._header['Database path'] = splitted[1]+splitted[2]
                self.columns_name = True
            else:
                self.warn('Unexpected header format', numline=self._numline)
                
            return 1
            
        else:
            self._header['Date'] = line
            return 1