Example #1
0
 def create_analysisservices(self, as_data):
     """Create AnalysisService objects for use in the tests.

     :param as_data: list of dicts, e.g.
         [{'title': 'xxx', 'ShortTitle': 'xxx', 'Keyword': 'xxx',
           'PointOfCapture': 'Lab', 'Category': <category object>,
           'Methods': [<method object>, ...]}, ...]
     :return: list of the created AnalysisService objects
     """
     setup_folder = self.portal.bika_setup.bika_analysisservices
     created = []
     for spec in as_data:
         obj_id = setup_folder.invokeFactory('AnalysisService', id=tmpID())
         service = setup_folder[obj_id]
         service.edit(
             title=spec['title'],
             ShortTitle=spec.get('ShortTitle', ''),
             Keyword=spec.get('Keyword', ''),
             PointOfCapture=spec.get('PointOfCapture', 'Lab'),
             Category=spec.get('Category', ''),
             Methods=spec.get('Methods', []),
         )
         service.unmarkCreationFlag()
         renameAfterCreation(service)
         created.append(service)
     return created
Example #2
0
 def create_methods(self, methods_data):
     """Create Method objects for use in the tests.

     :param methods_data: list of dicts, e.g.
         [{'title': 'xxx', 'description': 'xxx', 'Instructions': 'xxx',
           'MethodID': 'xxx', 'Accredited': 'False/True'}, ...]
     :return: list of the created Method objects
     """
     target = self.portal.bika_setup.methods
     created = []
     for entry in methods_data:
         obj_id = target.invokeFactory('Method', id=tmpID())
         method = target[obj_id]
         method.edit(
             title=entry['title'],
             description=entry.get('description', ''),
             Instructions=entry.get('Instructions', ''),
             MethodID=entry.get('MethodID', ''),
             Accredited=entry.get('Accredited', True),
         )
         method.unmarkCreationFlag()
         renameAfterCreation(method)
         created.append(method)
     return created
    def load_analysis_profiles(self, sheet):
        """Import ARProfile objects from the given worksheet.

        Row 2 of the sheet holds the field names; data rows start at row 4.
        Raises when a listed service keyword does not resolve in the catalog.
        """
        row_count = sheet.get_highest_row()
        col_count = sheet.get_highest_column()
        grid = [[sheet.cell(row=r, column=c).value for c in range(col_count)]
                for r in range(row_count)]
        fields = grid[1]
        folder = self.context.bika_setup.bika_arprofiles
        for values in grid[3:]:
            record = dict(zip(fields, values))
            _id = folder.invokeFactory("ARProfile", id="tmp")
            profile = folder[_id]
            keywords = [k.strip() for k in unicode(record["Service"]).split(",")]
            proxies = self.bsc(portal_type="AnalysisService", getKeyword=keywords)
            # Every keyword must match exactly one catalog entry.
            if len(proxies) != len(keywords):
                raise Exception(
                    "Analysis Profile services invalid.  Got %s, found %s"
                    % (keywords, [p.getKeyword for p in proxies])
                )
            profile.edit(
                title=unicode(record["title"]),
                description=unicode(record["description"]),
                Service=[p.UID for p in proxies],
                ProfileKey=unicode(record["ProfileKey"]),
            )
            profile.unmarkCreationFlag()
            renameAfterCreation(profile)
 def load_analysis_specifications(self, sheet):
     """Import AnalysisSpec objects from the given worksheet.

     Row 2 holds the field names; data rows start at row 4. A row with a
     SampleType starts a new spec; following rows without a SampleType
     contribute result-range entries to that spec.
     """
     nr_rows = sheet.get_highest_row()
     nr_cols = sheet.get_highest_column()
     rows = [[sheet.cell(row=row_nr, column=col_nr).value
              for col_nr in range(nr_cols)] for row_nr in range(nr_rows)]
     fields = rows[1]
     folder = self.context.bika_setup.bika_analysisspecs
     obj = None
     ResultsRange = []
     for row in rows[3:]:
         row = dict(zip(fields, row))
         if row["SampleType"]:
             # A new spec starts: flush the ranges collected for the
             # previous one (guard obj so a leading range row can't crash).
             if obj is not None and ResultsRange:
                 obj.setResultsRange(ResultsRange)
             ResultsRange = []
             _id = folder.invokeFactory("AnalysisSpec", id="tmp")
             obj = folder[_id]
             SampleType = self.sampletypes[row["SampleType"]]
             obj.edit(SampleType=SampleType.UID(), title=row["SampleType"])
             obj.unmarkCreationFlag()
             renameAfterCreation(obj)
         else:
             ResultsRange.append(
                 {
                     "keyword": row["keyword"],
                     "min": str(row["min"]),
                     "max": str(row["max"]),
                     "error": str(row["error"]),
                 }
             )
     # BUGFIX: the ranges accumulated for the LAST spec were previously
     # dropped because they were only flushed when a new spec started.
     if obj is not None and ResultsRange:
         obj.setResultsRange(ResultsRange)
    def load_methods(self, sheet):
        """Import Method objects from the given worksheet.

        Row 2 holds the field names; data rows start at row 4. Populates
        self.methods keyed by method title. A MethodDocument column names a
        file under setupdata/<file_name>/methods/ to attach.
        """
        nr_rows = sheet.get_highest_row()
        nr_cols = sheet.get_highest_column()
        rows = [[sheet.cell(row=row_nr, column=col_nr).value
                 for col_nr in range(nr_cols)] for row_nr in range(nr_rows)]
        fields = rows[1]
        # NOTE(review): reads from self.context.bika_methods, unlike the
        # other loaders which use bika_setup subfolders — confirm intended.
        folder = self.context.bika_methods
        self.methods = {}
        for row in rows[3:]:
            row = dict(zip(fields, row))
            _id = folder.invokeFactory("Method", id="tmp")
            obj = folder[_id]

            obj.edit(
                title=unicode(row["title"]),
                description=unicode(row["description"]),
                Instructions=unicode(row["Instructions"]),
            )

            if row["MethodDocument"]:
                path = resource_filename(
                    "bika.lims",
                    "setupdata/%s/methods/%s"
                    % (self.file_name, row["MethodDocument"]),
                )
                # BUGFIX: close the file handle; it was previously leaked
                # via open(path, "rb").read().
                with open(path, "rb") as doc_file:
                    obj.setMethodDocument(doc_file.read())

            obj.unmarkCreationFlag()
            renameAfterCreation(obj)
            self.methods[unicode(row["title"])] = obj
Example #6
0
    def __call__(self):
        """Handle viewlet submission: validate inputs, create an InvoiceBatch
        for the requested period, then redirect back to the context."""
        if "viewlet_submitted" not in self.request.form:
            return
        try:
            data = self.validate_form_inputs()
        except ValidationError as e:
            self.form_error(e.message)
            return

        from Products.CMFPlone.utils import _createObjectByType
        from bika.lims.utils import tmpID
        batch = _createObjectByType('InvoiceBatch', self.context, tmpID(),
                                    title=data['title'])
        batch.unmarkCreationFlag()
        batch.edit(
            Project=data['project_uid'],
            Services=data['services'],
            BatchStartDate=data['start_date'],
            BatchEndDate=data['end_date'],
        )
        renameAfterCreation(batch)
        batch.processForm()
        msg = u'Invoice for period "%s" to "%s" created.' % (
            data['start_date'], data['end_date'])
        self.context.plone_utils.addPortalMessage(msg)
        self.request.response.redirect(self.context.absolute_url())
    def load_lab_departments(self, sheet):
        """Import Department objects and link each to its manager LabContact.

        Populates self.departments keyed by title, and back-fills Department
        references on contacts collected by load_lab_contacts.
        """
        self.departments = {}
        lab_contacts = self.bsc(portal_type="LabContact")
        row_count = sheet.get_highest_row()
        col_count = sheet.get_highest_column()
        grid = [[sheet.cell(row=r, column=c).value for c in range(col_count)]
                for r in range(row_count)]
        fields = grid[1]
        folder = self.context.bika_setup.bika_departments
        for values in grid[3:]:
            record = dict(zip(fields, values))
            _id = folder.invokeFactory("Department", id="tmp")
            dept = folder[_id]
            wanted = unicode(record["_LabContact_Fullname"])
            manager = None
            for brain in lab_contacts:
                contact = brain.getObject()
                if contact.getFullname() == wanted:
                    manager = contact
                    break
            if not manager:
                message = "Error: lookup of '%s' in LabContacts/Fullname failed." % wanted
                self.plone_utils.addPortalMessage(message)
                raise Exception(message)
            dept.edit(title=unicode(record["title"]),
                      description=unicode(record["description"]),
                      Manager=manager.UID())
            self.departments[unicode(record["title"])] = dept
            dept.unmarkCreationFlag()
            renameAfterCreation(dept)

            # Point already-imported lab contacts at their department.
            if hasattr(self, "lab_contacts"):
                for contact in self.lab_contacts:
                    if contact["Department"] == unicode(record["title"]):
                        contact["obj"].setDepartment(dept.UID())
 def load_instruments(self, sheet):
     """Import Instrument objects; registers each in self.instruments.

     Row 2 of the sheet holds the field names; data rows start at row 4.
     """
     row_count = sheet.get_highest_row()
     col_count = sheet.get_highest_column()
     grid = [[sheet.cell(row=r, column=c).value for c in range(col_count)]
             for r in range(row_count)]
     fields = grid[1]
     folder = self.context.bika_setup.bika_instruments
     for values in grid[3:]:
         record = dict(zip(fields, values))
         _id = folder.invokeFactory("Instrument", id="tmp")
         instrument = folder[_id]
         instrument.edit(
             title=unicode(record["title"]),
             description=unicode(record["description"]),
             Type=unicode(record["Type"]),
             Brand=unicode(record["Brand"]),
             Model=unicode(record["Model"]),
             SerialNo=unicode(record["SerialNo"]),
             CalibrationCertificate=unicode(record["CalibrationCertificate"]),
             CalibrationExpiryDate=unicode(record["CalibrationExpiryDate"]),
             DataInterface=record["DataInterface"],
         )
         self.instruments[unicode(record["title"])] = instrument
         instrument.unmarkCreationFlag()
         renameAfterCreation(instrument)
 def load_lab_contacts(self, sheet):
     """Import LabContact objects; collects each row dict in self.lab_contacts.

     Row 2 of the sheet holds the field names; data rows start at row 4.
     """
     self.lab_contacts = []
     row_count = sheet.get_highest_row()
     col_count = sheet.get_highest_column()
     grid = [[sheet.cell(row=r, column=c).value for c in range(col_count)]
             for r in range(row_count)]
     fields = grid[1]
     folder = self.context.bika_setup.bika_labcontacts
     for values in grid[3:]:
         record = dict(zip(fields, values))
         _id = folder.invokeFactory("LabContact", id="tmp")
         contact = folder[_id]
         contact.unmarkCreationFlag()
         renameAfterCreation(contact)
         full_name = unicode(record["Firstname"]) + " " + unicode(record["Surname"])
         contact.edit(
             title=full_name,
             description=full_name,
             Firstname=unicode(record["Firstname"]),
             Surname=unicode(record["Surname"]),
             EmailAddress=unicode(record["EmailAddress"]),
             BusinessPhone=unicode(record["BusinessPhone"]),
             BusinessFax=unicode(record["BusinessFax"]),
             MobilePhone=unicode(record["MobilePhone"]),
             JobTitle=unicode(record["JobTitle"]),
         )
         # Keep the raw row plus the created object so load_lab_departments
         # can link contacts to departments afterwards.
         record["obj"] = contact
         self.lab_contacts.append(record)
 def load_containers(self, sheet):
     """Import Container objects; populates self.containers keyed by title.

     Depends on self.containertypes and self.preservations being loaded first.
     """
     row_count = sheet.get_highest_row()
     col_count = sheet.get_highest_column()
     grid = [[sheet.cell(row=r, column=c).value for c in range(col_count)]
             for r in range(row_count)]
     fields = grid[1]
     folder = self.context.bika_setup.bika_containers
     self.containers = {}
     for values in grid[3:]:
         record = dict(zip(fields, values))
         _id = folder.invokeFactory("Container", id="tmp")
         container = folder[_id]
         ctype = record["ContainerType"]
         preservation = record["Preservation"]
         container.edit(
             title=unicode(record["title"]),
             description=unicode(record["description"]),
             Capacity=unicode(record["Capacity"]),
             ContainerType=self.containertypes[ctype] if ctype else None,
             PrePreserved=record["PrePreserved"] or False,
             Preservation=self.preservations[preservation] if preservation else None,
         )
         container.unmarkCreationFlag()
         renameAfterCreation(container)
         self.containers[unicode(record["title"])] = container
 def load_preservations(self, sheet):
     """Import Preservation objects; populates self.preservations by title.

     Depends on self.containertypes being loaded first.
     """
     row_count = sheet.get_highest_row()
     col_count = sheet.get_highest_column()
     grid = [[sheet.cell(row=r, column=c).value for c in range(col_count)]
             for r in range(row_count)]
     fields = grid[1]
     folder = self.context.bika_setup.bika_preservations
     self.preservations = {}
     for values in grid[3:]:
         record = dict(zip(fields, values))
         _id = folder.invokeFactory("Preservation", id="tmp")
         pres = folder[_id]
         linked_types = []
         if record["ContainerType"]:
             for name in record["ContainerType"].split(","):
                 linked_types.append(self.containertypes[name.strip()])
         # SECURITY NOTE: eval() executes arbitrary expressions coming from
         # the spreadsheet; ast.literal_eval would be safer for this dict.
         retention = record["RetentionPeriod"] and eval(record["RetentionPeriod"]) or {}
         pres.edit(
             title=unicode(record["title"]),
             description=unicode(record["description"]),
             RetentionPeriod=retention,
             ContainerType=linked_types,
         )
         pres.unmarkCreationFlag()
         renameAfterCreation(pres)
         self.preservations[unicode(record["title"])] = pres
Example #12
0
 def workflow_script_receive(self):
     """Receive the order: create StockItems per line item, bump product
     quantities, stamp the received date and optionally print stickers."""
     products = self.aq_parent.objectValues('Product')
     for line in self.order_lineitems:
         count = int(line['Quantity'])
         if count < 1:
             continue
         # First product whose id matches the line item (IndexError if none).
         product = [p for p in products if p.getId() == line['Product']][0]
         folder = self.bika_setup.bika_stockitems
         for _ in range(count):
             stock_item = _createObjectByType('StockItem', folder, tmpID())
             stock_item.setProduct(product)
             stock_item.setOrderId(self.getId())
             stock_item.setDateReceived(DateTime())
             stock_item.unmarkCreationFlag()
             renameAfterCreation(stock_item)
             # Manually reindex stock item in catalog
             self.bika_setup_catalog.reindexObject(stock_item)
         product.setQuantity(product.getQuantity() + count)
     self.setDateReceived(DateTime())
     self.reindexObject()
     # Print stock item stickers if opted for
     if self.bika_setup.getAutoPrintInventoryStickers():
         # TODO: Use better method to redirect after transition
         self.REQUEST.response.write(
             "<script>window.location.href='%s'</script>" % (
                 self.absolute_url() + '/stickers/?items=' + self.getId()))
    def addReferenceAnalysis(self, service_uid, reference_type):
        """Add a ReferenceAnalysis for *service_uid* to this sample.

        :param service_uid: UID of the AnalysisService to reference
        :param reference_type: reference type to stamp onto the analysis
        :return: UID of the newly created ReferenceAnalysis
        """
        ref_catalog = getToolByName(self, REFERENCE_CATALOG)
        service = ref_catalog.lookupObject(service_uid)

        analysis = _createObjectByType("ReferenceAnalysis", self, tmpID())
        analysis.unmarkCreationFlag()

        # Copy interim fields from the service's calculation, when present.
        calculation = service.getCalculation()
        interim_fields = calculation and calculation.getInterimFields() or []
        renameAfterCreation(analysis)

        analysis.setReferenceType(reference_type)
        analysis.setService(service_uid)
        analysis.setInterimFields(interim_fields)
        return analysis.UID()
Example #14
0
    def create_reflex_rule(self, title, method, rules_data):
        """Create a ReflexRule object from a dict with reflex rules data.

        :param title: string title for the rule
        :param method: a method object; its UID is bound to the rule
        :param rules_data: list of rule dicts, for example
            [{'actions': [{'act_row_idx': 0,
                           'action': 'repeat',
                           'an_result_id': 'rep-1',
                           'analyst': '',
                           'otherWS': 'current',
                           'setresultdiscrete': '',
                           'setresulton': 'original',
                           'setresultvalue': '',
                           'worksheettemplate': ''}],
              'conditions': [{'analysisservice': '<service uid>',
                              'and_or': 'no',
                              'cond_row_idx': 0,
                              'discreteresult': '',
                              'range0': '11',
                              'range1': '12'}],
              'mother_service_uid': '<service uid>',
              'rulenumber': '0',
              'trigger': 'submit'},
             ...]
        :return: the created ReflexRule object
        """
        # (Removed the unused `rules_list` local from the original.)
        folder = self.portal.bika_setup.bika_reflexrulefolder
        _id = folder.invokeFactory('ReflexRule', id=tmpID())
        rule = folder[_id]
        rule.edit(title=title)
        rule.setMethod(method.UID())
        # Empty rules_data deliberately leaves the rule without reflex rules.
        if rules_data:
            rule.setReflexRules(rules_data)
        rule.unmarkCreationFlag()
        renameAfterCreation(rule)
        return rule
Example #15
0
    def Import(self):
        """Create Patient objects from the import rows.

        Rows missing Firstname or PrimaryReferrer are skipped; the referrer
        must resolve to exactly one Client or an IndexError is raised.
        Optional Photo/Feature columns name image files under the dataset
        folder; failures to load them are logged and ignored (best-effort).
        """
        folder = self.context.patients
        # Hoisted out of the loop: the catalog tool is loop-invariant.
        pc = getToolByName(self.context, 'portal_catalog')
        for row in self.get_rows(3):
            if not row['Firstname'] or not row['PrimaryReferrer']:
                continue
            client = pc(portal_type='Client', Title=row['PrimaryReferrer'])
            if len(client) == 0:
                raise IndexError("Primary referrer invalid: '%s'"
                                 % row['PrimaryReferrer'])
            client = client[0].getObject()

            _id = folder.invokeFactory('Patient', id=tmpID())
            obj = folder[_id]
            Fullname = (row['Firstname'] + " " + row.get('Surname', '')).strip()
            obj.edit(title=Fullname,
                     ClientPatientID=row.get('ClientPatientID', ''),
                     Salutation=row.get('Salutation', ''),
                     Firstname=row.get('Firstname', ''),
                     Surname=row.get('Surname', ''),
                     PrimaryReferrer=client.UID(),
                     Gender=row.get('Gender', 'dk'),
                     Age=row.get('Age', ''),
                     BirthDate=row.get('BirthDate', ''),
                     BirthDateEstimated=self.to_bool(
                         row.get('BirthDateEstimated', 'False')),
                     BirthPlace=row.get('BirthPlace', ''),
                     Ethnicity=row.get('Ethnicity', ''),
                     Citizenship=row.get('Citizenship', ''),
                     MothersName=row.get('MothersName', ''),
                     CivilStatus=row.get('CivilStatus', ''),
                     Anonymous=self.to_bool(row.get('Anonymous', 'False')))
            self.fill_contactfields(row, obj)
            self.fill_addressfields(row, obj)

            if row.get('Photo'):
                try:
                    path = resource_filename(
                        "bika.lims",
                        "setupdata/%s/%s" % (self.dataset_name, row['Photo']))
                    # BUGFIX: close the file handle (was open().read()).
                    with open(path, "rb") as f:
                        obj.setPhoto(f.read())
                # BUGFIX: narrowed the bare except; still best-effort.
                except Exception:
                    logger.error("Unable to load Photo %s" % row['Photo'])

            if row.get('Feature'):
                try:
                    path = resource_filename(
                        "bika.lims",
                        "setupdata/%s/%s" % (self.dataset_name, row['Feature']))
                    with open(path, "rb") as f:
                        obj.setFeature(f.read())
                except Exception:
                    logger.error("Unable to load Feature %s" % row['Feature'])

            # BUGFIX: unmark/rename once, after the title is set; they were
            # previously also called before edit(), renaming the object twice.
            obj.unmarkCreationFlag()
            renameAfterCreation(obj)
Example #16
0
 def Import(self):
     """Create a CaseSyndromicClassification object for every titled row."""
     folder = self.context.bika_setup.bika_casesyndromicclassifications
     for row in self.get_rows(3):
         # BUGFIX: only create the object for rows with a title; previously
         # an unedited "tmp" object was left in the folder for other rows.
         if not row['title']:
             continue
         obj = _createObjectByType('CaseSyndromicClassification', folder, tmpID())
         obj.edit(title=row['title'],
                  description=row.get('description', ''))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
Example #17
0
 def Import(self):
     """Create an IdentifierType object for every titled row."""
     folder = self.context.bika_setup.bika_identifiertypes
     for row in self.get_rows(3):
         # BUGFIX: only create the object for rows with a title; previously
         # an unedited "tmp" object was left in the folder for other rows.
         if not row['title']:
             continue
         obj = _createObjectByType('IdentifierType', folder, tmpID())
         obj.edit(title=row['title'],
                  description=row.get('description', ''))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
Example #18
0
 def Import(self):
     """Create a Cultivar object for every titled row."""
     folder = self.context.bika_setup.bika_cultivars
     for row in self.get_rows(3):
         if not row.get('title'):
             continue
         _id = folder.invokeFactory('Cultivar', id=tmpID())
         obj = folder[_id]
         # Robustness: tolerate a missing 'description' column (direct
         # indexing previously raised KeyError; siblings use .get()).
         obj.edit(title=row['title'],
                  description=row.get('description', ''))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
Example #19
0
 def Import(self):
     """Create an Ethnicity object for every row that has a Title."""
     folder = self.context.bika_setup.bika_ethnicities
     for row in self.get_rows(3):
         # BUGFIX: only create the object for rows with a Title; previously
         # an unedited "tmp" object was left in the folder for other rows.
         if not row.get('Title', None):
             continue
         _id = folder.invokeFactory('Ethnicity', id=tmpID())
         obj = folder[_id]
         obj.edit(title=row['Title'],
                  description=row.get('Description', ''))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
Example #20
0
 def Import(self):
     """Create a CaseStatus object for every titled row."""
     folder = self.context.bika_setup.bika_casestatuses
     for row in self.get_rows(3):
         if not row['title']:
             continue
         _id = folder.invokeFactory('CaseStatus', id=tmpID())
         status = folder[_id]
         status.edit(title=row['title'],
                     description=row.get('description', ''))
         status.unmarkCreationFlag()
         renameAfterCreation(status)
Example #21
0
 def Import(self):
     """Create a Disease object for every row that has a Title."""
     folder = self.context.bika_setup.bika_diseases
     for row in self.get_rows(3):
         # BUGFIX: only create the object for rows with a Title; previously
         # an unedited "tmp" object was left in the folder for other rows.
         if not row['Title']:
             continue
         _id = folder.invokeFactory('Disease', id=tmpID())
         obj = folder[_id]
         obj.edit(ICDCode=row.get('ICDCode', ''),
                  title=row['Title'],
                  description=row.get('Description', ''))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
Example #22
0
 def Import(self):
     """Import analysis specifications grouped by client and sample type.

     Rows are first accumulated into bucket[client_title][sampletype_title];
     one AnalysisSpec is then written per (client, sample type) pair.
     'lab' specs go to bika_setup.bika_analysisspecs, client specs into the
     matching Client folder.
     """
     # BUGFIX: removed the leftover debug statement `print "EOOO"` and the
     # unused initial value of c_t (it is reassigned on every row).
     s_t = ''
     bucket = {}
     pc = getToolByName(self.context, 'portal_catalog')
     bsc = getToolByName(self.context, 'bika_setup_catalog')
     # collect up all values into the bucket
     for row in self.get_rows(3):
         c_t = row['Client_title'] if row['Client_title'] else 'lab'
         if c_t not in bucket:
             bucket[c_t] = {}
         # An empty SampleType_title means "same sample type as the
         # previous row".
         s_t = row['SampleType_title'] if row['SampleType_title'] else s_t
         if s_t not in bucket[c_t]:
             bucket[c_t][s_t] = []
         # Resolve the service by title first, then fall back to keyword.
         service = bsc(portal_type='AnalysisService', title=row['service'])
         if not service:
             service = bsc(portal_type='AnalysisService',
                           getKeyword=row['service'])
         try:
             service = service[0].getObject()
             bucket[c_t][s_t].append({
                 'keyword': service.getKeyword(),
                 'min': row.get('min', '0'),
                 'max': row.get('max', '0'),
                 'minpanic': row.get('minpanic', '0'),
                 'maxpanic': row.get('maxpanic', '0'),
                 'error': row.get('error', '0'),
             })
         except IndexError:
             warning = "Error with service name %s on sheet %s. Service not uploaded."
             logger.warning(warning, row.get('service', ''), self.sheetname)
     # write objects.
     for c_t in bucket:
         if c_t == 'lab':
             folder = self.context.bika_setup.bika_analysisspecs
         else:
             folder = pc(portal_type='Client', title=c_t)
             if (not folder or len(folder) != 1):
                 logger.warn("Client %s not found. Omiting client specifications." % c_t)
                 continue
             folder = folder[0].getObject()
         for s_t in bucket[c_t]:
             resultsrange = bucket[c_t][s_t]
             sampletype = bsc(portal_type='SampleType', title=s_t)[0]
             _id = folder.invokeFactory('AnalysisSpec', id=tmpID())
             obj = folder[_id]
             obj.edit(
                 title=sampletype.Title,
                 ResultsRange=resultsrange)
             obj.setSampleType(sampletype.UID)
             obj.unmarkCreationFlag()
             renameAfterCreation(obj)
Example #23
0
 def Import(self):
     """Create a StorageType object for every import row."""
     folder = self.context.bika_setup.bika_storagetypes
     for row in self.get_rows(3):
         storage_type = _createObjectByType('StorageType', folder, tmpID())
         storage_type.edit(
             title=row.get('title'),
             description=row.get('description', ''),
         )
         storage_type.unmarkCreationFlag()
         renameAfterCreation(storage_type)
Example #24
0
 def add_to_logs(self, instrument, interface, log, filename):
     """Attach an AutoImportLog entry to *instrument*.

     :param log: list of message strings; joined and truncated to 80 chars
     :param filename: the imported file recorded on the log entry
     """
     if not log:
         return
     text = ''.join(log)
     if len(text) > 80:
         text = text[:80] + '...'
     _id = instrument.invokeFactory("AutoImportLog", id=tmpID(),
                                    Instrument=instrument,
                                    Interface=interface,
                                    Results=text,
                                    ImportedFile=filename)
     entry = instrument[_id]
     entry.unmarkCreationFlag()
     renameAfterCreation(entry)
Example #25
0
 def Import(self):
     """Create AetiologicAgent objects, attaching any known subtypes."""
     subtypes = self.get_subtypes()
     folder = self.context.bika_setup.bika_aetiologicagents
     for row in self.get_rows(3):
         # BUGFIX: skip titleless rows BEFORE creating the object; previously
         # an unedited "tmp" object was left behind for them.
         if not row['title']:
             continue
         obj = _createObjectByType('AetiologicAgent', folder, tmpID())
         obj.edit(title=row['title'],
                  description=row.get('description', ''),
                  AetiologicAgentSubtypes=subtypes.get(row['title'], []))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
Example #26
0
 def Import(self):
     """Create a Treatment object for every titled row."""
     folder = self.context.bika_setup.bika_treatments
     for row in self.get_rows(3):
         # BUGFIX: only create the object for rows with a title; previously
         # an unedited "tmp" object was left in the folder for other rows.
         if not row['title']:
             continue
         obj = _createObjectByType('Treatment', folder, tmpID())
         obj.edit(title=row['title'],
                  description=row.get('description', ''),
                  Type=row.get('Type', 'active'),
                  Procedure=row.get('Procedure', ''),
                  Care=row.get('Care', ''),
                  SubjectiveClinicalFindings=row.get('SubjectiveClinicalFindings', ''),
                  ObjectiveClinicalFindings=row.get('ObjectiveClinicalFindings', ''))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
Example #27
0
 def Import(self):
     """Create a Drug object for every titled row."""
     folder = self.context.bika_setup.bika_drugs
     for row in self.get_rows(3):
         # BUGFIX: only create the object for rows with a title; previously
         # an unedited "tmp" object was left in the folder for other rows.
         if not row['title']:
             continue
         obj = _createObjectByType('Drug', folder, tmpID())
         obj.edit(title=row['title'],
                  description=row.get('description', ''),
                  Category=row.get('Category', ''),
                  Indications=row.get('Indications', ''),
                  Posology=row.get('Posology', ''),
                  SideEffects=row.get('SideEffects', ''),
                  Preservation=row.get('Preservation', ''))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
Example #28
0
 def Import(self):
     """Create a Symptom object for every row that has a Title."""
     folder = self.context.bika_setup.bika_symptoms
     for row in self.get_rows(3):
         # BUGFIX: only create the object for rows with a Title; previously
         # an unedited "tmp" object was left in the folder for other rows.
         if not row['Title']:
             continue
         _id = folder.invokeFactory('Symptom', id=tmpID())
         obj = folder[_id]
         obj.edit(Code=row.get('Code', ''),
                  title=row['Title'],
                  description=row.get('Description', ''),
                  Gender=row.get('Gender', 'dk'),
                  SeverityAllowed=self.to_bool(row.get('SeverityAllowed', 1)))
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
 def load_worksheet_templates(self, sheet):
     """Import WorksheetTemplate objects from the given worksheet.

     Row 2 of the sheet holds the field names; data rows start at row 4.
     A row with a title creates a new template with a one-entry Layout;
     subsequent titleless rows with a 'pos' value append further layout
     entries to the most recently created template (tracked in wst_obj).
     """
     nr_rows = sheet.get_highest_row()
     nr_cols = sheet.get_highest_column()
     ##        self.request.response.write("<input type='hidden' id='load_section' value='Worksheet Templates' max='%s'/>"%(nr_rows-3))
     ##        self.request.response.flush()
     # Materialise the whole sheet into a list of rows of cell values.
     rows = [[sheet.cell(row=row_nr, column=col_nr).value for col_nr in range(nr_cols)] for row_nr in range(nr_rows)]
     fields = rows[1]
     folder = self.context.bika_setup.bika_worksheettemplates
     for row in rows[3:]:
         row = dict(zip(fields, row))
         if not row["title"]:
             if row["pos"]:
                 # Continuation row: extend the layout of the template
                 # created by the last titled row.
                 # NOTE(review): if the FIRST data row has no title,
                 # wst_obj is unbound here and this raises NameError —
                 # assumes sheets always begin with a titled row; confirm.
                 control_ref = self.definitions.get(unicode(row["control_ref"]), "")
                 blank_ref = self.definitions.get(unicode(row["blank_ref"]), "")
                 l = [
                     {
                         "pos": unicode(row["pos"]),
                         "type": unicode(row["type"]),
                         "control_ref": control_ref,
                         "blank_ref": blank_ref,
                         "dup": unicode(row["dup"]),
                     }
                 ]
                 wst_obj.setLayout(wst_obj.getLayout() + l)
             continue
         _id = folder.invokeFactory("WorksheetTemplate", id="tmp")
         obj = folder[_id]
         # Comma-separated service keywords, resolved to catalog UIDs.
         services = row["Service"] and [d.strip() for d in unicode(row["Service"]).split(",")] or []
         proxies = services and self.bsc(portal_type="AnalysisService", getKeyword=services) or []
         services = [p.UID for p in proxies]
         control_ref = self.definitions.get(unicode(row["control_ref"]), "")
         blank_ref = self.definitions.get(unicode(row["blank_ref"]), "")
         obj.edit(
             title=unicode(row["title"]),
             description=unicode(row["description"]),
             Service=services,
             Layout=[
                 {
                     "pos": unicode(row["pos"]),
                     "type": unicode(row["type"]),
                     "control_ref": control_ref,
                     "blank_ref": blank_ref,
                     "dup": unicode(row["dup"]),
                 }
             ],
         )
         # Remember the current template so continuation rows can extend it.
         wst_obj = obj
         obj.unmarkCreationFlag()
         renameAfterCreation(obj)
    def CreateServiceObjects(self, services):
        """Create AnalysisService objects from imported spreadsheet rows.

        Rows that reference a Calculation which has not been created yet
        are deferred (appended to self.deferred["Analysis Services"]) so
        they can be retried later.  Each created service is registered by
        Keyword in self.services.

        BUGFIX: `obj.setInstrument(...)` previously ended with a stray
        trailing comma, turning the statement into a discarded tuple.
        Also replaces the deprecated dict.has_key() and removes the
        unused locals `deferred` and `service_obj`.
        """
        if not hasattr(self, "service_objs"):
            self.service_objs = {}
        if "Analysis Services" not in self.deferred:
            self.deferred["Analysis Services"] = []

        folder = self.context.bika_setup.bika_analysisservices

        for row in services:
            # Defer rows whose Calculation is not yet known.
            if row["Calculation"] and not row["Calculation"] in [c.Title() for c in self.calcs.values()]:
                self.deferred["Analysis Services"].append(row)
                continue

            _id = folder.invokeFactory("AnalysisService", id="tmp")
            obj = folder[_id]
            # NOTE(review): eval() below executes expressions taken verbatim
            # from the import file (MaxTimeAllowed, Uncertainties,
            # ResultOptions) -- only import spreadsheets from trusted sources.
            obj.edit(
                title=unicode(row["title"]),
                description=row["description"] and unicode(row["description"]) or "",
                Method=row["Method"] and self.methods[unicode(row["Method"])] or None,
                Container=row["Container"] and [self.containers[c] for c in row["Container"].split(",")] or [],
                Preservation=row["Preservation"]
                and [self.preservations[c] for c in row["Preservation"].split(",")]
                or [],
                PointOfCapture=unicode(row["PointOfCapture"]),
                Unit=row["Unit"] and unicode(row["Unit"]) or None,
                Category=self.cats[unicode(row["Category"])].UID(),
                Price="%02f" % float(row["Price"]),
                CorporatePrice="%02f" % float(row["BulkPrice"]),
                VAT="%02f" % float(row["VAT"]),
                Precision=unicode(row["Precision"]),
                Accredited=row["Accredited"] and True or False,
                Keyword=unicode(row["Keyword"]),
                MaxTimeAllowed=row["MaxTimeAllowed"] and eval(row["MaxTimeAllowed"]) or {},
                DuplicateVariation="%02f" % float(row["DuplicateVariation"]),
                Uncertanties=row["Uncertainties"] and eval(row["Uncertainties"]) or [],
                ResultOptions=row["ResultOptions"] and eval(row["ResultOptions"]) or [],
                ReportDryMatter=row["ReportDryMatter"] and True or False,
            )
            if row["Instrument"]:
                # Evaluates to the instrument UID, or False when unknown.
                obj.setInstrument(row["Instrument"] in self.instruments and self.instruments[row["Instrument"]].UID())
            if row["Calculation"]:
                obj.setCalculation(self.calcs[row["Calculation"]])
            self.services[row["Keyword"]] = obj
            obj.unmarkCreationFlag()
            renameAfterCreation(obj)
Exemple #31
0
    def publishFromHTML(self, aruid, results_html):
        """Publish the results report for the AR with UID `aruid`.

        Renders `results_html` to PDF, stores PDF/CSV/HTML on a new
        ARReport inside the AR, transitions the AR workflow to
        prepublished/published/republished, emails the report to the
        configured recipients and, when the COAs_FOLDER environment
        variable is set, also saves PDF and CSV copies on disk.

        :param aruid: UID of the AnalysisRequest to publish
        :param results_html: fully rendered report HTML (str)
        :returns: [ar] on success, [] when the AR is missing or not in a
            publishable state
        """
        # The AR can be published only and only if allowed
        uc = getToolByName(self.context, 'uid_catalog')
        #ars = uc(UID=aruid)
        ars = [p.getObject() for p in uc(UID=aruid)]
        if not ars or len(ars) != 1:
            return []

        ar = ars[0]
        wf = getToolByName(ar, 'portal_workflow')
        allowed_states = ['verified', 'published']
        # Publish/Republish allowed?
        if wf.getInfoFor(ar, 'review_state') not in allowed_states:
            # Pre-publish allowed?
            if not ar.getAnalyses(review_state=allowed_states):
                return []

        # HTML written to debug file
        debug_mode = App.config.getConfiguration().debug_mode
        if debug_mode:
            # NOTE(review): mktemp is race-prone and the handle is never
            # closed; acceptable for debug-only output.
            tmp_fn = tempfile.mktemp(suffix=".html")
            logger.debug("Writing HTML for %s to %s" % (ar.Title(), tmp_fn))
            open(tmp_fn, "wb").write(results_html)

        # Create the pdf report (will always be attached to the AR)
        # we must supply the file ourself so that createPdf leaves it alone.
        pdf_fn = tempfile.mktemp(suffix=".pdf")
        pdf_report = createPdf(htmlreport=results_html, outfile=pdf_fn)

        # PDF written to debug file
        if debug_mode:
            logger.debug("Writing PDF for %s to %s" % (ar.Title(), pdf_fn))
        else:
            os.remove(pdf_fn)

        recipients = []
        contact = ar.getContact()
        lab = ar.bika_setup.laboratory

        # BIKA Cannabis hack.  Create the CSV they desire here now
        #csvdata = self.create_cannabis_csv(ars)
        csvdata = self.create_metrc_csv(ars)
        # NOTE: pdf_fn is reused from here on as the *base name* (the
        # request ID) for report attachments and files saved to disk.
        pdf_fn = to_utf8(ar.getRequestID())
        if pdf_report:
            if contact:
                recipients = [{
                    'UID':
                    contact.UID(),
                    'Username':
                    to_utf8(contact.getUsername()),
                    'Fullname':
                    to_utf8(contact.getFullname()),
                    'EmailAddress':
                    to_utf8(contact.getEmailAddress()),
                    'PublicationModes':
                    contact.getPublicationPreference()
                }]
            # Persist the report (PDF + CSV + HTML) on the AR itself.
            reportid = ar.generateUniqueId('ARReport')
            report = _createObjectByType("ARReport", ar, reportid)
            report.edit(AnalysisRequest=ar.UID(),
                        Pdf=pdf_report,
                        CSV=csvdata,
                        Html=results_html,
                        Recipients=recipients)
            report.unmarkCreationFlag()
            renameAfterCreation(report)
            # Set blob properties for fields containing file data
            fld = report.getField('Pdf')
            fld.get(report).setFilename(pdf_fn + ".pdf")
            fld.get(report).setContentType('application/pdf')
            fld = report.getField('CSV')
            fld.get(report).setFilename(pdf_fn + ".csv")
            fld.get(report).setContentType('text/csv')

            # Set status to prepublished/published/republished
            status = wf.getInfoFor(ar, 'review_state')
            transitions = {'verified': 'publish', 'published': 'republish'}
            transition = transitions.get(status, 'prepublish')
            try:
                wf.doActionFor(ar, transition)
            except WorkflowException:
                pass

            # compose and send email.
            # The managers of the departments for which the current AR has
            # at least one AS must receive always the pdf report by email.
            # https://github.com/bikalabs/Bika-LIMS/issues/1028
            mime_msg = MIMEMultipart('related')
            mime_msg['Subject'] = self.get_mail_subject(ar)[0]
            mime_msg['From'] = formataddr(
                (encode_header(lab.getName()), lab.getEmailAddress()))
            mime_msg.preamble = 'This is a multi-part MIME message.'
            msg_txt = MIMEText(results_html, _subtype='html')
            mime_msg.attach(msg_txt)

            to = []
            #mngrs = ar.getResponsible()
            #for mngrid in mngrs['ids']:
            #    name = mngrs['dict'][mngrid].get('name', '')
            #    email = mngrs['dict'][mngrid].get('email', '')
            #    if (email != ''):
            #        to.append(formataddr((encode_header(name), email)))

            #if len(to) > 0:
            #    # Send the email to the managers
            #    mime_msg['To'] = ','.join(to)
            #    attachPdf(mime_msg, pdf_report, pdf_fn)

            #    # BIKA Cannabis hack.  Create the CSV they desire here now
            #    fn = pdf_fn
            #    attachCSV(mime_msg,csvdata,fn)

            #    try:
            #        host = getToolByName(ar, 'MailHost')
            #        host.send(mime_msg.as_string(), immediate=True)
            #    except SMTPServerDisconnected as msg:
            #        logger.warn("SMTPServerDisconnected: %s." % msg)
            #    except SMTPRecipientsRefused as msg:
            #        raise WorkflowException(str(msg))

        # Send report to recipients
        recips = self.get_recipients(ar)
        for recip in recips:
            # Only contacts who opted in for email publication and have
            # an address get a message.
            if 'email' not in recip.get('pubpref', []) \
                    or not recip.get('email', ''):
                continue

            title = encode_header(recip.get('title', ''))
            email = recip.get('email')
            formatted = formataddr((title, email))

            # Create the new mime_msg object, cause the previous one
            # has the pdf already attached
            mime_msg = MIMEMultipart('related')
            mime_msg['Subject'] = self.get_mail_subject(ar)[0]
            mime_msg['From'] = formataddr(
                (encode_header(lab.getName()), lab.getEmailAddress()))
            mime_msg.preamble = 'This is a multi-part MIME message.'
            msg_txt = MIMEText(results_html, _subtype='html')
            mime_msg.attach(msg_txt)
            mime_msg['To'] = formatted

            # Attach the pdf to the email if requested
            if pdf_report and 'pdf' in recip.get('pubpref'):
                attachPdf(mime_msg, pdf_report, pdf_fn)
                # BIKA Cannabis hack.  Create the CSV they desire here now
                fn = pdf_fn
                attachCSV(mime_msg, csvdata, fn)

            # For now, I will simply ignore mail send under test.
            if hasattr(self.portal, 'robotframework'):
                continue

            msg_string = mime_msg.as_string()

            # content of outgoing email written to debug file
            if debug_mode:
                tmp_fn = tempfile.mktemp(suffix=".email")
                logger.debug("Writing MIME message for %s to %s" %
                             (ar.Title(), tmp_fn))
                open(tmp_fn, "wb").write(msg_string)

            try:
                host = getToolByName(ar, 'MailHost')
                host.send(msg_string, immediate=True)
            except SMTPServerDisconnected as msg:
                logger.warn("SMTPServerDisconnected: %s." % msg)
            except SMTPRecipientsRefused as msg:
                raise WorkflowException(str(msg))

        # Save file on the filesystem
        folder = os.environ.get('COAs_FOLDER', '')
        if len(folder) != 0:
            # Files are organised as <folder>/<client id>/<date>/<id>.{pdf,csv}
            client_path = '{}/{}/'.format(folder, ar.getClientID())
            if not os.path.exists(client_path):
                os.makedirs(client_path)

            today = self.ulocalized_time(DateTime(), long_format=0)
            today_path = '{}{}/'.format(client_path, today)
            if not os.path.exists(today_path):
                os.makedirs(today_path)

            fname = '{}{}.pdf'.format(today_path, pdf_fn)
            f = open(fname, 'w')
            f.write(pdf_report)
            f.close()

            csvname = '{}{}.csv'.format(today_path, pdf_fn)
            fcsv = open(csvname, 'w')
            fcsv.write(csvdata)
            fcsv.close()

        return [ar]
Exemple #32
0
 def _renameAfterCreation(self, check_auto_id=False):
     """Let the ID server assign this object's final id."""
     from bika.lims.idserver import renameAfterCreation as rename
     rename(self)
Exemple #33
0
    def Import(self):
        """Import Patient objects from the setup spreadsheet.

        Data rows start at spreadsheet row 3.  Rows missing a Firstname
        or PrimaryReferrer (client title) are skipped; rows whose client
        cannot be found are logged and skipped.  A missing Ethnicity
        aborts the import with an IndexError.  When the row supplies a
        PatientID the object is renamed to that id instead of going
        through renameAfterCreation, so spreadsheet ids are preserved.
        """
        folder = self.context.patients
        rows = self.get_rows(3)
        for row in rows:
            if not row['Firstname'] or not row['PrimaryReferrer']:
                continue
            pc = getToolByName(self.context, 'portal_catalog')
            client = pc(portal_type='Client', Title=row['PrimaryReferrer'])
            if len(client) == 0:
                error = "Primary referrer invalid: '%s'. Patient '%s %s' will not be uploaded"
                logger.error(error, row['PrimaryReferrer'], row['Firstname'],
                             row.get('Surname', ''))
                continue

            client = client[0].getObject()

            # Getting an existing ethnicity
            bsc = getToolByName(self.context, 'bika_setup_catalog')
            ethnicity = bsc(portal_type='Ethnicity',
                            Title=row.get('Ethnicity', ''))
            if len(ethnicity) == 0:
                # NOTE(review): unlike the client check above, a bad
                # ethnicity aborts the whole import rather than skipping
                # the row -- confirm this is intended.
                raise IndexError("Invalid ethnicity: '%s'" % row['Ethnicity'])
            ethnicity = ethnicity[0].getObject()

            _id = folder.invokeFactory('Patient', id=tmpID())
            obj = folder[_id]
            obj.unmarkCreationFlag()
            renameAfterCreation(obj)
            Fullname = (row['Firstname'] + " " +
                        row.get('Surname', '')).strip()
            obj.edit(
                PatientID=row.get('PatientID'),
                title=Fullname,
                ClientPatientID=row.get('ClientPatientID', ''),
                Salutation=row.get('Salutation', ''),
                Firstname=row.get('Firstname', ''),
                Surname=row.get('Surname', ''),
                PrimaryReferrer=client.UID(),
                Gender=row.get('Gender', 'dk'),
                Age=row.get('Age', ''),
                BirthDate=row.get('BirthDate', ''),
                BirthDateEstimated=self.to_bool(
                    row.get('BirthDateEstimated', 'False')),
                BirthPlace=row.get('BirthPlace', ''),
                # TODO Ethnicity_Obj -> Ethnicity on health v319
                Ethnicity_Obj=ethnicity.UID(),
                Citizenship=row.get('Citizenship', ''),
                MothersName=row.get('MothersName', ''),
                CivilStatus=row.get('CivilStatus', ''),
                Anonymous=self.to_bool(row.get('Anonymous', 'False')))
            self.fill_contactfields(row, obj)
            self.fill_addressfields(row, obj)
            # Optional Photo attachment: look up the file in the dataset
            # directory, falling back to a '.jpg' suffix.
            if 'Photo' in row and row['Photo']:
                # NOTE(review): bare except silently swallows all errors
                # (including typos); only the filename is logged.
                try:
                    path = resource_filename(self.dataset_project,
                                             "setupdata/%s/%s" \
                                             % (self.dataset_name, row['Photo']))
                    file_data = open(path, "rb").read() if os.path.isfile(path) \
                        else open(path+'.jpg', "rb").read()
                    obj.setPhoto(file_data)
                except:
                    logger.error("Unable to load Photo %s" % row['Photo'])

            # Optional Feature attachment, with a '.pdf' suffix fallback.
            if 'Feature' in row and row['Feature']:
                try:
                    path = resource_filename(self.dataset_project,
                                             "setupdata/%s/%s" \
                                             % (self.dataset_name, row['Feature']))
                    file_data = open(path, "rb").read() if os.path.isfile(path) \
                        else open(path+'.pdf', "rb").read()
                    obj.setFeature(file_data)
                except:
                    logger.error("Unable to load Feature %s" % row['Feature'])

            obj.unmarkCreationFlag()
            transaction.savepoint(optimistic=True)
            if row.get('PatientID'):
                # To maintain the patient spreadsheet's IDs, we cannot do a 'renameaftercreation()'
                if obj.getPatientID() != row.get('PatientID'):
                    transaction.savepoint(optimistic=True)
                    obj.aq_inner.aq_parent.manage_renameObject(
                        obj.id, row.get('PatientID'))
            else:
                renameAfterCreation(obj)
Exemple #34
0
    def Import(self):
        """Custom import of Analysis Specifications.

        For each row: resolve the analysis service by its 'utestid'
        keyword and the sample type by title, build a results-range dict
        (min/max plus panic levels), then find-or-create an AnalysisSpec
        titled from sample type + gender + age bracket and append the
        range to it.  Rows with missing/ambiguous lookups or without any
        age bound are logged and skipped.
        """
        logger.info("*** Custom import of Analysis Specifications ***")
        for row in self.get_rows(3):
            keyword = row.get('utestid')
            if not keyword:
                logger.warn("No keyword found")
                continue

            # The keyword must match exactly one Analysis Service.
            query = dict(portal_type="AnalysisService", getKeyword=keyword)
            analysis = api.search(query, 'bika_setup_catalog')
            if not analysis:
                logger.warn("No analysis service found for {}".format(keyword))
                continue
            if len(analysis) > 1:
                logger.warn(
                    "More than one service found for {}".format(keyword))
                continue
            analysis = api.get_object(analysis[0])

            # TODO No Sample Type defined in the file, just use Whole Blood
            st_title = row.get('sample_type', 'Whole Blood')
            query = dict(portal_type="SampleType", title=st_title)
            sample_type = api.search(query, 'bika_setup_catalog')
            if not sample_type:
                logger.warn("No sample type found for {}".format(st_title))
                continue
            if len(sample_type) > 1:
                logger.warn(
                    "More than one sample type found for {}".format(st_title))
                continue
            sample_type = api.get_object(sample_type[0])

            unit = row.get('utestid_units')
            min_spec = row.get('lln', '')
            max_spec = row.get('uln', '')
            # 'mf' (male+female) is normalised to 'a' (all).
            gender = row.get('gender', 'a')
            gender = gender == 'mf' and 'a' or gender
            # Age bounds are joined with their unit suffix, e.g. '5d'.
            age_low = row.get('age_low', '')
            if age_low:
                age_low = '{}{}'.format(age_low, row.get('age_low_unit', 'd'))
            age_high = row.get('age_high', '')
            if age_high:
                age_high = '{}{}'.format(age_high,
                                         row.get('age_high_unit', 'd'))
            if not age_low and not age_high:
                logger.warn(
                    "Cannot create Spec, Age low and high not defined.")
                continue
            max_panic = row.get('panic_high_value', '')
            min_panic = row.get('panic_low_value', '')

            # TODO No Specs title defined in the file, just use sample type's
            # Title pattern: "<sample type> <GENDER> <age range>".
            specs_title = row.get('title', st_title)
            specs_key = []
            specs_key.append(specs_title)
            if gender:
                str_gender = gender.upper()
                if gender == 'a':
                    str_gender = 'MF'
                specs_key.append(str_gender)
            if age_low and age_high:
                specs_key.append('{} - {}'.format(age_low, age_high))
            elif age_low:
                specs_key.append('({}+)'.format(age_low))
            elif age_high:
                specs_key.append('(-{})'.format(age_high))
            specs_title = ' '.join(specs_key)

            # Results-range entry for this service within the spec.
            specs_dict = {
                'keyword': analysis.getKeyword(),
                'min': min_spec,
                'max': max_spec,
                'minpanic': min_panic,
                'maxpanic': max_panic,
                'warn_min': '',
                'warn_max': '',
                'hidemin': '',
                'hidemax': '',
                'rangecomments': '',
            }

            # Find-or-create the AnalysisSpec by its computed title.
            query = dict(portal_type='AnalysisSpec', title=specs_title)
            aspec = api.search(query, 'bika_setup_catalog')
            if not aspec:
                # Create a new one
                folder = self.context.bika_setup.bika_analysisspecs
                _id = folder.invokeFactory('AnalysisSpec', id=tmpID())
                aspec = folder[_id]
                aspec.edit(title=specs_title)
                aspec.Schema().getField("Gender").set(aspec, gender)
                aspec.Schema().getField("Agefrom").set(aspec, age_low)
                aspec.Schema().getField("Ageto").set(aspec, age_high)
                aspec.unmarkCreationFlag()
                renameAfterCreation(aspec)

            elif len(aspec) > 1:
                logger.warn(
                    "More than one Analysis Specification found for {}".format(
                        specs_title))
                continue
            else:
                aspec = api.get_object(aspec[0])

            # Append this row's range to the spec's existing ranges.
            result_range = aspec.Schema().getField('ResultsRange').get(aspec)
            result_range.append(specs_dict)
            aspec.Schema().getField('ResultsRange').set(aspec, result_range)
            aspec.setSampleType(sample_type.UID())
            aspec.reindexObject()
Exemple #35
0
def create_analysisrequest(client,
                           request,
                           values,
                           analyses=None,
                           partitions=None,
                           specifications=None,
                           prices=None):
    """This is meant for general use and should do everything necessary to
    create and initialise an AR and any other required auxiliary objects
    (Sample, SamplePartition, Analysis...)
    :param client:
        The container (Client) in which the ARs will be created.
    :param request:
        The current Request object.
    :param values:
        a dict, where keys are AR|Sample schema field names.
    :param analyses:
        Analysis services list.  If specified, augments the values in
        values['Analyses']. May consist of service objects, UIDs, or Keywords.
    :param partitions:
        A list of dictionaries, if specific partitions are required.  If not
        specified, AR's sample is created with a single partition.
    :param specifications:
        These values augment those found in values['Specifications']
    :param prices:
        Allow different prices to be set for analyses.  If not set, prices
        are read from the associated analysis service.
    :returns: the created AnalysisRequest object.
    """
    # Don't pollute the dict param passed in
    values = dict(values.items())

    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', client, tmpID())

    # Resolve the services uids and set the analyses for this Analysis Request
    service_uids = get_services_uids(context=client,
                                     values=values,
                                     analyses_serv=analyses)
    ar.setAnalyses(service_uids, prices=prices, specs=specifications)
    values.update({"Analyses": service_uids})
    ar.processForm(REQUEST=request, values=values)

    # Handle rejection reasons
    rejection_reasons = resolve_rejection_reasons(values)
    ar.setRejectionReasons(rejection_reasons)

    # Handle secondary Analysis Request
    primary = ar.getPrimaryAnalysisRequest()
    if primary:
        # Mark the secondary with the `IAnalysisRequestSecondary` interface
        alsoProvides(ar, IAnalysisRequestSecondary)

        # Rename the secondary according to the ID server setup
        renameAfterCreation(ar)

        # Set dates to match with those from the primary
        ar.setDateSampled(primary.getDateSampled())
        ar.setSamplingDate(primary.getSamplingDate())
        ar.setDateReceived(primary.getDateReceived())

        # Force the transition of the secondary to received and set the
        # description/comment in the transition accordingly.
        if primary.getDateReceived():
            primary_id = primary.getId()
            comment = "Auto-received. Secondary Sample of {}".format(
                primary_id)
            changeWorkflowState(ar,
                                AR_WORKFLOW_ID,
                                "sample_received",
                                action="receive",
                                comments=comment)

            # Mark the secondary as received
            alsoProvides(ar, IReceived)

            # Initialize analyses
            do_action_to_analyses(ar, "initialize")

            # Notify that the ar has been modified
            modified(ar)

            # Reindex the AR
            ar.reindexObject()

            # If rejection reasons have been set, reject automatically
            if rejection_reasons:
                doActionFor(ar, "reject")

            # In "received" state already
            return ar

    # Try first with no sampling transition, cause it is the most common config
    success, message = doActionFor(ar, "no_sampling_workflow")
    if not success:
        doActionFor(ar, "to_be_sampled")

    # If rejection reasons have been set, reject the sample automatically
    if rejection_reasons:
        doActionFor(ar, "reject")

    return ar
def create_analysisrequest(client,
                           request,
                           values,
                           analyses=None,
                           results_ranges=None,
                           prices=None):
    """Creates a new AnalysisRequest (a Sample) object
    :param client: The container where the Sample will be created
    :param request: The current Http Request object
    :param values: A dict, with keys as AnalysisRequest's schema field names
    :param analyses: List of Services or Analyses (brains, objects, UIDs,
        keywords). Extends the list from values["Analyses"]
    :param results_ranges: List of Results Ranges. Extends the results ranges
        from the Specification object defined in values["Specification"]
    :param prices: Mapping of AnalysisService UID -> price. If not set, prices
        are read from the associated analysis service.
    :returns: the created AnalysisRequest object.
    """
    # Don't pollute the dict param passed in
    values = dict(values.items())

    # Resolve the Service uids of analyses to be added in the Sample. Values
    # passed-in might contain Profiles and also values that are not uids. Also,
    # additional analyses can be passed-in through either values or services
    service_uids = to_services_uids(values=values, services=analyses)

    # Remove the Analyses from values. We will add them manually
    values.update({"Analyses": []})

    # Create the Analysis Request and submit the form
    ar = _createObjectByType('AnalysisRequest', client, tmpID())
    ar.processForm(REQUEST=request, values=values)

    # Set the analyses manually
    ar.setAnalyses(service_uids, prices=prices, specs=results_ranges)

    # Handle hidden analyses from template and profiles
    # https://github.com/senaite/senaite.core/issues/1437
    # https://github.com/senaite/senaite.core/issues/1326
    apply_hidden_services(ar)

    # Handle rejection reasons
    rejection_reasons = resolve_rejection_reasons(values)
    ar.setRejectionReasons(rejection_reasons)

    # Handle secondary Analysis Request
    primary = ar.getPrimaryAnalysisRequest()
    if primary:
        # Mark the secondary with the `IAnalysisRequestSecondary` interface
        alsoProvides(ar, IAnalysisRequestSecondary)

        # Rename the secondary according to the ID server setup
        renameAfterCreation(ar)

        # Set dates to match with those from the primary
        ar.setDateSampled(primary.getDateSampled())
        ar.setSamplingDate(primary.getSamplingDate())
        ar.setDateReceived(primary.getDateReceived())

        # Force the transition of the secondary to received and set the
        # description/comment in the transition accordingly.
        if primary.getDateReceived():
            primary_id = primary.getId()
            comment = "Auto-received. Secondary Sample of {}".format(
                primary_id)
            changeWorkflowState(ar,
                                AR_WORKFLOW_ID,
                                "sample_received",
                                action="receive",
                                comments=comment)

            # Mark the secondary as received
            alsoProvides(ar, IReceived)

            # Initialize analyses
            do_action_to_analyses(ar, "initialize")

            # Notify that the ar has been modified
            modified(ar)

            # Reindex the AR
            ar.reindexObject()

            # If rejection reasons have been set, reject automatically
            if rejection_reasons:
                doActionFor(ar, "reject")

            # In "received" state already
            return ar

    # Try first with no sampling transition, cause it is the most common config
    success, message = doActionFor(ar, "no_sampling_workflow")
    if not success:
        doActionFor(ar, "to_be_sampled")

    # If rejection reasons have been set, reject the sample automatically
    if rejection_reasons:
        doActionFor(ar, "reject")

    return ar
Exemple #37
0
 def _renameAfterCreation(self, check_auto_id=False):
     """Autogenerate this object's id using core's ID formatting
     settings for its portal type.
     """
     rename = idserver.renameAfterCreation
     rename(self)
Exemple #38
0
    def cloneAR(self, ar):
        """Create a full copy of an AnalysisRequest in the same parent.

        Copies the schema fields and every analysis (with its result),
        moves each copied analysis to 'to_be_verified', renames the clone
        via the ID server and links clone and original through the
        Parent/Child AnalysisRequest references.

        BUGFIX: `nan.setRetested = False,` previously *replaced* the
        mutator attribute with the tuple (False,) instead of calling it;
        it is now invoked as `nan.setRetested(False)`.

        :param ar: the AnalysisRequest to clone
        :returns: the newly created AnalysisRequest
        """
        newar = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
        newar.title = ar.title
        newar.description = ar.description
        newar.setContact(ar.getContact())
        newar.setCCContact(ar.getCCContact())
        newar.setCCEmails(ar.getCCEmails())
        newar.setBatch(ar.getBatch())
        newar.setTemplate(ar.getTemplate())
        newar.setProfile(ar.getProfile())
        newar.setSamplingDate(ar.getSamplingDate())
        newar.setSampleType(ar.getSampleType())
        newar.setSamplePoint(ar.getSamplePoint())
        newar.setStorageLocation(ar.getStorageLocation())
        newar.setSamplingDeviation(ar.getSamplingDeviation())
        newar.setPriority(ar.getPriority())
        newar.setSampleCondition(ar.getSampleCondition())
        newar.setSample(ar.getSample())
        newar.setClientOrderNumber(ar.getClientOrderNumber())
        newar.setClientReference(ar.getClientReference())
        newar.setClientSampleID(ar.getClientSampleID())
        newar.setDefaultContainerType(ar.getDefaultContainerType())
        newar.setAdHoc(ar.getAdHoc())
        newar.setComposite(ar.getComposite())
        newar.setReportDryMatter(ar.getReportDryMatter())
        newar.setInvoiceExclude(ar.getInvoiceExclude())
        newar.setAttachment(ar.getAttachment())
        newar.setInvoice(ar.getInvoice())
        newar.setDateReceived(ar.getDateReceived())
        newar.setMemberDiscount(ar.getMemberDiscount())
        # Set the results for each AR analysis
        ans = ar.getAnalyses(full_objects=True)
        for an in ans:
            nan = _createObjectByType("Analysis", newar, an.getKeyword())
            nan.setService(an.getService())
            nan.setCalculation(an.getCalculation())
            nan.setInterimFields(an.getInterimFields())
            nan.setResult(an.getResult())
            nan.setResultDM(an.getResultDM())
            nan.setRetested(False)
            nan.setMaxTimeAllowed(an.getMaxTimeAllowed())
            nan.setDueDate(an.getDueDate())
            nan.setDuration(an.getDuration())
            nan.setReportDryMatter(an.getReportDryMatter())
            nan.setAnalyst(an.getAnalyst())
            nan.setInstrument(an.getInstrument())
            nan.setSamplePartition(an.getSamplePartition())
            nan.unmarkCreationFlag()
            zope.event.notify(ObjectInitializedEvent(nan))
            changeWorkflowState(nan, 'bika_analysis_workflow',
                                'to_be_verified')
            nan.reindexObject()

        newar.reindexObject()
        newar.aq_parent.reindexObject()
        renameAfterCreation(newar)
        newar.setRequestID(newar.getId())

        # Keep the bidirectional parent/child link (setChildAnalysisRequest
        # may not exist on older AR schemas, hence the hasattr guard).
        if hasattr(ar, 'setChildAnalysisRequest'):
            ar.setChildAnalysisRequest(newar)
        newar.setParentAnalysisRequest(ar)
        return newar
Exemple #39
0
    def publishFromHTML(self, ar_uids, results_html):
        """ar_uids can be a single UID or a list of AR uids.  The resulting
        ARs will be published together (ie, sent as a single outbound email)
        and the entire report will be saved in each AR's published-results
        tab.

        :param ar_uids: UID string, or list of UID strings, of the Analysis
            Requests to publish.
        :param results_html: rendered report HTML; it is converted to PDF,
            stored on each AR as an ARReport, and emailed to the recipients.
        :returns: the list of AR objects processed (empty list when no AR
            matches ar_uids).
        """
        debug_mode = App.config.getConfiguration().debug_mode
        wf = getToolByName(self.context, 'portal_workflow')

        # Resolve the supplied UID(s) into the actual AR objects.
        uc = getToolByName(self.context, 'uid_catalog')
        ars = [p.getObject() for p in uc(UID=ar_uids)]
        if not ars:
            return []

        results_html = self.localize_images(results_html)
        # Create the pdf report for the supplied HTML.
        pdf_report = createPdf(results_html, False)
        # In debug mode, keep a copy of the PDF on disk for inspection.
        if debug_mode:
            pdf_fn = tempfile.mktemp(suffix=".pdf")
            logger.info("Writing PDF for {} to {}".format(
                ', '.join([ar.Title() for ar in ars]), pdf_fn))
            # Close the handle deterministically (the original leaked it).
            with open(pdf_fn, 'wb') as f:
                f.write(pdf_report)

        for ar in ars:
            # Generate in each relevant AR, a new ARReport
            reportid = ar.generateUniqueId('ARReport')
            report = _createObjectByType("ARReport", ar, reportid)
            report.edit(
                AnalysisRequest=ar.UID(),
                Pdf=pdf_report,
                Html=results_html,
            )
            report.unmarkCreationFlag()
            renameAfterCreation(report)
            # Modify the workflow state of each AR that's been published:
            # verified -> publish, published -> republish, else prepublish.
            status = wf.getInfoFor(ar, 'review_state')
            transitions = {'verified': 'publish', 'published': 'republish'}
            transition = transitions.get(status, 'prepublish')
            try:
                wf.doActionFor(ar, transition)
            except WorkflowException:
                # The transition may legitimately be unavailable for this AR;
                # the report has still been created, so carry on.
                pass

        # compose and send email.
        # The managers of the departments for which the current AR has
        # at least one AS must receive always the pdf report by email.
        # https://github.com/bikalabs/Bika-LIMS/issues/1028
        lab = ars[0].bika_setup.laboratory

        to = []
        to_emails = []

        # Department managers responsible for any of the ARs (deduplicated
        # by email address).
        mngrs = []
        for ar in ars:
            resp = ar.getResponsible()
            if 'dict' in resp and resp['dict']:
                for mngrid, mngr in resp['dict'].items():
                    if mngr['email'] not in [m['email'] for m in mngrs]:
                        mngrs.append(mngr)
        for mngr in mngrs:
            name = mngr['name']
            email = mngr['email']
            to.append(formataddr((encode_header(name), email)))

        # Client contacts who requested publication by email.
        for ar in ars:
            recips = self.get_recipients(ar)
            for recip in recips:
                if 'email' not in recip.get('pubpref', []) \
                        or not recip.get('email', ''):
                    continue
                title = encode_header(recip.get('title', ''))
                email = recip.get('email')
                if email not in to_emails:
                    to.append(formataddr((title, email)))
                    to_emails.append(email)

        # Build the outbound message once the full recipient list is known.
        # (The original built a throwaway mime_msg before this point and
        # discarded it; that dead construction has been removed.)
        mime_msg = MIMEMultipart('related')
        mime_msg['Subject'] = "Published results for %s" % \
                              ",".join([ar.Title() for ar in ars])
        mime_msg['From'] = formataddr(
            (encode_header(lab.getName()), lab.getEmailAddress()))
        mime_msg.preamble = 'This is a multi-part MIME message.'
        msg_txt = MIMEText(results_html, _subtype='html')
        mime_msg.attach(msg_txt)
        mime_msg['To'] = ",".join(to)

        # Attach the pdf to the email
        fn = "_".join([ar.Title() for ar in ars])
        attachPdf(mime_msg, pdf_report, fn)

        # ALS hack.  Create the CSV they desire here now
        csvdata = self.create_als_csv(ars)
        # Attach to email
        part = MIMEBase('text', "csv")
        fn = self.current_certificate_number()
        part.add_header('Content-Disposition',
                        'attachment; filename="{}.csv"'.format(fn))
        part.set_payload(csvdata)
        mime_msg.attach(part)

        msg_string = mime_msg.as_string()

        try:
            host = getToolByName(ars[0], 'MailHost')
            host.send(msg_string, immediate=True)
        except SMTPServerDisconnected as msg:
            # Best-effort delivery: log and keep the published state.
            logger.warn("SMTPServerDisconnected: %s." % msg)
        except SMTPRecipientsRefused as msg:
            raise WorkflowException(str(msg))

        return ars
Exemple #40
0
def notify_rejection(analysisrequest):
    """
    Notifies via email that a given Analysis Request has been rejected. The
    notification is sent to the Client contacts assigned to the Analysis
    Request.

    :param analysisrequest: Analysis Request to which the notification refers
    :returns: true if success
    """

    # We do this imports here to avoid circular dependencies until we deal
    # better with this notify_rejection thing.
    from bika.lims.browser.analysisrequest.reject import \
        AnalysisRequestRejectPdfView, AnalysisRequestRejectEmailView

    arid = analysisrequest.getId()

    # This is the template to render for the pdf that will be either attached
    # to the email and attached the the Analysis Request for further access
    tpl = AnalysisRequestRejectPdfView(analysisrequest,
                                       analysisrequest.REQUEST)
    html = tpl.template()
    html = safe_unicode(html).encode('utf-8')
    filename = '%s-rejected' % arid
    pdf_fn = tempfile.mktemp(suffix=".pdf")
    pdf = createPdf(htmlreport=html, outfile=pdf_fn)
    if pdf:
        # Attach the pdf to the Analysis Request
        attid = analysisrequest.aq_parent.generateUniqueId('Attachment')
        att = _createObjectByType("Attachment", analysisrequest.aq_parent,
                                  attid)
        # Binary mode for the PDF, and close the handle deterministically
        # (the original leaked a text-mode file object here).
        with open(pdf_fn, 'rb') as f:
            att.setAttachmentFile(f)
        # Awkward workaround to rename the file
        attf = att.getAttachmentFile()
        attf.filename = '%s.pdf' % filename
        att.setAttachmentFile(attf)
        att.unmarkCreationFlag()
        renameAfterCreation(att)
        analysisrequest.addAttachment(att)
        os.remove(pdf_fn)

    # This is the message for the email's body
    tpl = AnalysisRequestRejectEmailView(analysisrequest,
                                         analysisrequest.REQUEST)
    html = tpl.template()
    html = safe_unicode(html).encode('utf-8')

    # compose and send email.
    mailto = []
    lab = analysisrequest.bika_setup.laboratory
    mailfrom = formataddr(
        (encode_header(lab.getName()), lab.getEmailAddress()))
    mailsubject = _('%s has been rejected') % arid
    contacts = [analysisrequest.getContact()] + analysisrequest.getCCContact()
    for contact in contacts:
        name = to_utf8(contact.getFullname())
        email = to_utf8(contact.getEmailAddress())
        if email:
            mailto.append(formataddr((encode_header(name), email)))
    if not mailto:
        # Nobody to notify; nothing was sent.
        return False
    mime_msg = MIMEMultipart('related')
    mime_msg['Subject'] = mailsubject
    mime_msg['From'] = mailfrom
    mime_msg['To'] = ','.join(mailto)
    mime_msg.preamble = 'This is a multi-part MIME message.'
    msg_txt = MIMEText(html, _subtype='html')
    mime_msg.attach(msg_txt)
    if pdf:
        attachPdf(mime_msg, pdf, filename)

    try:
        host = getToolByName(analysisrequest, 'MailHost')
        host.send(mime_msg.as_string(), immediate=True)
    except Exception:
        # Narrowed from a bare `except:`: a failed SMTP delivery should not
        # abort the rejection workflow, but it must not swallow SystemExit
        # or KeyboardInterrupt either.
        logger.warning(
            "Email with subject %s was not sent (SMTP connection error)" %
            mailsubject)

    return True
Exemple #41
0
 def _renameAfterCreation(self, check_auto_id=False):
     """Rename this object via the ID-server's renameAfterCreation.

     `check_auto_id` is accepted for Archetypes API compatibility but is
     not consulted here — naming is fully delegated to renameAfterCreation.
     """
     renameAfterCreation(self)
Exemple #42
0
    def __call__(self):
        """Handle the 'duplicate' workflow action for Analysis Services.

        Creates a copy of every selected service, then redirects either to
        the referring page (zero or several copies) or to the new copy's
        edit form (exactly one copy).  Any other action is delegated to the
        default WorkflowAction handler.
        """
        form = self.request.form
        plone.protect.CheckAuthenticator(form)
        workflow = getToolByName(self.context, 'portal_workflow')
        rc = getToolByName(self.context, REFERENCE_CATALOG)
        uc = getToolByName(self.context, 'uid_catalog')
        action, came_from = WorkflowAction._get_form_workflow_action(self)

        if action == 'duplicate':
            selected_services = WorkflowAction._get_selected_items(self)

            ## Create a copy of the selected services
            folder = self.context.bika_setup.bika_analysisservices
            created = []
            for service in selected_services.values():
                _id = folder.invokeFactory('AnalysisService', id = 'tmp')
                folder[_id].setTitle('%s (copy)' % service.Title())
                _id = renameAfterCreation(folder[_id])
                folder[_id].unmarkCreationFlag()

                folder[_id].edit(
                    description = service.Description(),
                    PointOfCapture = service.getPointOfCapture(),
                    ReportDryMatter = service.getReportDryMatter(),
                    Unit = service.getUnit(),
                    Precision = service.getPrecision(),
                    Price = service.getPrice(),
                    BulkDiscount = service.getBulkPrice(),
                    VAT = service.getVAT(),
                    Calculation = service.getCalculation(),
                    Instrument = service.getInstrument(),
                    MaxTimeAllowed = service.getMaxTimeAllowed(),
                    DuplicateVariation = service.getDuplicateVariation(),
                    Category = service.getCategory(),
                    Department = service.getDepartment(),
                    Accredited = service.getAccredited(),
                    Uncertainties = service.getUncertainties(),
                    ResultOptions = service.getResultOptions()
                )
                folder[_id].reindexObject()
                created.append(_id)

            if not created:
                # Nothing selected: the original code fell into the
                # single-item branch and crashed on an unbound `_id`.
                message = self.context.translate(_('No items selected.'))
                self.destination_url = \
                    self.request.get_header("referer",
                                            self.context.absolute_url())
            elif len(created) > 1:
                message = self.context.translate(
                    _('Services ${services} were successfully created.',
                      mapping = {'services': ', '.join(created)}))
                self.destination_url = \
                    self.request.get_header("referer",
                                            self.context.absolute_url())
            else:
                # Copy-paste fix: this duplicates a service, not an AR.
                message = self.context.translate(
                    _('Analysis service ${service} was successfully created.',
                    mapping = {'service': ', '.join(created)}))
                self.destination_url = folder[_id].absolute_url() + "/base_edit"

            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.destination_url)

        else:
            # default bika_listing.py/WorkflowAction for other transitions
            WorkflowAction.__call__(self)
Exemple #43
0
 def _renameAfterCreation(self, check_auto_id=False):
     """Rename this object via the ID server, clearing WebDAV locks first.

     `check_auto_id` is accepted for Archetypes API compatibility but not
     consulted; naming is delegated to renameAfterCreation.
     """
     from bika.lims.idserver import renameAfterCreation
     # ResourceLockedError: Object "multifile..." is locked via WebDAV
     self.wl_clearLocks()
     renameAfterCreation(self)
Exemple #44
0
    def workflow_script_import(self):
        """Create objects from valid ARImport

        For each row of the 'SampleData' grid this creates a Sample (with a
        single 'part-1' SamplePartition) plus an AnalysisRequest, fires the
        (no_)sampling_workflow transitions on each, updates a progress bar
        per row, and finally redirects the browser back to the parent folder.
        """
        def convert_date_string(datestr):
            # Grid dates arrive as 'YYYY-MM-DD'; the date fields expect
            # 'YYYY/MM/DD'.
            return datestr.replace('-', '/')

        def lookup_sampler_uid(import_user):
            # Resolve the imported sampler (either a userid or a full name)
            # among LabManager/Sampler users. Returns '' when there is no
            # match or the full-name match is ambiguous.
            found = False
            userid = None
            user_ids = []
            users = getUsers(self, ['LabManager', 'Sampler']).items()
            for (samplerid, samplername) in users:
                if import_user == samplerid:
                    found = True
                    userid = samplerid
                    break
                if import_user == samplername:
                    user_ids.append(samplerid)
            if found:
                return userid
            if len(user_ids) == 1:
                return user_ids[0]
            if len(user_ids) > 1:
                #raise ValueError('Sampler %s is ambiguous' % import_user)
                return ''
            #Otherwise
            #raise ValueError('Sampler %s not found' % import_user)
            return ''

        bsc = getToolByName(self, 'bika_setup_catalog')
        workflow = getToolByName(self, 'portal_workflow')
        client = self.aq_parent

        title = _('Submitting AR Import')
        description = _('Creating and initialising objects')
        bar = ProgressBar(self, self.REQUEST, title, description)
        notify(InitialiseProgressBar(bar))

        # All analysis profiles, used below to translate the row's profile
        # titles/keys into UIDs.
        profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]

        gridrows = self.schema['SampleData'].get(self)
        row_cnt = 0
        for therow in gridrows:
            # Work on a copy so the stored grid data is not mutated.
            row = therow.copy()
            row_cnt += 1
            # Create Sample
            sample = _createObjectByType('Sample', client, tmpID())
            sample.unmarkCreationFlag()
            # First convert all row values into something the field can take
            sample.edit(**row)
            sample._renameAfterCreation()
            event.notify(ObjectInitializedEvent(sample))
            sample.at_post_create_script()
            swe = self.bika_setup.getSamplingWorkflowEnabled()
            if swe:
                workflow.doActionFor(sample, 'sampling_workflow')
            else:
                workflow.doActionFor(sample, 'no_sampling_workflow')
            # Every imported sample gets exactly one partition, 'part-1'.
            part = _createObjectByType('SamplePartition', sample, 'part-1')
            part.unmarkCreationFlag()
            renameAfterCreation(part)
            if swe:
                workflow.doActionFor(part, 'sampling_workflow')
            else:
                workflow.doActionFor(part, 'no_sampling_workflow')
            container = self.get_row_container(row)
            if container:
                part.edit(Container=container)

            # Profiles are titles, profile keys, or UIDS: convert them to UIDs.
            newprofiles = []
            for title in row['Profiles']:
                objects = [
                    x for x in profiles
                    if title in (x.getProfileKey(), x.UID(), x.Title())
                ]
                for obj in objects:
                    newprofiles.append(obj.UID())
            row['Profiles'] = newprofiles

            # BBB in bika.lims < 3.1.9, only one profile is permitted
            # on an AR.  The services are all added, but only first selected
            # profile name is stored.
            row['Profile'] = newprofiles[0] if newprofiles else None

            # Same for analyses
            newanalyses = set(
                self.get_row_services(row) +
                self.get_row_profile_services(row))
            row['Analyses'] = []
            # get batch
            batch = self.schema['Batch'].get(self)
            if batch:
                row['Batch'] = batch
            # Add AR fields from schema into this row's data
            row['ClientReference'] = self.getClientReference()
            row['ClientOrderNumber'] = self.getClientOrderNumber()
            row['Contact'] = self.getContact()
            row['DateSampled'] = convert_date_string(row['DateSampled'])
            if row['Sampler']:
                row['Sampler'] = lookup_sampler_uid(row['Sampler'])

            # Create AR
            ar = _createObjectByType("AnalysisRequest", client, tmpID())
            ar.setSample(sample)
            ar.unmarkCreationFlag()
            ar.edit(**row)
            ar._renameAfterCreation()
            ar.setAnalyses(list(newanalyses))
            # Every analysis is taken from the single partition created above.
            for analysis in ar.getAnalyses(full_objects=True):
                analysis.setSamplePartition(part)
            ar.at_post_create_script()
            if swe:
                workflow.doActionFor(ar, 'sampling_workflow')
            else:
                workflow.doActionFor(ar, 'no_sampling_workflow')

            # If the Sampling Workflow field values are valid,
            # and the SamplingWorkflow is enabled, we will
            # automatically kick off the "sample" transition now
            tids = [t['id'] for t in get_transitions_for(ar)]
            if 'sample' in tids and ar.getSampler() and ar.getDateSampled():
                do_transition_for(ar, 'sample')

            progress_index = float(row_cnt) / len(gridrows) * 100
            progress = ProgressState(self.REQUEST, progress_index)
            notify(UpdateProgressEvent(progress))
        # document has been written to, and redirect() fails here
        self.REQUEST.response.write(
            '<script>document.location.href="%s"</script>' %
            (self.aq_parent.absolute_url()))
Exemple #45
0
def create_retest(ar):
    """Creates a retest (Analysis Request) from an invalidated Analysis Request
    :param ar: The invalidated Analysis Request
    :type ar: IAnalysisRequest
    :rtype: IAnalysisRequest
    :raises ValueError: if ar is None, not an IAnalysisRequest, already has
        a retest, or is not in the 'invalid' state
    """
    if not ar:
        raise ValueError("Source Analysis Request cannot be None")

    if not IAnalysisRequest.providedBy(ar):
        raise ValueError("Type not supported: {}".format(repr(type(ar))))

    if ar.getRetest():
        # Do not allow the creation of another retest!
        raise ValueError("Retest already set")

    if not ar.isInvalid():
        # Analysis Request must be in 'invalid' state.
        # (The original message had an empty .format() call that dropped the
        # AR repr, and its wording inverted the actual condition.)
        raise ValueError(
            "Cannot do a retest from an Analysis Request that is not "
            "invalid: {}".format(repr(ar)))

    # Open the actions pool
    actions_pool = ActionHandlerPool.get_instance()
    actions_pool.queue_pool()

    # Create the Retest (Analysis Request)
    ignore = ['Analyses', 'DatePublished', 'Invalidated', 'Sample']
    retest = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
    copy_field_values(ar, retest, ignore_fieldnames=ignore)

    # Mark the retest with the `IAnalysisRequestRetest` interface
    alsoProvides(retest, IAnalysisRequestRetest)

    # Assign the source to retest
    retest.setInvalidated(ar)

    # Rename the retest according to the ID server setup
    renameAfterCreation(retest)

    # Copy the analyses from the source
    intermediate_states = ['retracted', 'reflexed']
    for an in ar.getAnalyses(full_objects=True):
        if api.get_workflow_status_of(an) in intermediate_states:
            # Exclude intermediate analyses
            continue

        nan = _createObjectByType("Analysis", retest, an.getKeyword())

        # Make a copy
        ignore_fieldnames = ['DataAnalysisPublished']
        copy_field_values(an, nan, ignore_fieldnames=ignore_fieldnames)
        nan.unmarkCreationFlag()
        push_reindex_to_actions_pool(nan)

    # Transition the retest to "sample_received"!
    changeWorkflowState(retest, 'bika_ar_workflow', 'sample_received')
    alsoProvides(retest, IReceived)

    # Initialize analyses
    for analysis in retest.getAnalyses(full_objects=True):
        if not IRoutineAnalysis.providedBy(analysis):
            continue
        changeWorkflowState(analysis, "bika_analysis_workflow", "unassigned")

    # Reindex and other stuff
    push_reindex_to_actions_pool(retest)
    push_reindex_to_actions_pool(retest.aq_parent)

    # Resume the actions pool
    actions_pool.resume()
    return retest
Exemple #46
0
def auto_generate_id(obj, event):
    """Event handler: generate the object's ID with the IDServer from
    senaite.core.

    :param obj: the newly created content object to rename
    :param event: the lifecycle event that triggered this handler (unused)
    """
    # Lazy %-style logging args: repr(obj) is only computed when the INFO
    # level is actually emitted.
    logger.info("Auto-Generate ID for %r", obj)
    renameAfterCreation(obj)
Exemple #47
0
def import_specifications(portal):
    """Creates (or updates) dynamic specifications from
    resources/results_ranges.xlsx
    """

    logger.info("*** Importing specifications ***")

    def get_bs_object(xlsx_row, xlsx_keyword, portal_type, criteria):
        """Return the single setup object of `portal_type` whose `criteria`
        catalog index equals the value stored under `xlsx_keyword` in
        `xlsx_row`.

        Returns None (after logging a warning) when the value is missing,
        when no object matches, or when the match is ambiguous.
        """
        text_value = xlsx_row.get(xlsx_keyword, None)
        if not text_value:
            logger.warn("Value not set for keyword {}".format(xlsx_keyword))
            return None

        query = {"portal_type": portal_type, criteria: text_value}
        brain = api.search(query, 'bika_setup_catalog')
        if not brain:
            logger.warn("No objects found for type {} and {} '{}'".format(
                portal_type, criteria, text_value))
            return None
        if len(brain) > 1:
            # More than one hit means we cannot decide which object the
            # spreadsheet row refers to; treat as not found.
            logger.warn(
                "More than one object found for type {} and {} '{}'".format(
                    portal_type, criteria, text_value))
            return None

        return api.get_object(brain[0])

    raw_specifications = get_xls_specifications()
    for spec in raw_specifications:

        # Valid Sample Type?
        sample_type = get_bs_object(spec, "sample_type", "SampleType", "title")
        if not sample_type:
            continue

        # Valid Analysis Service?
        service = get_bs_object(spec, "keyword", "AnalysisService",
                                "getKeyword")
        if not service:
            continue

        # The calculation exists?
        calc_title = "Ranges calculation"
        query = dict(calculation=calc_title)
        calc = get_bs_object(query, "calculation", "Calculation", "title")
        if not calc:
            # Create a new one
            folder = portal.bika_setup.bika_calculations
            _id = folder.invokeFactory("Calculation", id=tmpID())
            calc = folder[_id]
            calc.edit(title=calc_title,
                      PythonImports=[{
                          "module": "bhp.lims.specscalculations",
                          "function": "get_specification_for"
                      }],
                      Formula="get_specification_for($spec)")
            calc.unmarkCreationFlag()
            renameAfterCreation(calc)

        # Existing AnalysisSpec?
        specs_title = "{} - calculated".format(sample_type.Title())
        query = dict(portal_type='AnalysisSpec', title=specs_title)
        aspec = api.search(query, 'bika_setup_catalog')
        if not aspec:
            # Create the new AnalysisSpecs object!
            folder = portal.bika_setup.bika_analysisspecs
            _id = folder.invokeFactory('AnalysisSpec', id=tmpID())
            aspec = folder[_id]
            aspec.edit(title=specs_title)
            aspec.unmarkCreationFlag()
            renameAfterCreation(aspec)
        elif len(aspec) > 1:
            logger.warn(
                "More than one Analysis Specification found for {}".format(
                    specs_title))
            continue
        else:
            aspec = api.get_object(aspec[0])
        aspec.setSampleType(sample_type)

        # Set the analysis keyword and bind it to the calculation to use
        keyword = service.getKeyword()
        specs_dict = {
            'keyword': keyword,
            'min_operator': 'geq',
            'min': '0',
            'max_operator': 'lt',
            'max': '0',
            'minpanic': '',
            'maxpanic': '',
            'warn_min': '',
            'warn_max': '',
            'hidemin': '',
            'hidemax': '',
            'rangecomments': '',
            'calculation': api.get_uid(calc),
        }
        ranges = _api.get_field_value(aspec, 'ResultsRange', [{}])
        ranges = filter(lambda val: val.get('keyword') != keyword, ranges)
        ranges.append(specs_dict)
        aspec.setResultsRange(ranges)