Example #1
    def _lab_data(self):
        """ Returns a dictionary that represents the lab object
            Keys: obj, title, url, address, confidence, accredited,
                  accreditation_body, accreditation_logo, logo
        """
        portal = self.context.portal_url.getPortalObject()
        lab = self.context.bika_setup.laboratory
        lab_address = lab.getPostalAddress() \
            or lab.getBillingAddress() \
            or lab.getPhysicalAddress()
        if lab_address:
            _keys = ['address', 'city', 'state', 'zip', 'country']
            _list = ["<div>%s</div>" % lab_address.get(v) for v in _keys
                     if lab_address.get(v)]
            lab_address = "".join(_list)
        else:
            lab_address = ''

        return {'obj': lab,
                'title': to_utf8(lab.Title()),
                'url': to_utf8(lab.getLabURL()),
                'address': to_utf8(lab_address),
                'confidence': lab.getConfidence(),
                'accredited': lab.getLaboratoryAccredited(),
                'accreditation_body': to_utf8(lab.getAccreditationBody()),
                'accreditation_logo': lab.getAccreditationBodyLogo(),
                'logo': "%s/logo_print.png" % portal.absolute_url()}
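The address formatting in _lab_data (and in _client_data below) wraps each non-empty address component in a <div>. A standalone sketch of that idiom, using a made-up address dict rather than data from the LIMS:

# Standalone sketch of the address-to-HTML idiom above; the dict is made up.
address = {'address': '1 Lab Street', 'city': 'Cape Town',
           'state': '', 'zip': '8001', 'country': 'South Africa'}
_keys = ['address', 'city', 'state', 'zip', 'country']
html = "".join("<div>%s</div>" % address.get(k) for k in _keys if address.get(k))
print(html)
# <div>1 Lab Street</div><div>Cape Town</div><div>8001</div><div>South Africa</div>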
Example #2
    def _client_data(self, ar):
        data = {}
        client = ar.aq_parent
        if client:
            data['obj'] = client
            data['id'] = client.id
            data['url'] = client.absolute_url()
            data['name'] = to_utf8(client.getName())
            data['phone'] = to_utf8(client.getPhone())
            data['fax'] = to_utf8(client.getFax())

            client_address = client.getPostalAddress()
            if not client_address:
                # Data from the first contact
                contact = self.getAnalysisRequest().getContact()
                if contact and contact.getBillingAddress():
                    client_address = contact.getBillingAddress()
                elif contact and contact.getPhysicalAddress():
                    client_address = contact.getPhysicalAddress()

            if client_address:
                _keys = ['address', 'city', 'state', 'zip', 'country']
                _list = ["<div>%s</div>" % client_address.get(v) for v in _keys
                         if client_address.get(v)]
                client_address = "".join(_list)
            else:
                client_address = ''
            data['address'] = to_utf8(client_address)
        return data
Example #3
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        translate = getToolByName(instance, 'translation_service').translate

        # keep only alphanumeric characters (strip spaces and separators)
        IBAN = ''.join(c for c in value if c.isalnum())

        IBAN = IBAN[4:] + IBAN[:4]
        country = IBAN[-4:-2]

        if country not in country_dic:
            msg = _('Unknown IBAN country %s' % country)
            return to_utf8(translate(msg))

        length_c, name_c = country_dic[country]

        if len(IBAN) != length_c:
            diff = len(IBAN) - length_c
            msg = _('Wrong IBAN length by %s: %s' %
                    (('short by %i' % -diff)
                     if diff < 0 else ('too long by %i' % diff), value))
            return to_utf8(translate(msg))
        # Validating procedure
        elif int("".join(str(letter_dic[x]) for x in IBAN)) % 97 != 1:
            msg = _('Incorrect IBAN number: %s' % value)
            return to_utf8(translate(msg))

        else:
            # Accepted:
            return True
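For reference, the core of this validator is the standard IBAN mod-97 check: move the first four characters to the end, map letters to numbers, and test divisibility. country_dic and letter_dic (not shown in the snippet) supply the per-country lengths and the character mapping. A minimal, self-contained sketch of just the checksum step, not the validator itself:

# Minimal sketch of the IBAN mod-97 checksum used by the validator above.
# The real validator also checks the country code and expected length.
def iban_checksum_ok(value):
    iban = ''.join(c for c in value if c.isalnum()).upper()
    rearranged = iban[4:] + iban[:4]
    digits = ''.join(str(int(c, 36)) for c in rearranged)  # 'A' -> 10 ... 'Z' -> 35
    return int(digits) % 97 == 1

print(iban_checksum_ok("GB82 WEST 1234 5698 7654 32"))  # True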
Example #4
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        fieldname = kwargs['field'].getName()
        request = kwargs.get('REQUEST', {})
        form = request.form
        form_value = form.get(fieldname)

        translate = getToolByName(instance, 'translation_service').translate
        # bsc = getToolByName(instance, 'bika_setup_catalog')

        # ResultValue must always be a number
        for field in form_value:
            try:
                float(field['ResultValue'])
            except:
                return to_utf8(
                    translate(
                        _("Validation failed: "
                          "Result Values must be numbers")))
            if 'ResultText' not in field:
                return to_utf8(
                    translate(
                        _("Validation failed: Result Text cannot be blank")))

        return True
Example #5
 def _createdby_data(self, ws):
     """ Returns a dict that represents the user who created the ws
         Keys: username, fullname, email
     """
     username = ws.getOwner().getUserName()
     return {'username': username,
             'fullname': to_utf8(self.user_fullname(username)),
             'email': to_utf8(self.user_email(username))}
Example #6
 def _get_sample_attributes(self, ar):
     # Sample attributes
     sample = ar.getSample()
     self.sample = {
         'obj': sample,
         'getClientSampleID': to_utf8(sample.getClientSampleID()),
         'sampletype_title': to_utf8(sample.getSampleType().Title()),
     }
Example #7
 def _analyst_data(self, ws):
     """ Returns a dict that represent the analyst assigned to the
         worksheet.
         Keys: username, fullname, email
     """
     username = ws.getAnalyst()
     return {'username': username,
             'fullname': to_utf8(self.user_fullname(username)),
             'email': to_utf8(self.user_email(username))}
Example #8
 def _contact_data(self, ar):
     data = {}
     contact = ar.getContact()
     if contact:
         data = {'obj': contact,
                 'fullname': to_utf8(contact.getFullname()),
                 'email': to_utf8(contact.getEmailAddress()),
                 'pubpref': contact.getPublicationPreference()}
     return data
Example #9
    def __call__(self, value, *args, **kwargs):

        instance = kwargs['instance']
        request = kwargs.get('REQUEST', {})
        fieldname = kwargs['field'].getName()

        translate = getToolByName(instance, 'translation_service').translate

        mins = request.get('min', {})[0]
        maxs = request.get('max', {})[0]
        errors = request.get('error', {})[0]

        # We run through the validator once per form submit and check all
        # values; the flag stored in the request prevents it from running
        # once per subfield value.
        key = instance.id + fieldname
        if instance.REQUEST.get(key, False):
            return True

        # Retrieve all AS uids
        for uid in mins.keys():

            # For each AS, check the spec input values
            minv = mins.get(uid, '') == '' and '0' or mins[uid]
            maxv = maxs.get(uid, '') == '' and '0' or maxs[uid]
            err = errors.get(uid, '') == '' and '0' or errors[uid]

            # Values must be numbers
            try:
                minv = float(minv)
            except ValueError:
                instance.REQUEST[key] = to_utf8(translate(_("Validation failed: Min values must be numeric")))
                return instance.REQUEST[key]
            try:
                maxv = float(maxv)
            except ValueError:
                instance.REQUEST[key] = to_utf8(translate(_("Validation failed: Max values must be numeric")))
                return instance.REQUEST[key]
            try:
                err = float(err)
            except ValueError:
                instance.REQUEST[key] = to_utf8(translate(_("Validation failed: Percentage error values must be numeric")))
                return instance.REQUEST[key]

            # Min value must be < max
            if minv > maxv:
                instance.REQUEST[key] = to_utf8(translate(_("Validation failed: Max values must be greater than Min values")))
                return instance.REQUEST[key]

            # Error percentage must be between 0 and 100
            if err < 0 or err > 100:
                instance.REQUEST[key] = to_utf8(translate(_("Validation failed: Error percentage must be between 0 and 100")))
                return instance.REQUEST[key]

        instance.REQUEST[key] = True
        return True
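Stripped of the request plumbing and caching, the per-service checks above reduce to three numeric rules. A hypothetical standalone helper (not part of the source) that mirrors them:

# Hypothetical helper mirroring the numeric rules enforced by the validator:
# min/max/error must be numbers, min must not exceed max, error within 0..100.
def check_spec(minv, maxv, err):
    try:
        minv, maxv, err = float(minv or '0'), float(maxv or '0'), float(err or '0')
    except ValueError:
        return "values must be numeric"
    if minv > maxv:
        return "Max values must be greater than Min values"
    if err < 0 or err > 100:
        return "Error percentage must be between 0 and 100"
    return True

print(check_spec('1', '10', '5'))    # True
print(check_spec('10', '1', '5'))    # Max values must be greater than Min values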
Example #10
    def _order_data(self, order, excludearuids=[]):
        """ Creates an order dict, accessible from the view and from each
            specific template.
        """
         
        data = {'obj': order,
                'id': order.getId(),
                'order_number': order.getOrderNumber(),
                'title': order.Title(),
                'description': order.Description(),
                'supplier_id': order.getSupplierUID(),
                'date_dispatched': self.ulocalized_time(order.getDateDispatched(), long_format=1),
                'date_published': self.ulocalized_time(DateTime(), long_format=1),
                'subtotal': order.getSubtotal(),
                'vat_amount': order.getVATAmount(),
                'url': order.absolute_url(),
                'remarks': to_utf8(order.getRemarks()),
                'footer': to_utf8(self.context.bika_setup.getResultFooter()),
                }

        data['supplier'] = self._supplier_data(order)

        # Get the Product List for the Order
        # print order.order_lineitems
        items = order.order_lineitems
        # products = order.aq_parent.objectValues('Product')
        products = self.context.get_supplier_products()
        item_list = []
        grand_total = 0.00
        for item in items:
            withvat_price = 0.00
            prodid = item['Product']
            product = [pro for pro in products if pro.getId() == prodid][0]
            price = float(item['Price'])
            vat = float(item['VAT'])
            qty = float(item['Quantity'])
            withvat_price = price * qty * ((vat / 100) + 1)
            item_list.append({
                'title': product.Title(),
                'description': product.Description(),
                'unit': product.getUnit(),
                'price': price,
                'vat': '%s%%' % vat,
                'quantity': qty,
                'subtotal': '%.2f' % (price * qty),
                'withvat': '%.2f' % withvat_price
            })
            grand_total += withvat_price
        item_list = sorted(item_list, key=itemgetter('title'))

        data['products'] = item_list
        data["grandTotal"] = '%.2f' % grand_total
        return data
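The line-item arithmetic in _order_data is plain percentage-on-top VAT; a quick standalone check with made-up figures:

# Standalone check of the per-line arithmetic above, with made-up figures.
price, qty, vat = 10.00, 3.0, 15.0
subtotal = price * qty                      # 30.00
withvat = price * qty * ((vat / 100) + 1)   # 34.50 (15% VAT on top)
print('%.2f %.2f' % (subtotal, withvat))    # 30.00 34.50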
Example #11
 def __call__(self, context):
     translate = context.translate
     types = (
         ('AnalysisRequest', translate(to_utf8(_('Analysis Request')))),
         ('Batch', translate(to_utf8(_('Batch')))),
         ('Sample', translate(to_utf8(_('Sample')))),
         ('ReferenceSample', translate(to_utf8(_('Reference Sample')))),
         ('Worksheet', translate(to_utf8(_('Worksheet'))))
     )
     items = [SimpleTerm(i[0], i[0], i[1]) for i in types]
     return SimpleVocabulary(items)
Example #12
 def __call__(self, context):
     translate = context.translate
     types = (
         ("AnalysisRequest", translate(to_utf8(_("Analysis Request")))),
         ("Batch", translate(to_utf8(_("Batch")))),
         ("Sample", translate(to_utf8(_("Sample")))),
         ("ReferenceSample", translate(to_utf8(_("Reference Sample")))),
         ("Worksheet", translate(to_utf8(_("Worksheet")))),
     )
     items = [SimpleTerm(i[0], i[0], i[1]) for i in types]
     return SimpleVocabulary(items)
Example #13
def SearchableText(self):
    """This overrides the default method from Archetypes.BaseObject,
    and allows the Identifiers field to be included in SearchableText despite
    the field being an incompatible type.
    """
    data = []
    for field in self.Schema().fields():
        if not field.searchable:
            continue
        #### The following is the addition made to the default AT method:
        fieldname = field.getName()
        if IHaveIdentifiers.providedBy(self) and fieldname == 'Identifiers':
            identifiers = self.Schema()['Identifiers'].get(self)
            idents = [to_utf8(i['Identifier']) for i in identifiers]
            if idents:
                data.extend(idents)
            continue
        ### The code from this point on is lifted directly from BaseObject
        method = field.getIndexAccessor(self)
        try:
            datum = method(mimetype="text/plain")
        except TypeError:
            # Retry in case typeerror was raised because accessor doesn't
            # handle the mimetype argument
            try:
                datum = method()
            except (ConflictError, KeyboardInterrupt):
                raise
            except:
                continue
        if datum:
            vocab = field.Vocabulary(self)
            if isinstance(datum, (list, tuple)):
                #  Unmangle vocabulary: we index key AND value
                vocab_values = map(
                    lambda value, vocab=vocab: vocab.getValue(value, ''),
                    datum)
                datum = list(datum)
                datum.extend(vocab_values)
                datum = ' '.join(datum)
            elif isinstance(datum, basestring):
                if isinstance(datum, unicode):
                    datum = to_utf8(datum)
                value = vocab.getValue(datum, '')
                if isinstance(value, unicode):
                    value = to_utf8(value)
                datum = "%s %s" % (datum, value,)

            if isinstance(datum, unicode):
                datum = to_utf8(datum)
            data.append(str(datum))
    data = ' '.join(data)
    return data
Example #14
    def __call__(self, value, *args, **kwargs):
        if not value:
            return True
        instance = kwargs['instance']
        # fieldname = kwargs['field'].getName()
        request = kwargs.get('REQUEST', {})
        form = request.form
        interim_fields = form.get('InterimFields')

        translate = getToolByName(instance, 'translation_service').translate
        bsc = getToolByName(instance, 'bika_setup_catalog')
        interim_keywords = interim_fields and \
            [f['keyword'] for f in interim_fields] or []
        keywords = re.compile(r"\[([^\.^\]]+)\]").findall(value)

        for keyword in keywords:
            # Check if the service keyword exists and is active.
            dep_service = bsc(getKeyword=keyword, inactive_state="active")
            if not dep_service and keyword not in interim_keywords:
                msg = _(
                    "Validation failed: Keyword '${keyword}' is invalid",
                    mapping={
                        'keyword': safe_unicode(keyword)
                    })
                return to_utf8(translate(msg))

        # Wildcards
        # LIMS-1769 Allow to use LDL and UDL in calculations
        # https://jira.bikalabs.com/browse/LIMS-1769
        allowedwds = ['LDL', 'UDL', 'BELOWLDL', 'ABOVEUDL']
        keysandwildcards = re.compile(r"\[([^\]]+)\]").findall(value)
        keysandwildcards = [k for k in keysandwildcards if '.' in k]
        keysandwildcards = [k.split('.', 1) for k in keysandwildcards]
        errwilds = [k[1] for k in keysandwildcards if k[0] not in keywords]
        if len(errwilds) > 0:
            msg = _(
                "Wildcards for interims are not allowed: ${wildcards}",
                mapping={
                    'wildcards': safe_unicode(', '.join(errwilds))
                })
            return to_utf8(translate(msg))

        wildcards = [k[1] for k in keysandwildcards if k[0] in keywords]
        wildcards = [wd for wd in wildcards if wd not in allowedwds]
        if len(wildcards) > 0:
            msg = _(
                "Invalid wildcards found: ${wildcards}",
                mapping={
                    'wildcards': safe_unicode(', '.join(wildcards))
                })
            return to_utf8(translate(msg))

        return True
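The two regular expressions above split a calculation formula into plain service keywords like [Ca] and keyword.wildcard pairs like [Ca.LDL]. A standalone sketch of just that parsing step, with a made-up formula:

# Standalone sketch of the formula parsing performed by the validator above.
import re

formula = "[Ca] + [Mg] + [Ca.LDL]"  # made-up formula
keywords = re.compile(r"\[([^\.^\]]+)\]").findall(formula)
keysandwildcards = re.compile(r"\[([^\]]+)\]").findall(formula)
keysandwildcards = [k.split('.', 1) for k in keysandwildcards if '.' in k]
print(keywords)          # ['Ca', 'Mg']
print(keysandwildcards)  # [['Ca', 'LDL']]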
Example #15
 def _get_user_attributes(self):
     self.member = self.context.portal_membership.getAuthenticatedMember()
     self.username = self.member.getUserName()
     self.reporter = to_utf8(self.user_fullname(self.username))
     self.reporter_email = to_utf8(self.user_email(self.username))
     self.reporter_signature = ""
     c = [x for x in self.bika_setup_catalog(portal_type='LabContact')
          if x.getObject().getUsername() == self.username]
     if c:
         sf = c[0].getObject().getSignature()
         if sf:
             self.reporter_signature = sf.absolute_url() + "/Signature"
Example #16
    def _lab_data(self):
        portal = self.context.portal_url.getPortalObject()
        lab = self.context.bika_setup.laboratory

        return {'obj': lab,
                'title': to_utf8(lab.Title()),
                'url': to_utf8(lab.getLabURL()),
                'address': to_utf8(self._lab_address(lab)),
                'confidence': lab.getConfidence(),
                'accredited': lab.getLaboratoryAccredited(),
                'accreditation_body': to_utf8(lab.getAccreditationBody()),
                'accreditation_logo': lab.getAccreditationBodyLogo(),
                'logo': "%s/logo_print.png" % portal.absolute_url()}
Example #17
    def _client_data(self, ar):
        data = {}
        client = ar.aq_parent
        if client:
            data['obj'] = client
            data['id'] = client.id
            data['url'] = client.absolute_url()
            data['name'] = to_utf8(client.getName())
            data['phone'] = to_utf8(client.getPhone())
            data['fax'] = to_utf8(client.getFax())

            data['address'] = to_utf8(self._client_address(client))
        return data
Example #18
 def _get_contact_attributes(self, ar):
     # Primary contact attributes
     contact = ar.getContact()
     self.contact = {
         'obj': contact,
         'getFullname': to_utf8(contact.getFullname()),
         'getEmailAddress': to_utf8(contact.getEmailAddress()),
         'getPublicationPreference': contact.getPublicationPreference(),
     } if contact else {
         'obj': contact,
         'getFullname': "",
         'getEmailAddress': "",
         'getPublicationPreference': "",
     }
Example #19
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        translate = getToolByName(instance, 'translation_service').translate
        try:
            value = float(value)
        except:
            msg = _("Validation failed: value must be float")
            return to_utf8(translate(msg))

        if value < 0 or value > 1000:
            msg = _("Validation failed: value must be between 0 and 1000")
            return to_utf8(translate(msg))

        return True
Example #20
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        # fieldname = kwargs['field'].getName()
        # request = kwargs.get('REQUEST', {})
        # form = request.get('form', {})

        translate = getToolByName(instance, 'translation_service').translate

        if re.findall(r"[^A-Za-z\w\d\-\_]", value):
            return _("Validation failed: keyword contains invalid characters")

        # check the value against all AnalysisService keywords;
        # this has to be done via the catalog so we don't
        # clash with ourselves
        bsc = getToolByName(instance, 'bika_setup_catalog')
        services = bsc(portal_type='AnalysisService', getKeyword=value)
        for service in services:
            if service.UID != instance.UID():
                msg = _(
                    "Validation failed: '${title}': This keyword "
                    "is already in use by service '${used_by}'",
                    mapping={
                        'title': safe_unicode(value),
                        'used_by': safe_unicode(service.Title)
                    })
                return to_utf8(translate(msg))

        calc = hasattr(instance, 'getCalculation') and \
            instance.getCalculation() or None
        our_calc_uid = calc and calc.UID() or ''

        # check the value against all Calculation Interim Field ids
        calcs = [c for c in bsc(portal_type='Calculation')]
        for calc in calcs:
            calc = calc.getObject()
            interim_fields = calc.getInterimFields()
            if not interim_fields:
                continue
            for field in interim_fields:
                if field['keyword'] == value and our_calc_uid != calc.UID():
                    msg = _(
                        "Validation failed: '${title}': This keyword "
                        "is already in use by calculation '${used_by}'",
                        mapping={
                            'title': safe_unicode(value),
                            'used_by': safe_unicode(calc.Title())
                        })
                    return to_utf8(translate(msg))
        return True
Example #21
    def _managers_data(self, ar):
        managers = {'ids': [], 'dict': {}}
        departments = {}
        ar_mngrs = ar.getResponsible()
        for id in ar_mngrs['ids']:
            new_depts = ar_mngrs['dict'][id]['departments'].split(',')
            if id in managers['ids']:
                for dept in new_depts:
                    if dept not in departments[id]:
                        departments[id].append(dept)
            else:
                departments[id] = new_depts
                managers['ids'].append(id)
                managers['dict'][id] = ar_mngrs['dict'][id]

        mngrs = departments.keys()
        for mngr in mngrs:
            final_depts = ''
            for dept in departments[mngr]:
                if final_depts:
                    final_depts += ', '
                final_depts += to_utf8(dept)
            managers['dict'][mngr]['departments'] = final_depts

        return managers
Example #22
    def __call__(self, value, *args, **kwargs):

        instance = kwargs['instance']
        # fieldname = kwargs['field'].getName()
        # request = kwargs.get('REQUEST', {})
        # form = request.get('form', {})
        method = instance.getMethod()
        bsc = getToolByName(instance, 'bika_setup_catalog')
        query = {
            'portal_type': 'AnalysisService',
            'getAvailableMethodUIDs': method.UID()
        }
        method_ans_uids = [b.UID for b in bsc(query)]
        rules = instance.getReflexRules()
        error = ''
        pc = getToolByName(instance, 'portal_catalog')
        for rule in rules:
            as_uid = rule.get('analysisservice', '')
            as_brain = pc(
                UID=as_uid,
                portal_type='AnalysisService',
                inactive_state='active')
            if as_brain[0] and as_brain[0].UID in method_ans_uids:
                pass
            else:
                error += as_brain['title'] + ' '
        if error:
            translate = getToolByName(instance,
                                      'translation_service').translate
            msg = _("The following analysis services don't belong to the"
                    "current method: " + error)
            return to_utf8(translate(msg))
        return True
Example #23
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        fieldname = kwargs['field'].getName()
        # request = kwargs.get('REQUEST', {})
        # form = request.get('form', {})

        translate = getToolByName(instance, 'translation_service').translate

        if value == instance.get(fieldname):
            return True

        for item in aq_parent(instance).objectValues():
            if hasattr(item, 'UID') and item.UID() != instance.UID() and \
                    fieldname in item.Schema() and \
                    str(item.Schema()[fieldname].get(item)) == str(value):
                # We have to compare them as strings because even if a number
                # (as an id) is saved inside a string widget and string field,
                # it will be returned as an int. I don't know if it is caused
                # by calling <item.Schema()[fieldname].get(item)>, but it
                # happens...
                msg = _("Validation failed: '${value}' is not unique",
                        mapping={'value': safe_unicode(value)})
                return to_utf8(translate(msg))
        return True
Example #24
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        # fieldname = kwargs['field'].getName()
        # request = kwargs.get('REQUEST', {})
        # form = request.get('form', {})

        translate = getToolByName(instance, 'translation_service').translate
        bsc = getToolByName(instance, 'bika_setup_catalog')
        # uc = getToolByName(instance, 'uid_catalog')

        failures = []

        for category in value:
            if not category:
                continue
            services = bsc(portal_type="AnalysisService",
                           getCategoryUID=category)
            for service in services:
                service = service.getObject()
                calc = service.getCalculation()
                deps = calc and calc.getDependentServices() or []
                for dep in deps:
                    if dep.getCategoryUID() not in value:
                        title = dep.getCategoryTitle()
                        if title not in failures:
                            failures.append(title)
        if failures:
            msg = _("Validation failed: The selection requires the following "
                    "categories to be selected: ${categories}",
                    mapping={'categories': safe_unicode(','.join(failures))})
            return to_utf8(translate(msg))

        return True
Example #25
    def localise_images(self, htmlreport):
        """WeasyPrint will attempt to retrieve attachments directly from the URL
        referenced in the HTML report, which may refer back to a single-threaded
        (and currently occupied) zeoclient, hanging it.  All "attachments"
        using urls ending with at_download/AttachmentFile must be converted
        to local files.

        Returns a list of files which were created, and a modified copy
        of htmlreport.
        """
        cleanup = []

        _htmltext = to_utf8(htmlreport)
        # first regular image tags
        for match in re.finditer("""http.*at_download\/AttachmentFile""", _htmltext, re.I):
            url = match.group()
            att_path = url.replace(self.portal_url+"/", "")
            attachment = self.portal.unrestrictedTraverse(att_path)
            af = attachment.getAttachmentFile()
            filename = af.filename
            extension = "."+filename.split(".")[-1]
            outfile, outfilename = tempfile.mkstemp(suffix=extension)
            outfile = open(outfilename, 'wb')
            outfile.write(str(af.data))
            outfile.close()
            # str.replace returns a new string; reassign so the URL swap sticks
            _htmltext = _htmltext.replace(url, outfilename)
            cleanup.append(outfilename)
        return cleanup, _htmltext
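A standalone sketch of the URL rewriting step, using a dummy HTML string and a fake local filename instead of a tempfile; it also shows the reassignment that str.replace requires, which the fix above relies on:

# Standalone sketch of the URL-to-local-file swap done in localise_images.
import re

html = '<img src="http://lims.example.org/a1/at_download/AttachmentFile"/>'  # dummy
for match in re.finditer(r"http.*at_download\/AttachmentFile", html, re.I):
    url = match.group()
    html = html.replace(url, "/tmp/attachment-1.png")  # fake local path
print(html)  # <img src="/tmp/attachment-1.png"/>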
Example #26
    def __init__(self, context, request):
        BikaListingView.__init__(self, context, request)

        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = False
        self.show_workflow_action_buttons = False
        self.pagesize = 0

        self.icon = self.portal_url + "/++resource++bika.lims.images/%s_big.png" % \
            context.portal_type.lower()
        self.title = to_utf8(self.context.Title()) + " " + t(_("Log"))
        self.description = ""

        self.columns = {
            'Version': {'title': _('Version'), 'sortable': False},
            'Date': {'title': _('Date'), 'sortable': False},
            'User': {'title': _('User'), 'sortable': False},
            'Action': {'title': _('Action'), 'sortable': False},
            'Description': {'title': _('Description'), 'sortable': False},
        }
        self.review_states = [
            {'id': 'default',
             'title': 'All',
             'contentFilter': {},
             'columns': ['Version',
                         'Date',
                         'User',
                         'Action',
                         'Description']},
        ]
Example #27
def load_field_values(instance, include_fields):
    """Load values from an AT object schema fields into a list of dictionaries
    """
    ret = {}
    schema = instance.Schema()
    val = None
    for field in schema.fields():
        fieldname = field.getName()
        if include_fields and fieldname not in include_fields:
            continue
        try:
            val = field.get(instance)
        except AttributeError:
            # If this error is raised, take a look at the add-on content
            # expressions used to obtain their data.
            print "AttributeError:", sys.exc_info()[1]
            print "Unreachable object. Maybe the object comes from an Add-on"
            print traceback.format_exc()

        if val:
            field_type = field.type
            # If it is a proxy field, we need to know the type of the
            # proxied field
            if field_type == 'proxy':
                actual_field = field.get_proxy(instance)
                field_type = actual_field.type
            if field_type == "blob" or field_type == 'file':
                continue
            # I put the UID of all references here in *_uid.
            if field_type == 'reference':
                if type(val) in (list, tuple):
                    ret[fieldname + "_uid"] = [v.UID() for v in val]
                    val = [to_utf8(v.Title()) for v in val]
                else:
                    ret[fieldname + "_uid"] = val.UID()
                    val = to_utf8(val.Title())
            elif field_type == 'boolean':
                val = True if val else False
            elif field_type == 'text':
                val = to_utf8(val)

        try:
            json.dumps(val)
        except:
            val = str(val)
        ret[fieldname] = val
    return ret
Example #28
    def __call__(self, value, *args, **kwargs):
        field = kwargs['field']
        fieldname = field.getName()
        instance = kwargs['instance']
        translate = getToolByName(instance, 'translation_service').translate

        # return directly if nothing changed
        if value == field.get(instance):
            return True

        # We want to use the catalog to speed things up, as using `objectValues`
        # is very expensive if the parent object contains many items
        parent_objects = []

        # 1. Get the right catalog for this object
        catalogs = api.get_catalogs_for(instance)
        catalog = catalogs[0]

        # 2. Check if the field accessor is indexed
        field_index = None
        accessor = field.getAccessor(instance)
        if accessor:
            field_index = accessor.__name__

        # 3. Check if the field index is in the indexes
        # Field is indexed, use the catalog instead of objectValues
        parent_path = api.get_parent_path(instance)
        portal_type = instance.portal_type
        catalog_query = {"portal_type": portal_type,
                         "path": {"query": parent_path, "depth": 1}}

        if field_index and field_index in catalog.indexes():
            # We use the field index to reduce the results list
            catalog_query[field_index] = value
            parent_objects = map(api.get_object, catalog(catalog_query))
        elif fieldname in catalog.indexes():
            # We use the fieldname as index to reduce the results list
            catalog_query[fieldname] = value
            parent_objects = map(api.get_object, catalog(catalog_query))
        else:
            # fall back to the objectValues :(
            parent_object = api.get_parent(instance)
            parent_objects = parent_object.objectValues()

        for item in parent_objects:
            if hasattr(item, 'UID') and item.UID() != instance.UID() and \
               fieldname in item.Schema() and \
               str(item.Schema()[fieldname].get(item)) == str(value):
                # We have to compare them as strings because
                # even if a number (as an id) is saved inside
                # a string widget and string field, it will be
                # returned as an int. I don't know if it is
                # caused by calling
                # <item.Schema()[fieldname].get(item)>,
                # but it happens...
                msg = _("Validation failed: '${value}' is not unique",
                        mapping={'value': safe_unicode(value)})
                return to_utf8(translate(msg))
        return True
Example #29
    def _reporter_data(self, ar):
        data = {}
        member = self.context.portal_membership.getAuthenticatedMember()
        if member:
            username = member.getUserName()
            data['username'] = username
            data['fullname'] = to_utf8(self.user_fullname(username))
            data['email'] = to_utf8(self.user_email(username))

            c = [x for x in self.bika_setup_catalog(portal_type='LabContact')
                 if x.getObject().getUsername() == username]
            if c:
                sf = c[0].getObject().getSignature()
                if sf:
                    data['signature'] = sf.absolute_url() + "/Signature"

        return data
Example #30
    def get_recipients(self, ar):
        """ Returns a list with the recipients and all its publication prefs
        """
        recips = []

        # Contact and CC's
        contact = ar.getContact()
        if contact:
            recips.append({'title': to_utf8(contact.Title()),
                           'email': contact.getEmailAddress(),
                           'pubpref': contact.getPublicationPreference()})
        for cc in ar.getCCContact():
            recips.append({'title': to_utf8(cc.Title()),
                           'email': cc.getEmailAddress(),
                           'pubpref': cc.getPublicationPreference()})

        return recips
Example #31
 def __call__(self, value, *args, **kwargs):
     # avoid the catalog query if the option is not selected
     if not api.get_bika_setup().ClientPatientIDUnique:
         return True
     query = dict(getClientPatientID=value)
     patients = api.search(query, CATALOG_PATIENTS)
     instance = kwargs.get('instance')
     # If there are no patients with this Client Patient ID
     # then it is valid
     if not patients:
         return True
     # If there is only one patient with this Client Patient ID
     # and it is the patient being edited, then it is also valid
     if len(patients) == 1 and api.get_uid(
             patients[0]) == api.get_uid(instance):
         return True
     trans = getToolByName(instance, 'translation_service').translate
     msg = _("Validation failed: '${value}' is not unique",
             mapping={'value': safe_unicode(value)})
     return to_utf8(trans(msg))
Example #32
    def __call__(self, value, *args, **kwargs):
        """
        Check the NIB number
        value:: string with NIB.
        """
        instance = kwargs['instance']
        translate = getToolByName(instance, 'translation_service').translate
        LEN_NIB = 21
        table = (73, 17, 89, 38, 62, 45, 53, 15, 50, 5, 49, 34, 81, 76, 27, 90,
                 9, 30, 3)

        # convert to entire numbers list
        nib = _toIntList(value)

        # checking the length of the number
        if len(nib) != LEN_NIB:
            msg = _('Incorrect NIB number: %s' % value)
            return to_utf8(translate(msg))
        # last numbers algorithm validator
        return nib[-2] * 10 + nib[-1] == 98 - _sumLists(table, nib[:-2]) % 97
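A self-contained sketch of the NIB check-digit rule, reusing the weight table from the validator above; _toIntList and _sumLists in the original presumably convert the string to a digit list and compute the weighted sum:

# Standalone sketch of the NIB check-digit arithmetic used above: the last two
# digits must equal 98 minus (weighted sum of the first 19 digits mod 97).
TABLE = (73, 17, 89, 38, 62, 45, 53, 15, 50, 5, 49, 34, 81, 76, 27, 90, 9, 30, 3)

def nib_checksum_ok(value):
    nib = [int(c) for c in value if c.isdigit()]
    if len(nib) != 21:
        return False
    weighted = sum(w * d for w, d in zip(TABLE, nib[:-2]))
    return nib[-2] * 10 + nib[-1] == 98 - weighted % 97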
Example #33
 def analysisprofiles(self):
     """ Return applicable client and Lab AnalysisProfile records
     """
     res = []
     profiles = []
     client = self.context.portal_type == 'AnalysisRequest' \
         and self.context.aq_parent or self.context
     for profile in client.objectValues("AnalysisProfile"):
         if isActive(profile):
             profiles.append((profile.Title(), profile))
     profiles.sort(lambda x, y: cmp(x[0], y[0]))
     res += profiles
     profiles = []
     for profile in self.context.bika_setup.bika_analysisprofiles.objectValues("AnalysisProfile"):
         if isActive(profile):
             lab = t(_('Lab'))
             title = to_utf8(profile.Title())
             profiles.append(("%s: %s" % (lab, title), profile))
     profiles.sort(lambda x, y: cmp(x[0], y[0]))
     res += profiles
     return res
Example #34
 def artemplates(self):
     """ Return applicable client and Lab ARTemplate records
     """
     res = []
     templates = []
     client = self.context.portal_type == 'AnalysisRequest' \
         and self.context.aq_parent or self.context
     for template in client.objectValues("ARTemplate"):
         if isActive(template):
             templates.append((template.Title(), template))
     templates.sort(lambda x, y: cmp(x[0], y[0]))
     res += templates
     templates = []
     for template in self.context.bika_setup.bika_artemplates.objectValues("ARTemplate"):
         if isActive(template):
             lab = t(_('Lab'))
             title = to_utf8(template.Title())
             templates.append(("%s: %s" % (lab, title), template))
     templates.sort(lambda x, y: cmp(x[0], y[0]))
     res += templates
     return res
Example #35
    def __call__(self, value, *args, **kwargs):
        context = kwargs['instance']
        uid = api.get_uid(context)
        field = kwargs['field']
        fieldname = field.getName()
        translate = getToolByName(context, 'translation_service').translate

        # return directly if nothing changed
        if value == field.get(context):
            return True

        # Fetch the parent object candidates by catalog or by objectValues
        #
        # N.B. We want to use the catalog to speed things up, because using
        # `parent.objectValues` is very expensive if the parent object contains
        # many items and causes the UI to block too long
        catalog_query = self.make_catalog_query(context, field, value)
        parent_objects = self.query_parent_objects(
            context, query=catalog_query)

        for item in parent_objects:
            if hasattr(item, 'UID') and item.UID() != uid and \
               fieldname in item.Schema() and \
               str(item.Schema()[fieldname].get(item)) == str(value).strip():
                # We have to compare them as strings because
                # even if a number (as an id) is saved inside
                # a string widget and string field, it will be
                # returned as an int. I don't know if it is
                # caused by calling
                # <item.Schema()[fieldname].get(item)>,
                # but it happens...
                msg = _(
                    "Validation failed: '${value}' is not unique",
                    mapping={
                        'value': safe_unicode(value)
                    })
                return to_utf8(translate(msg))
        return True
Example #36
    def __call__(self, value, *args, **kwargs):
        # If not prepreserved, no validation required.
        if not value:
            return True

        instance = kwargs['instance']
        # fieldname = kwargs['field'].getName()
        request = kwargs.get('REQUEST', {})
        form = request.form
        preservation = form.get('Preservation')

        if type(preservation) in (list, tuple):
            preservation = preservation[0]

        if preservation:
            return True

        translate = getToolByName(instance, 'translation_service').translate
        # bsc = getToolByName(instance, 'bika_setup_catalog')

        if not preservation:
            msg = _("Validation failed: PrePreserved containers "
                    "must have a preservation selected.")
            return to_utf8(translate(msg))
Example #37
    def _ar_data(self, ar, excludearuids=[]):
        """ Creates an ar dict, accessible from the view and from each
            specific template.
        """
        if ar.UID() in self._cache['_ar_data']:
            return self._cache['_ar_data'][ar.UID()]
        # Not sure why the following 2 lines are needed; ar.getStrain or
        # ar.getSample().getStrain does not work
        strain = ''
        bsc = self.bika_setup_catalog
        strains = bsc(UID=ar.getSample()['Strain'])
        if strains:
            strain = strains[0].Title

        mme_id = state_id = ''
        client_state_license_id = ar.getClientStateLicenseID().split(',')
        if len(client_state_license_id) == 4:
            mme_id = client_state_license_id[1]  # LicenseID
            state_id = client_state_license_id[2]  # LicenseNumber
        data = {'obj': ar,
                'id': ar.getRequestID(),
                #'client_order_num': ar.getClientOrderNumber(),
                'client_reference': ar.getClientReference(),
                'client_sampleid': ar.getClientSampleID(),
                #'adhoc': ar.getAdHoc(),
                #'composite': ar.getComposite(),
                #'report_drymatter': ar.getReportDryMatter(),
                #'invoice_exclude': ar.getInvoiceExclude(),
                'sampling_date': self.ulocalized_time(
                    ar.getSamplingDate(), long_format=1),
                'date_received': self.ulocalized_time(
                    ar.getDateReceived(), long_format=1),
                #'member_discount': ar.getMemberDiscount(),
                'date_sampled': self.ulocalized_time(
                    ar.getDateSampled(), long_format=1),
                'date_published': self.ulocalized_time(DateTime(), long_format=1),
                #'invoiced': ar.getInvoiced(),
                #'late': ar.getLate(),
                #'subtotal': ar.getSubtotal(),
                #'vat_amount': ar.getVATAmount(),
                #'totalprice': ar.getTotalPrice(),
                'invalid': ar.isInvalid(),
                'url': ar.absolute_url(),
                'remarks': to_utf8(ar.getRemarks().replace('\n', '').replace("===", "<br/>")),
                'footer': to_utf8(self.context.bika_setup.getResultFooter()),
                'prepublish': False,
                'published': False,
                #'child_analysisrequest': None,
                #'parent_analysisrequest': None,
                #'resultsinterpretation':ar.getResultsInterpretation(),
                'lot': ar['Lot'],  # To be fixed
                'strain': strain,  # To be fixed
                'cultivation_batch': ar['CultivationBatch'],
                #'resultsinterpretation':ar.getResultsInterpretation(),
                'ar_attachments': self._get_ar_attachments(ar),
                'an_attachments': self._get_an_attachments(ar),
                'attachment_src': None,
                'attachment_width': None,
                'attachment_height': None,
                'mme_id': mme_id,
                'state_id': state_id,}

        # Sub-objects
        #excludearuids.append(ar.UID())
        #puid = ar.getRawParentAnalysisRequest()
        #if puid and puid not in excludearuids:
        #    data['parent_analysisrequest'] = self._ar_data(ar.getParentAnalysisRequest(), excludearuids)
        #cuid = ar.getRawChildAnalysisRequest()
        #if cuid and cuid not in excludearuids:
        #    data['child_analysisrequest'] = self._ar_data(ar.getChildAnalysisRequest(), excludearuids)

        wf = getToolByName(ar, 'portal_workflow')
        allowed_states = ['verified', 'published']
        data['prepublish'] = wf.getInfoFor(ar, 'review_state') not in allowed_states
        if wf.getInfoFor(ar, 'review_state') == 'published':
            data['published'] = True

        data['contact'] = self._contact_data(ar)
        data['client'] = self._client_data(ar)
        #data['sample'] = self._sample_data(ar)
        data['product'] = self._sample_type(ar).get('title', '')
        #data['batch'] = self._batch_data(ar)
        #data['specifications'] = self._specs_data(ar)
        #data['analyses'] = self._analyses_data(ar, ['verified', 'published'])
        #data['qcanalyses'] = self._qcanalyses_data(ar, ['verified', 'published'])
        #data['points_of_capture'] = sorted(set([an['point_of_capture'] for an in data['analyses']]))
        #data['categories'] = sorted(set([an['category'] for an in data['analyses']]))
        #data['hasblanks'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'b']) > 0
        #data['hascontrols'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'c']) > 0
        #data['hasduplicates'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'd']) > 0
        # Attachment src/link
        attachments = ar.getAttachment()
        for attachment in attachments:
            if attachment.getReportOption() != 'r':
                continue
            filename = attachment.getAttachmentFile().filename
            extension = filename.split('.')[-1]
            if extension in ['png', 'jpg', 'jpeg']:  # Check other image extensions
                file_url = attachment.absolute_url()
                data['attachment_src'] = '{}/at_download/AttachmentFile'.format(file_url)
                [width, height] = attachment.getAttachmentFile().getSize()
                maxwidth = 248  # 80% of 310px
                maxheight = 100
                resize_ratio = min(maxwidth / float(width), maxheight / float(height))
                data['attachment_width'] = width * resize_ratio
                data['attachment_height'] = height * resize_ratio
                break

        # Categorize analyses
        #data['categorized_analyses'] = {}
        data['department_analyses'] = {}
        #for an in data['analyses']:
        #    poc = an['point_of_capture']
        #    cat = an['category']
        #    pocdict = data['categorized_analyses'].get(poc, {})
        #    catlist = pocdict.get(cat, [])
        #    catlist.append(an)
        #    pocdict[cat] = catlist
        #    data['categorized_analyses'][poc] = pocdict

        #    # Group by department too
        #    anobj = an['obj']
        #    dept = anobj.getService().getDepartment() if anobj.getService() else None
        #    if dept:
        #        dept = dept.UID()
        #        dep = data['department_analyses'].get(dept, {})
        #        dep_pocdict = dep.get(poc, {})
        #        dep_catlist = dep_pocdict.get(cat, [])
        #        dep_catlist.append(an)
        #        dep_pocdict[cat] = dep_catlist
        #        dep[poc] = dep_pocdict
        #        data['department_analyses'][dept] = dep

        # Categorize qcanalyses
        #data['categorized_qcanalyses'] = {}
        #for an in data['qcanalyses']:
        #    qct = an['reftype']
        #    poc = an['point_of_capture']
        #    cat = an['category']
        #    qcdict = data['categorized_qcanalyses'].get(qct, {})
        #    pocdict = qcdict.get(poc, {})
        #    catlist = pocdict.get(cat, [])
        #    catlist.append(an)
        #    pocdict[cat] = catlist
        #    qcdict[poc] = pocdict
        #    data['categorized_qcanalyses'][qct] = qcdict

        #data['reporter'] = self._reporter_data(ar)
        data['managers'] = self._managers_data(ar)

        portal = self.context.portal_url.getPortalObject()
        data['portal'] = {'obj': portal,
                          'url': portal.absolute_url()}
        data['laboratory'] = self._lab_data()

        # Results interpretation
        ri = {}
        if ar.getResultsInterpretationByDepartment(None):
            ri[''] = ar.getResultsInterpretationByDepartment(None)
        depts = ar.getDepartments()
        for dept in depts:
            ri[dept.Title()] = ar.getResultsInterpretationByDepartment(dept)
        data['resultsinterpretationdepts'] = ri

        self._cache['_ar_data'][ar.UID()] = data
        return data
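The attachment sizing above is a proportional fit into a 248x100 box; a standalone sketch of that arithmetic:

# Standalone sketch of the proportional-resize arithmetic used for the
# embedded attachment image above (248 x 100 is the box from the original).
def fit_into_box(width, height, max_width=248, max_height=100):
    ratio = min(max_width / float(width), max_height / float(height))
    return width * ratio, height * ratio

print(fit_into_box(1200, 800))  # (150.0, 100.0)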
Example #38
    def create_metrc_csv(self, ars):
        analyses = []
        output = StringIO.StringIO()
        writer = csv.writer(output)
        for ar in ars:
            ar_id = ar.id
            date_published = ar.getDatePublished()
            if date_published:
                date_published = date_published.split(' ')[0]
            else:
                date_published = self.ulocalized_time(DateTime(), long_format=0)

            client_sampleid = to_utf8(ar.getClientSampleID())
            as_keyword = ''
            result = ''
            is_in_range = 'True'
            unit_and_ar_id = ''
            sample_type_uid = ar.getSampleType().UID()
            bsc = getToolByName(self, 'bika_setup_catalog')
            analysis_specs = bsc(portal_type='AnalysisSpec',
                                 getSampleTypeUID=sample_type_uid)
            dmk = ar.bika_setup.getResultsDecimalMark()

            lines = []
            analyses = ar.getAnalyses(full_objects=True)
            for analysis in analyses:
                service = analysis.getService()
                if service.getHidden():
                    continue
                specification = analysis.getResultsRange()
                result = analysis.getFormattedResult(html=False)
                if not specification:
                    rr = dicts_to_dict(analysis.aq_parent.getResultsRange(), 'keyword')
                    specification = rr.get(analysis.getKeyword(), None)
                    # No specs available, assume in range:
                    if not specification:
                        is_in_range = True
                else:
                    minimum = specification.get('min', '')
                    maximum = specification.get('max', '')
                    error = specification.get('error', '')
                    if minimum == '' and maximum == '' and error == '':
                        is_in_range = True
                    else:
                        outofrange, acceptable = \
                            isOutOfRange(result, minimum, maximum, error)
                        if outofrange == False:
                            is_in_range = True
                        elif outofrange == True:
                            is_in_range = False

                unit = service.getUnit()
                unit = '({})-'.format(unit) if unit else ''
                unit_and_ar_id = '{}{}'.format(unit, ar_id)

                #Check unit conversion
                if sample_type_uid:
                    i = 0
                    new_text = []
                    hide_original = False
                    an_dict = {'converted_units': []}
                    for unit_conversion in service.getUnitConversions():
                        if unit_conversion.get('SampleType') and \
                           unit_conversion.get('Unit') and \
                           unit_conversion.get('SampleType') == sample_type_uid:
                            i += 1
                            new = dict({})
                            conv = ploneapi.content.get(
                                                UID=unit_conversion['Unit'])
                            unit_and_ar_id = '({})-{}'.format(
                                                    conv.converted_unit, ar_id)
                            result = convert_unit(
                                            analysis.getResult(),
                                            conv.formula,
                                            dmk,
                                            analysis.getPrecision())
                            break

                line = {'date_published': date_published,
                        'client_sampleid': client_sampleid,
                        'as_keyword': service.getShortTitle(),
                        'result': result,
                        'is_in_range': is_in_range,
                        'unit_and_ar_id' : unit_and_ar_id,
                        }
                lines.append(line)

            for l in lines:
                writer.writerow([l['date_published'], l['client_sampleid'],
                                l['as_keyword'], l['result'],
                                l['is_in_range'], l['unit_and_ar_id'],
                                ])

        return output.getvalue()
Example #39
    def getARAnayses(self, ar):
        """ Returns a dict with the following structure:
            [ {'headers': ['cat1_title', 'unit1', 'unit2'],
               'rows': [ ['AS 1 title', val1, val2],
                         ['AS 2 title', val1, val2]],
                       ],
               'footnotes': ['note 1 ', 'Note2']
              },
            ]
            {'category_1_name':
                {'service_1_title':
                    {'service_1_uid':
                        {'service': <AnalysisService-1>,
                         'ars': {'ar1_id': [<Analysis (for as-1)>,
                                           <Analysis (for as-1)>],
                                 'ar2_id': [<Analysis (for as-1)>]
                                },
                        },
                    },
                {'_data':
                    {'footnotes': service.getCategory().Comments()',
                     'unit': service.getUnit}
                },
                {'service_2_title':
                     {'service_2_uid':
                        {'service': <AnalysisService-2>,
                         'ars': {'ar1_id': [<Analysis (for as-2)>,
                                           <Analysis (for as-2)>],
                                 'ar2_id': [<Analysis (for as-2)>]
                                },
                        },
                    },
                ...
                },
            }
        """
        def get_sample_type_uid(analysis):
            if getattr(analysis.aq_parent, 'getSample', None):
                return analysis.aq_parent.getSample().getSampleType().UID()

        analyses = {}
        count = 0
        dmk = ar.bika_setup.getResultsDecimalMark()
        ans = [an.getObject() for an in ar.getAnalyses()]
        sample_type_uid = ar.getSampleType().UID()
        bsc = getToolByName(self, 'bika_setup_catalog')
        analysis_specs = bsc(portal_type='AnalysisSpec',
                             getSampleTypeUID=sample_type_uid)
        analysis_spec = None
        if len(analysis_specs) > 0:
            analysis_spec = analysis_specs[0].getObject()
        for an in ans:
            service = an.getService()
            if service.getHidden():
                continue
            cat = service.getCategoryTitle()
            if cat not in analyses:
                analyses[cat] = {}
            cat_dict = analyses[cat]
            if service.title not in cat_dict:
                cat_dict[service.title] = {}

            an_dict = cat_dict[service.title]
            an_dict['ars'] = an.getFormattedResult()
            an_dict['accredited'] = service.getAccredited()
            an_dict['service'] = service
            an_dict['unit'] = service.getUnit()
            an_dict['include_original'] = True
            an_dict['converted_units'] = []
            an_dict['limits_units'] = []
            # add unit conversion information
            if sample_type_uid:
                i = 0
                new_text = []
                hide_original = False
                for unit_conversion in service.getUnitConversions():
                    if unit_conversion.get('SampleType') and \
                       unit_conversion.get('Unit') and \
                       unit_conversion.get('SampleType') == sample_type_uid:
                        i += 1
                        new = dict({})
                        conv = ploneapi.content.get(
                                            UID=unit_conversion['Unit'])
                        new['unit'] = conv.converted_unit
                        new['ars'] = convert_unit(
                                        an.getResult(),
                                        conv.formula,
                                        dmk,
                                        an.getPrecision())
                        an_dict['converted_units'].append(new)
                        if service.title in cat_dict.keys() and \
                           unit_conversion.get('HideOriginalUnit') == '1':
                            an_dict['include_original'] = False

                if analysis_spec:
                    keyword = service.getKeyword()
                    if keyword:
                        spec_string = analysis_spec.getAnalysisSpecsStr(keyword)
                        if spec_string:
                            new = dict({})
                            new['unit'] = 'Limits'
                            new['ars'] = spec_string.split(' ')[-1]
                            an_dict['limits_units'].append(new)

            if '_data' not in cat_dict:
                cat_dict['_data'] = {}
            cat_dict['_data']['footnotes'] = service.getCategory().Comments()
            if 'unit' not in cat_dict['_data']:
                cat_dict['_data']['unit'] = []
            unit = to_utf8(service.getUnit())
            if unit not in cat_dict['_data']['unit']:
                cat_dict['_data']['unit'].append(unit)
        # Transpose analyses into a table-like structure for the page template
        result = []
        cats = self.sorted_by_sort_key(analyses.keys())
        for cat_title in cats:
            cat_dict_out = {'headers': [cat_title,], 'rows': [], 'notes': []}
            cat_dict_in = analyses[cat_title]
            keys = sorted(cat_dict_in.keys())
            #Get headers
            headers = cat_dict_out['headers']
            limits = []
            for key in keys:
                if key == '_data':
                    notes = cat_dict_in[key].get('footnotes', '')
                    if len(notes):
                        cat_dict_out['notes'].append(notes)
                    continue
                row_dict = cat_dict_in[key] 
                if len(row_dict.get('converted_units', [])) > 0:
                    for unit in row_dict['converted_units']:
                        if unit['unit'] not in headers:
                            headers.append(unit['unit'])
                if row_dict['include_original']:
                    unit = row_dict['unit']
                    if unit is None or len(unit) == 0:
                        unit = 'Result'
                    if unit not in headers:
                        headers.append(unit)
                if len(row_dict.get('limits_units', [])) > 0:
                    for unit in row_dict['limits_units']:
                        if unit['unit'] not in limits:
                            limits.append(unit['unit'])
            for limit in limits:
                if limit not in headers:
                    headers.append(limit)

            # Construct rows within the header constraints
            rows = cat_dict_out['rows']
            for key in keys:
                if key == '_data':
                    continue
                row = [key,]
                for h in headers[:-1]:
                    row.append('')
                row_dict = cat_dict_in[key] 
                if len(row_dict.get('converted_units', [])) > 0:
                    for unit in row_dict['converted_units']:
                        idx = headers.index(unit['unit'])
                        row[idx] = unit['ars']
                if row_dict['include_original']:
                    unit = row_dict['unit']
                    if unit is None or len(unit) == 0:
                        unit = 'Result'
                    idx = headers.index(unit)
                    row[idx] = row_dict['ars']
                if len(row_dict.get('limits_units', [])) > 0:
                    for unit in row_dict['limits_units']:
                        idx = headers.index(unit['unit'])
                        row[idx] = unit['ars']
                rows.append(row)

            for idx in range(len(headers)):
                if idx == 0:
                    continue
                elif headers[idx] == 'Result':
                    continue
                elif headers[idx] == 'Limits':
                    continue
                headers[idx] = 'Results (%s)' % headers[idx]
            result.append(cat_dict_out)
        #print str(result)
        return result
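
# A minimal standalone sketch of the transposition step above: categorized
# results keyed by analysis title are flattened into a header list plus rows
# aligned to those headers. Plain dicts stand in for the catalog objects, and
# the input data below is purely illustrative.
def transpose_category(cat_title, cat_dict):
    headers = [cat_title]
    # collect every unit seen so each one becomes a column
    for title, row in sorted(cat_dict.items()):
        unit = row.get('unit') or 'Result'
        if unit not in headers:
            headers.append(unit)
    rows = []
    for title, row in sorted(cat_dict.items()):
        out = [title] + [''] * (len(headers) - 1)
        unit = row.get('unit') or 'Result'
        out[headers.index(unit)] = row.get('ars', '')
        rows.append(out)
    return {'headers': headers, 'rows': rows}

# Example usage with made-up values:
# transpose_category('Metals', {'Iron': {'unit': 'mg/L', 'ars': '0.3'},
#                               'pH': {'unit': '', 'ars': '7.1'}})
# -> {'headers': ['Metals', 'mg/L', 'Result'],
#     'rows': [['Iron', '0.3', ''], ['pH', '', '7.1']]}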
Example #40
0
    def __call__(self, value, *args, **kwargs):

        instance = kwargs['instance']
        request = kwargs.get('REQUEST', {})
        fieldname = kwargs['field'].getName()

        translate = getToolByName(instance, 'translation_service').translate

        ress = request.get('result', {})[0]
        mins = request.get('min', {})[0]
        maxs = request.get('max', {})[0]
        errs = request.get('error', {})[0]

        # Retrieve all AS uids
        uids = ress.keys()
        for uid in uids:

            # Foreach AS, check spec. input values
            res = ress[uid] if ress[uid] else '0'
            min = mins[uid] if mins[uid] else '0'
            max = maxs[uid] if maxs[uid] else '0'
            err = errs[uid] if errs[uid] else '0'

            # Values must be numbers
            try:
                res = float(res)
            except ValueError:
                return to_utf8(
                    translate(
                        _("Validation failed: Expected values must be numeric")
                    ))
            try:
                min = float(min)
            except ValueError:
                return to_utf8(
                    translate(
                        _("Validation failed: Min values must be numeric")))
            try:
                max = float(max)
            except ValueError:
                return to_utf8(
                    translate(
                        _("Validation failed: Max values must be numeric")))
            try:
                err = float(err)
            except ValueError:
                return to_utf8(
                    translate(
                        _("Validation failed: Percentage error values must be numeric"
                          )))

            # Min value must be < max
            if min > max:
                return to_utf8(
                    translate(
                        _("Validation failed: Max values must be greater than Min values"
                          )))

            # Expected result must be between min and max
            if res < min or res > max:
                return to_utf8(
                    translate(
                        _("Validation failed: Expected values must be between Min and Max values"
                          )))

            # Error percentage must be between 0 and 100
            if err < 0 or err > 100:
                return to_utf8(
                    translate(
                        _("Validation failed: Percentage error values must be between 0 and 100"
                          )))

        return True
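
# A minimal standalone sketch of the per-service checks performed above,
# without the Plone request/translation machinery. It returns an error string
# on the first failed rule, or None when the row is acceptable.
def check_spec_row(result, min_value, max_value, error_pct):
    try:
        res, minv, maxv, err = (float(result or '0'), float(min_value or '0'),
                                float(max_value or '0'), float(error_pct or '0'))
    except ValueError:
        return "All values must be numeric"
    if minv > maxv:
        return "Max values must be greater than Min values"
    if not (minv <= res <= maxv):
        return "Expected values must be between Min and Max values"
    if not (0 <= err <= 100):
        return "Percentage error values must be between 0 and 100"
    return None

# check_spec_row('5', '1', '10', '15')  -> None
# check_spec_row('12', '1', '10', '15') -> 'Expected values must be between Min and Max values'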
Example #41
0
    def __call__(self, value, *args, **kwargs):
        field = kwargs['field']
        fieldname = field.getName()
        instance = kwargs['instance']
        translate = getToolByName(instance, 'translation_service').translate

        # return directly if nothing changed
        if value == field.get(instance):
            return True

        # We want to use the catalog to speed things up, as using `objectValues`
        # is very expensive if the parent object contains many items
        parent_objects = []

        # 1. Get the right catalog for this object
        catalogs = api.get_catalogs_for(instance)
        catalog = catalogs[0]

        # 2. Check if the field accessor is indexed
        field_index = None
        accessor = field.getAccessor(instance)
        if accessor:
            field_index = accessor.__name__

        # 3. Check if the field index is in the indexes
        # Field is indexed, use the catalog instead of objectValues
        parent_path = api.get_parent_path(instance)
        portal_type = instance.portal_type
        catalog_query = {
            "portal_type": portal_type,
            "path": {
                "query": parent_path,
                "depth": 1
            }
        }

        if field_index and field_index in catalog.indexes():
            # We use the field index to reduce the results list
            catalog_query[field_index] = value
            parent_objects = map(api.get_object, catalog(catalog_query))
        elif fieldname in catalog.indexes():
            # We use the fieldname as index to reduce the results list
            catalog_query[fieldname] = value
            parent_objects = map(api.get_object, catalog(catalog_query))
        else:
            # fall back to the objectValues :(
            parent_object = api.get_parent(instance)
            parent_objects = parent_object.objectValues()

        for item in parent_objects:
            if hasattr(item, 'UID') and item.UID() != instance.UID() and \
               fieldname in item.Schema() and \
               str(item.Schema()[fieldname].get(item)) == str(value):
                # Compare as strings: even if a number (used as an id) is
                # saved through a string widget into a string field, it can
                # come back as an int when read via
                # <item.Schema()[fieldname].get(item)>.
                msg = _("Validation failed: '${value}' is not unique",
                        mapping={'value': safe_unicode(value)})
                return to_utf8(translate(msg))
        return True
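
# A minimal sketch of the uniqueness rule above, applied to plain sibling
# records instead of catalog brains. Values are compared as strings, mirroring
# the comment in the validator about ints coming back from string fields.
def is_unique(siblings, own_uid, fieldname, value):
    for item in siblings:
        if item.get('UID') == own_uid:
            continue
        if str(item.get(fieldname)) == str(value):
            return False
    return True

# is_unique([{'UID': 'a', 'title': '1'}, {'UID': 'b', 'title': '2'}],
#           own_uid='b', fieldname='title', value=1)  -> False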
Example #42
0
    def Title(self):
        return to_utf8(safe_unicode(self.title))
Example #43
0
def notify_rejection(analysisrequest):
    """
    Notifies via email that a given Analysis Request has been rejected. The
    notification is sent to the Client contacts assigned to the Analysis
    Request.

    :param analysisrequest: Analysis Request to which the notification refers
    :returns: true if success
    """
    arid = analysisrequest.getRequestID()

    # This is the template to render for the pdf that will be both attached
    # to the email and attached to the Analysis Request for further access
    from bika.lims.browser.analysisrequest.reject import AnalysisRequestRejectPdfView
    tpl = AnalysisRequestRejectPdfView(analysisrequest,
                                       analysisrequest.REQUEST)
    html = tpl.template()
    html = safe_unicode(html).encode('utf-8')
    filename = '%s-rejected' % arid
    pdf_fn = tempfile.mktemp(suffix=".pdf")
    pdf_success = createPdf(html, pdf_fn)
    pdf_data = None
    if pdf_success:
        # Attach the pdf to the Analysis Request
        pdf_data = open(pdf_fn, 'rb').read()
        attid = analysisrequest.aq_parent.generateUniqueId('Attachment')
        att = _createObjectByType("Attachment", analysisrequest.aq_parent,
                                  tmpID())
        att.setAttachmentFile(open(pdf_fn))
        # Awkward workaround to rename the file
        attf = att.getAttachmentFile()
        attf.filename = '%s.pdf' % filename
        att.setAttachmentFile(attf)
        att.unmarkCreationFlag()
        renameAfterCreation(att)
        atts = analysisrequest.getAttachment() + [att] if \
                analysisrequest.getAttachment() else [att]
        atts = [a.UID() for a in atts]
        analysisrequest.setAttachment(atts)
        os.remove(pdf_fn)

    # This is the message for the email's body
    from bika.lims.browser.analysisrequest.reject import AnalysisRequestRejectEmailView
    tpl = AnalysisRequestRejectEmailView(analysisrequest,
                                         analysisrequest.REQUEST)
    html = tpl.template()
    html = safe_unicode(html).encode('utf-8')

    # compose and send email.
    mailto = []
    lab = analysisrequest.bika_setup.laboratory
    mailfrom = formataddr(
        (encode_header(lab.getName()), lab.getEmailAddress()))
    mailsubject = _('%s has been rejected') % arid
    contacts = [analysisrequest.getContact()] + analysisrequest.getCCContact()
    for contact in contacts:
        name = to_utf8(contact.getFullname())
        email = to_utf8(contact.getEmailAddress())
        if email:
            mailto.append(formataddr((encode_header(name), email)))

    if not mailto:
        return False
    mime_msg = MIMEMultipart('related')
    mime_msg['Subject'] = mailsubject
    mime_msg['From'] = mailfrom
    mime_msg['To'] = ','.join(mailto)
    mime_msg.preamble = 'This is a multi-part MIME message.'
    msg_txt = MIMEText(html, _subtype='html')
    mime_msg.attach(msg_txt)
    if pdf_success:
        attachPdf(mime_msg, pdf_data, filename)

    try:
        host = getToolByName(analysisrequest, 'MailHost')
        host.send(mime_msg.as_string(), immediate=True)
    except Exception:
        pass

    return True
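
# A minimal sketch of the email assembly used above, built only from the
# standard library. The lab name, addresses and HTML body are placeholders,
# and attaching the PDF is reduced to a generic application/pdf part instead
# of the attachPdf() helper.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.utils import formataddr

def build_rejection_message(ar_id, lab_name, lab_email, recipients, html,
                            pdf_data=None):
    # recipients is a list of (fullname, email) pairs
    msg = MIMEMultipart('related')
    msg['Subject'] = '%s has been rejected' % ar_id
    msg['From'] = formataddr((lab_name, lab_email))
    msg['To'] = ','.join(formataddr(r) for r in recipients)
    msg.attach(MIMEText(html, _subtype='html'))
    if pdf_data:
        part = MIMEApplication(pdf_data, _subtype='pdf')
        part.add_header('Content-Disposition', 'attachment',
                        filename='%s-rejected.pdf' % ar_id)
        msg.attach(part)
    return msg

# build_rejection_message('AR-001', 'Lab', 'lab@example.com',
#                         [('Jane Doe', 'jane@example.com')],
#                         '<p>Rejected</p>').as_string()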
Example #44
0
    def _folderitems(self, full_objects=False):
        """WARNING: :full_objects: could create a big performance hit.
        """
        # Setting up some attributes
        plone_layout = getMultiAdapter((self.context.aq_inner, self.request),
                                       name=u'plone_layout')
        plone_utils = getToolByName(self.context.aq_inner, 'plone_utils')
        portal_types = getToolByName(self.context.aq_inner, 'portal_types')
        if self.request.form.get('show_all', '').lower() == 'true' \
                or self.show_all is True \
                or self.pagesize == 0:
            show_all = True
        else:
            show_all = False

        # idx increases one unit each time an object is added to the 'items'
        # dictionary to be returned. Note that if the item is not rendered,
        # the idx will not increase.
        idx = 0
        results = []
        self.show_more = False
        brains = self._fetch_brains(self.limit_from)
        for obj in brains:
            # avoid creating unnecessary info for items outside the current
            # batch;  only the path is needed for the "select all" case...
            # we only take allowed items into account
            if not show_all and idx >= self.pagesize:
                # Maximum number of items to be shown reached!
                self.show_more = True
                break

            # we don't know yet if it's a brain or an object
            path = hasattr(obj, 'getPath') and obj.getPath() or \
                "/".join(obj.getPhysicalPath())

            # This item must be rendered, we need the object instead of a brain
            obj = obj.getObject() if hasattr(obj, 'getObject') else obj

            # check if the item must be rendered or not (prevents from
            # doing it later in folderitems) and dealing with paging
            if not obj or not self.isItemAllowed(obj):
                continue

            uid = obj.UID()
            title = obj.Title()
            description = obj.Description()
            icon = plone_layout.getIcon(obj)
            url = obj.absolute_url()
            relative_url = obj.absolute_url(relative=True)

            fti = portal_types.get(obj.portal_type)
            if fti is not None:
                type_title_msgid = fti.Title()
            else:
                type_title_msgid = obj.portal_type

            url_href_title = '%s at %s: %s' % (t(type_title_msgid), path,
                                               to_utf8(description))

            modified = self.ulocalized_time(obj.modified())

            # element css classes
            type_class = 'contenttype-' + \
                         plone_utils.normalizeString(obj.portal_type)

            state_class = ''
            states = {}
            for w in self.workflow.getWorkflowsFor(obj):
                state = w._getWorkflowStateOf(obj).id
                states[w.state_var] = state
                state_class += "state-%s " % state

            results_dict = dict(
                obj=obj,
                id=obj.getId(),
                title=title,
                uid=uid,
                path=path,
                url=url,
                fti=fti,
                item_data=json.dumps([]),
                url_href_title=url_href_title,
                obj_type=obj.Type,
                size=obj.getObjSize,
                modified=modified,
                icon=icon.html_tag(),
                type_class=type_class,
                # a list of lookups for single-value-select fields
                choices={},
                state_class=state_class,
                relative_url=relative_url,
                view_url=url,
                table_row_class="",
                category='None',

                # a list of names of fields that may be edited on this item
                allow_edit=[],

                # a list of names of fields that are compulsory (if editable)
                required=[],
                # a dict where the column name works as a key and the value is
                # the name of the field related with the column. It is used
                # when the name given to the column and the content field it
                # represents diverges. bika_listing_table_items.pt defines an
                # attribute for each item, this attribute is named 'field' and
                # the system fills it taking advantage of this dictionary or
                # the name of the column if it isn't defined in the dict.
                field={},
                # "before", "after" and replace: dictionary (key is column ID)
                # A snippet of HTML which will be rendered
                # before/after/instead of the table cell content.
                before={},  # { before : "<a href=..>" }
                after={},
                replace={},
            )

            rs = None
            wf_state_var = None
            st_title = None

            workflows = self.workflow.getWorkflowsFor(obj)
            for wf in workflows:
                if wf.state_var:
                    wf_state_var = wf.state_var
                    break

            if wf_state_var is not None:
                rs = self.workflow.getInfoFor(obj, wf_state_var)
                st_title = self.workflow.getTitleForStateOnType(
                    rs, obj.portal_type)
                st_title = t(_(st_title))

            if rs:
                results_dict['review_state'] = rs

            for state_var, state in states.items():
                if not st_title:
                    st_title = self.workflow.getTitleForStateOnType(
                        state, obj.portal_type)
                results_dict[state_var] = state
            results_dict['state_title'] = st_title

            results_dict['class'] = {}

            # Adapters for IFieldIcons are, as far as we know, only used for
            # Analysis content types. Since AnalysesView no longer uses this
            # "classic" folderitems from bikalisting, that logic lives in
            # AnalysesView. It is kept here only because this _folderitems
            # function is deprecated and will eventually be removed altogether.
            for name, adapter in getAdapters((obj, ), IFieldIcons):
                auid = obj.UID() if hasattr(obj, 'UID') and callable(
                    obj.UID) else None
                if not auid:
                    continue
                alerts = adapter()
                # logger.info(str(alerts))
                if alerts and auid in alerts:
                    if auid in self.field_icons:
                        self.field_icons[auid].extend(alerts[auid])
                    else:
                        self.field_icons[auid] = alerts[auid]

            # Search for values for all columns in obj
            for key in self.columns.keys():
                # if the key is already in the results dict
                # then we don't replace its value
                value = results_dict.get(key, '')
                if key not in results_dict:
                    attrobj = getFromString(obj, key)
                    value = attrobj if attrobj else value

                    # Custom attribute? Inspect to set the value
                    # for the current column dynamically
                    vattr = self.columns[key].get('attr', None)
                    if vattr:
                        attrobj = getFromString(obj, vattr)
                        value = attrobj if attrobj else value
                    results_dict[key] = value

                # Replace with an url?
                replace_url = self.columns[key].get('replace_url', None)
                if replace_url:
                    attrobj = getFromString(obj, replace_url)
                    if attrobj:
                        url = self.url_or_path_to_url(attrobj)
                        results_dict['replace'][key] = \
                            '<a href="%s">%s</a>' % (url, value)

            # The item basics are filled in. Delegate additional actions to
            # folderitem, which is frequently overridden by child classes
            item = self.folderitem(obj, results_dict, idx)

            # Call folder_item from subscriber adapters
            for subscriber in self.get_listing_view_adapters():
                subscriber.folder_item(obj, item, idx)

            if item:
                results.append(item)
                idx += 1

        return results
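
# A minimal sketch of the paging behaviour above: iterate over the brains,
# skip disallowed items, stop after `pagesize` rendered rows unless `show_all`
# is set, and flag whether more rows remain. The brain/object details are
# replaced by a simple allow predicate.
def paginate(brains, pagesize, show_all=False, is_allowed=lambda b: True):
    items, show_more = [], False
    for brain in brains:
        if not show_all and len(items) >= pagesize:
            show_more = True
            break
        if not is_allowed(brain):
            continue
        items.append(brain)
    return items, show_more

# paginate(range(10), pagesize=3)                   -> ([0, 1, 2], True)
# paginate(range(10), pagesize=3, show_all=True)[1] -> False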
Example #45
0
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        fieldname = kwargs['field'].getName()
        request = kwargs.get('REQUEST', {})
        form = request.form
        interim_fields = form.get(fieldname, [])

        translate = getToolByName(instance, 'translation_service').translate
        bsc = getToolByName(instance, 'bika_setup_catalog')

        # We run through the validator once per form submit, and check all
        # values
        # this value in request prevents running once per subfield value.
        key = instance.id + fieldname
        if instance.REQUEST.get(key, False):
            return True

        for x in range(len(interim_fields)):
            row = interim_fields[x]
            keys = row.keys()
            if 'title' not in keys:
                instance.REQUEST[key] = to_utf8(
                    translate(_("Validation failed: title is required")))
                return instance.REQUEST[key]
            if 'keyword' not in keys:
                instance.REQUEST[key] = to_utf8(
                    translate(_("Validation failed: keyword is required")))
                return instance.REQUEST[key]
            if not re.match(r"^[A-Za-z\w\d\-\_]+$", row['keyword']):
                instance.REQUEST[key] = _(
                    "Validation failed: keyword contains invalid characters")
                return instance.REQUEST[key]

        # keywords and titles used once only in the submitted form
        keywords = {}
        titles = {}
        for field in interim_fields:
            if 'keyword' in field:
                if field['keyword'] in keywords:
                    keywords[field['keyword']] += 1
                else:
                    keywords[field['keyword']] = 1
            if 'title' in field:
                if field['title'] in titles:
                    titles[field['title']] += 1
                else:
                    titles[field['title']] = 1
        for k in [k for k in keywords.keys() if keywords[k] > 1]:
            msg = _(
                "Validation failed: '${keyword}': duplicate keyword",
                mapping={
                    'keyword': safe_unicode(k)
                })
            instance.REQUEST[key] = to_utf8(translate(msg))
            return instance.REQUEST[key]
        for t in [t for t in titles.keys() if titles[t] > 1]:
            msg = _(
                "Validation failed: '${title}': duplicate title",
                mapping={
                    'title': safe_unicode(t)
                })
            instance.REQUEST[key] = to_utf8(translate(msg))
            return instance.REQUEST[key]

        # check all keywords against all AnalysisService keywords for dups
        services = bsc(portal_type='AnalysisService', getKeyword=value)
        if services:
            msg = _(
                "Validation failed: '${title}': "
                "This keyword is already in use by service '${used_by}'",
                mapping={
                    'title': safe_unicode(value),
                    'used_by': safe_unicode(services[0].Title)
                })
            instance.REQUEST[key] = to_utf8(translate(msg))
            return instance.REQUEST[key]

        # any duplicated interimfield titles must share the same keyword
        # any duplicated interimfield keywords must share the same title
        calcs = bsc(portal_type='Calculation')
        keyword_titles = {}
        title_keywords = {}
        for calc in calcs:
            if calc.UID == instance.UID():
                continue
            calc = calc.getObject()
            for field in calc.getInterimFields():
                keyword_titles[field['keyword']] = field['title']
                title_keywords[field['title']] = field['keyword']
        for field in interim_fields:
            if field['keyword'] != value:
                continue
            if 'title' in field and \
               field['title'] in title_keywords.keys() and \
               title_keywords[field['title']] != field['keyword']:
                msg = _(
                    "Validation failed: column title '${title}' "
                    "must have keyword '${keyword}'",
                    mapping={
                        'title': safe_unicode(field['title']),
                        'keyword': safe_unicode(title_keywords[field['title']])
                    })
                instance.REQUEST[key] = to_utf8(translate(msg))
                return instance.REQUEST[key]
            if 'keyword' in field and \
               field['keyword'] in keyword_titles.keys() and \
               keyword_titles[field['keyword']] != field['title']:
                msg = _(
                    "Validation failed: keyword '${keyword}' "
                    "must have column title '${title}'",
                    mapping={
                        'keyword': safe_unicode(field['keyword']),
                        'title': safe_unicode(keyword_titles[field['keyword']])
                    })
                instance.REQUEST[key] = to_utf8(translate(msg))
                return instance.REQUEST[key]

        instance.REQUEST[key] = True
        return True
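
# A minimal sketch of the duplicate detection above using collections.Counter
# instead of hand-rolled tallies. Rows are plain dicts with 'keyword' and
# 'title' keys, as submitted from the interim fields widget.
from collections import Counter

def find_duplicates(interim_fields, key):
    counts = Counter(row[key] for row in interim_fields if key in row)
    return [value for value, n in counts.items() if n > 1]

# find_duplicates([{'keyword': 'a'}, {'keyword': 'a'}, {'keyword': 'b'}],
#                 'keyword')  -> ['a']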
Example #46
0
    def set(self, instance, value, **kwargs):
        """ Mutator. """

        rc = getToolByName(instance, REFERENCE_CATALOG)
        targetUIDs = [
            ref.targetUID
            for ref in rc.getReferences(instance, self.relationship)
        ]

        # empty value
        if not value:
            value = ()
        # list with one empty item
        if type(value) in (list, tuple) and len(value) == 1 and not value[0]:
            value = ()

        if not value and not targetUIDs:
            return

        if not isinstance(value, (list, tuple)):
            value = value,
        elif not self.multiValued and len(value) > 1:
            raise ValueError(
                "Multiple values given for single valued field %r" % self)

        ts = getToolByName(instance, "translation_service").translate

        # normalize the submitted values into a list of UIDs and a map of
        # UID -> target object
        uids = []
        targets = {}
        for v in value:
            if isinstance(v, basestring):
                uids.append(v)
                targets[v] = rc.lookupObject(v)
            elif hasattr(v, 'UID'):
                target_uid = callable(v.UID) and v.UID() or v.UID
                uids.append(target_uid)
                targets[target_uid] = v
            else:
                logger.info("Target has no UID: %s/%s" % (v, value))

        sub = [t for t in targetUIDs if t not in uids]
        add = [v for v in uids if v and v not in targetUIDs]

        newuids = [t for t in list(targetUIDs) + list(uids) if t not in sub]
        for uid in newuids:
            # update version_id of all existing references that aren't
            # about to be removed anyway (contents of sub)
            version_id = hasattr(targets[uid], 'version_id') and \
                       targets[uid].version_id or None
            if version_id is None:
                # attempt initial save of unversioned targets
                pr = getToolByName(instance, 'portal_repository')
                if pr.isVersionable(targets[uid]):
                    pr.save(obj=targets[uid],
                            comment=to_utf8(ts(_("Initial revision"))))
            if not hasattr(instance, 'reference_versions'):
                instance.reference_versions = {}
            if not hasattr(targets[uid], 'version_id'):
                targets[uid].version_id = None
            instance.reference_versions[uid] = targets[uid].version_id

        # tweak keyword arguments for addReference
        addRef_kw = kwargs.copy()
        addRef_kw.setdefault('referenceClass', self.referenceClass)
        if 'schema' in addRef_kw:
            del addRef_kw['schema']
        for uid in add:
            __traceback_info__ = (instance, uid, value, targetUIDs)
            # throws IndexError if uid is invalid
            rc.addReference(instance, uid, self.relationship, **addRef_kw)

        for uid in sub:
            rc.deleteReference(instance, uid, self.relationship)

        if self.referencesSortable:
            if not hasattr(aq_base(instance), 'at_ordered_refs'):
                instance.at_ordered_refs = {}

            instance.at_ordered_refs[self.relationship] = \
                tuple(filter(None, uids))

        if self.callStorageOnSet:
            # if this option is set, the reference field's values get written
            # to the storage even if the reference field never uses the
            # storage, e.g. to store the reference UIDs in an SQL field
            ObjectField.set(self, instance, self.getRaw(instance), **kwargs)
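
# A minimal sketch of how the mutator above decides which references to add
# and which to delete: compare the UIDs currently stored against the UIDs
# just submitted and act only on the difference.
def diff_references(current_uids, new_uids):
    to_remove = [uid for uid in current_uids if uid not in new_uids]
    to_add = [uid for uid in new_uids if uid and uid not in current_uids]
    return to_add, to_remove

# diff_references(['u1', 'u2'], ['u2', 'u3'])  -> (['u3'], ['u1'])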
Example #47
0
    def _order_data(self, order, excludearuids=[]):
        """ Creates an order dict, accessible from the view and from each
            specific template.
        """

        data = {
            'obj': order,
            'id': order.getId(),
            'order_number': order.getOrderNumber(),
            'title': order.Title(),
            'description': order.Description(),
            'supplier_id': order.getSupplierUID(),
            'date_dispatched': self.ulocalized_time(order.getDateDispatched(),
                                                    long_format=1),
            'date_published': self.ulocalized_time(DateTime(), long_format=1),
            'subtotal': order.getSubtotal(),
            'vat_amount': order.getVATAmount(),
            'url': order.absolute_url(),
            'remarks': to_utf8(order.getRemarks()),
            'footer': to_utf8(self.context.bika_setup.getResultFooter()),
        }

        data['supplier'] = self._supplier_data(order)

        # Get the Product List for the Order
        # print order.order_lineitems
        items = order.order_lineitems
        # products = order.aq_parent.objectValues('Product')
        products = self.context.get_supplier_products()
        item_list = []
        grand_total = 0.00
        for item in items:
            withvat_price = 0.00
            prodid = item['Product']
            product = [pro for pro in products if pro.getId() == prodid][0]
            price = float(item['Price'])
            vat = float(item['VAT'])
            qty = float(item['Quantity'])
            withvat_price = price * qty * ((vat / 100) + 1)
            item_list.append({
                'title': product.Title(),
                'description': product.Description(),
                'unit': product.getUnit(),
                'price': price,
                'vat': '%s%%' % vat,
                'quantity': qty,
                'subtotal': '%.2f' % (price * qty),
                'withvat': '%.2f' % (withvat_price)
            })
            grand_total += withvat_price
        item_list = sorted(item_list, key=itemgetter('title'))

        data['products'] = item_list
        data["grandTotal"] = '%.2f' % grand_total
        return data
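
# A minimal sketch of the line-item arithmetic above: each line's VAT-inclusive
# price is price * quantity * (1 + VAT/100), and the grand total is the sum of
# those amounts. Input rows mimic the order.order_lineitems structure.
def order_totals(lineitems):
    total = 0.0
    rows = []
    for item in lineitems:
        price, vat, qty = (float(item['Price']), float(item['VAT']),
                           float(item['Quantity']))
        withvat = price * qty * (1 + vat / 100)
        rows.append({'subtotal': '%.2f' % (price * qty),
                     'withvat': '%.2f' % withvat})
        total += withvat
    return rows, '%.2f' % total

# order_totals([{'Price': '10', 'VAT': '20', 'Quantity': '2'}])
# -> ([{'subtotal': '20.00', 'withvat': '24.00'}], '24.00')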
Example #48
0
    def _ar_data(self, ar):
        """ Creates an ar dict, accessible from the view and from each
            specific template
        """
        data = {'obj': ar,
                'id': ar.getRequestID(),
                'client_order_num': ar.getClientOrderNumber(),
                'client_reference': ar.getClientReference(),
                'client_sampleid': ar.getClientSampleID(),
                'adhoc': ar.getAdHoc(),
                'composite': ar.getComposite(),
                'report_drymatter': ar.getReportDryMatter(),
                'invoice_exclude': ar.getInvoiceExclude(),
                'date_received': self.ulocalized_time(ar.getDateReceived(), long_format=1),
                'member_discount': ar.getMemberDiscount(),
                'date_sampled': self.ulocalized_time(ar.getDateSampled(), long_format=1),
                'date_published': self.ulocalized_time(ar.getDatePublished(), long_format=1),
                'invoiced': ar.getInvoiced(),
                'late': ar.getLate(),
                'subtotal': ar.getSubtotal(),
                'vat_amount': ar.getVATAmount(),
                'totalprice': ar.getTotalPrice(),
                'invalid': ar.isInvalid(),
                'url': ar.absolute_url(),
                'remarks': to_utf8(ar.getRemarks()),
                'footer': to_utf8(self.context.bika_setup.getResultFooter()),
                'prepublish': False,
                'child_analysisrequest': None,
                'parent_analysisrequest': None}

        # Sub-objects
        if ar.getParentAnalysisRequest():
            data['parent_analysisrequest'] = self._artodict(ar.getParentAnalysisRequest())
        if ar.getChildAnalysisRequest():
            data['child_analysisrequest'] = self._artodict(ar.getChildAnalysisRequest())

        wf = getToolByName(ar, 'portal_workflow')
        allowed_states = ['verified', 'published']
        data['prepublish'] = wf.getInfoFor(ar, 'review_state') not in allowed_states

        data['contact'] = self._contact_data(ar)
        data['client'] = self._client_data(ar)
        data['sample'] = self._sample_data(ar)
        data['batch'] = self._batch_data(ar)
        data['specifications'] = self._specs_data(ar)
        data['analyses'] = self._analyses_data(ar, ['verified', 'published'])
        data['qcanalyses'] = self._qcanalyses_data(ar, ['verified', 'published'])
        data['points_of_capture'] = sorted(set([an['point_of_capture'] for an in data['analyses']]))
        data['categories'] = sorted(set([an['category'] for an in data['analyses']]))
        data['haspreviousresults'] = len([an['previous_results'] for an in data['analyses'] if an['previous_results']]) > 0
        data['hasblanks'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'b']) > 0
        data['hascontrols'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'c']) > 0
        data['hasduplicates'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'd']) > 0

        # Categorize analyses
        data['categorized_analyses'] = {}
        for an in data['analyses']:
            poc = an['point_of_capture']
            cat = an['category']
            pocdict = data['categorized_analyses'].get(poc, {})
            catlist = pocdict.get(cat, [])
            catlist.append(an)
            pocdict[cat] = catlist
            data['categorized_analyses'][poc] = pocdict

        # Categorize qcanalyses
        data['categorized_qcanalyses'] = {}
        for an in data['qcanalyses']:
            qct = an['reftype']
            poc = an['point_of_capture']
            cat = an['category']
            qcdict = data['categorized_qcanalyses'].get(qct, {})
            pocdict = qcdict.get(poc, {})
            catlist = pocdict.get(cat, [])
            catlist.append(an)
            pocdict[cat] = catlist
            qcdict[poc] = pocdict
            data['categorized_qcanalyses'][qct] = qcdict

        data['reporter'] = self._reporter_data(ar)
        data['managers'] = self._managers_data(ar)

        portal = self.context.portal_url.getPortalObject()
        data['portal'] = {'obj': portal,
                          'url': portal.absolute_url()}
        data['laboratory'] = self._lab_data()

        return data
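
# A minimal sketch of the categorisation step above, grouping analysis dicts
# first by point of capture and then by category. collections.defaultdict
# replaces the repeated get()/assignment dance.
from collections import defaultdict

def categorize(analyses):
    grouped = defaultdict(lambda: defaultdict(list))
    for an in analyses:
        grouped[an['point_of_capture']][an['category']].append(an)
    return {poc: dict(cats) for poc, cats in grouped.items()}

# categorize([{'point_of_capture': 'lab', 'category': 'Metals', 'title': 'Fe'}])
# -> {'lab': {'Metals': [{'point_of_capture': 'lab', 'category': 'Metals',
#                         'title': 'Fe'}]}}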
Example #49
0
    def get_formatted_unit(self, analysis):
        """Return formatted Unit
        """
        return format_supsub(to_utf8(analysis.Unit))
Example #50
0
    def _ar_data(self, ar, excludearuids=[]):
        """ Creates an ar dict, accessible from the view and from each
            specific template.
        """
        if ar.UID() in self._cache['_ar_data']:
            return self._cache['_ar_data'][ar.UID()]
        data = {'obj': ar,
                'id': ar.getRequestID(),
                'client_order_num': ar.getClientOrderNumber(),
                'client_reference': ar.getClientReference(),
                'client_sampleid': ar.getClientSampleID(),
                'adhoc': ar.getAdHoc(),
                'composite': ar.getComposite(),
                'report_drymatter': ar.getReportDryMatter(),
                'invoice_exclude': ar.getInvoiceExclude(),
                'date_received': self.ulocalized_time(ar.getDateReceived(), long_format=1),
                'member_discount': ar.getMemberDiscount(),
                'date_sampled': self.ulocalized_time(
                    ar.getDateSampled(), long_format=1),
                'date_published': self.ulocalized_time(DateTime(), long_format=1),
                'invoiced': ar.getInvoiced(),
                'late': ar.getLate(),
                'subtotal': ar.getSubtotal(),
                'vat_amount': ar.getVATAmount(),
                'totalprice': ar.getTotalPrice(),
                'invalid': ar.isInvalid(),
                'url': ar.absolute_url(),
                'remarks': to_utf8(ar.getRemarks()),
                'footer': to_utf8(self.context.bika_setup.getResultFooter()),
                'prepublish': False,
                'child_analysisrequest': None,
                'parent_analysisrequest': None,
                'resultsinterpretation': ar.getResultsInterpretation(),
                'ar_attachments': self._get_ar_attachments(ar),
                'an_attachments': self._get_an_attachments(ar),
        }

        # Sub-objects
        excludearuids.append(ar.UID())
        puid = ar.getRawParentAnalysisRequest()
        if puid and puid not in excludearuids:
            data['parent_analysisrequest'] = self._ar_data(ar.getParentAnalysisRequest(), excludearuids)
        cuid = ar.getRawChildAnalysisRequest()
        if cuid and cuid not in excludearuids:
            data['child_analysisrequest'] = self._ar_data(ar.getChildAnalysisRequest(), excludearuids)

        wf = getToolByName(ar, 'portal_workflow')
        allowed_states = ['verified', 'published']
        data['prepublish'] = wf.getInfoFor(ar, 'review_state') not in allowed_states

        data['contact'] = self._contact_data(ar)
        data['client'] = self._client_data(ar)
        data['sample'] = self._sample_data(ar)
        data['batch'] = self._batch_data(ar)
        data['specifications'] = self._specs_data(ar)
        data['analyses'] = self._analyses_data(ar, ['verified', 'published'])
        data['qcanalyses'] = self._qcanalyses_data(ar, ['verified', 'published'])
        data['points_of_capture'] = sorted(set([an['point_of_capture'] for an in data['analyses']]))
        data['categories'] = sorted(set([an['category'] for an in data['analyses']]))
        data['haspreviousresults'] = len([an['previous_results'] for an in data['analyses'] if an['previous_results']]) > 0
        data['hasblanks'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'b']) > 0
        data['hascontrols'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'c']) > 0
        data['hasduplicates'] = len([an['reftype'] for an in data['qcanalyses'] if an['reftype'] == 'd']) > 0

        # Categorize analyses
        data['categorized_analyses'] = {}
        data['department_analyses'] = {}
        for an in data['analyses']:
            poc = an['point_of_capture']
            cat = an['category']
            pocdict = data['categorized_analyses'].get(poc, {})
            catlist = pocdict.get(cat, [])
            catlist.append(an)
            pocdict[cat] = catlist
            data['categorized_analyses'][poc] = pocdict

            # Group by department too
            anobj = an['obj']
            dept = anobj.getService().getDepartment() if anobj.getService() else None
            if dept:
                dept = dept.UID()
                dep = data['department_analyses'].get(dept, {})
                dep_pocdict = dep.get(poc, {})
                dep_catlist = dep_pocdict.get(cat, [])
                dep_catlist.append(an)
                dep_pocdict[cat] = dep_catlist
                dep[poc] = dep_pocdict
                data['department_analyses'][dept] = dep

        # Categorize qcanalyses
        data['categorized_qcanalyses'] = {}
        for an in data['qcanalyses']:
            qct = an['reftype']
            poc = an['point_of_capture']
            cat = an['category']
            qcdict = data['categorized_qcanalyses'].get(qct, {})
            pocdict = qcdict.get(poc, {})
            catlist = pocdict.get(cat, [])
            catlist.append(an)
            pocdict[cat] = catlist
            qcdict[poc] = pocdict
            data['categorized_qcanalyses'][qct] = qcdict

        data['reporter'] = self._reporter_data(ar)
        data['managers'] = self._managers_data(ar)

        portal = self.context.portal_url.getPortalObject()
        data['portal'] = {'obj': portal,
                          'url': portal.absolute_url()}
        data['laboratory'] = self._lab_data()

        # results interpretation
        ri = {}
        if ar.getResultsInterpretationByDepartment(None):
            ri[''] = ar.getResultsInterpretationByDepartment(None)
        depts = ar.getDepartments()
        for dept in depts:
            ri[dept.Title()] = ar.getResultsInterpretationByDepartment(dept)
        data['resultsinterpretationdepts'] = ri

        self._cache['_ar_data'][ar.UID()] = data
        return data
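
# A minimal sketch of the per-UID caching pattern used above: expensive data
# dictionaries are built once per object UID and reused across nested calls
# (for example when parent and child requests reference each other).
class CachedBuilder(object):
    def __init__(self, build):
        self._build = build
        self._cache = {}

    def get(self, uid, *args):
        if uid not in self._cache:
            self._cache[uid] = self._build(uid, *args)
        return self._cache[uid]

# builder = CachedBuilder(lambda uid: {'id': uid})
# builder.get('ar-1') is builder.get('ar-1')  -> True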
Example #51
0
    def __call__(self, value, *args, **kwargs):

        instance = kwargs['instance']
        request = kwargs.get('REQUEST', {})
        fieldname = kwargs['field'].getName()

        translate = getToolByName(instance, 'translation_service').translate

        mins = request.get('min', {})[0]
        maxs = request.get('max', {})[0]
        errors = request.get('error', {})[0]

        # We run through the validator once per form submit, and check all values
        # this value in request prevents running once per subfield value.
        key = instance.id + fieldname
        if instance.REQUEST.get(key, False):
            return True

        # Retrieve all AS uids
        for uid in mins.keys():

            # Foreach AS, check spec. input values
            minv = mins.get(uid, '') == '' and '0' or mins[uid]
            maxv = maxs.get(uid, '') == '' and '0' or maxs[uid]
            err = errors.get(uid, '') == '' and '0' or errors[uid]

            # Values must be numbers
            try:
                minv = float(minv)
            except ValueError:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Min values must be numeric")))
                return instance.REQUEST[key]
            try:
                maxv = float(maxv)
            except ValueError:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Max values must be numeric")))
                return instance.REQUEST[key]
            try:
                err = float(err)
            except ValueError:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Percentage error values must be numeric"
                          )))
                return instance.REQUEST[key]

            # Min value must be < max
            if minv > maxv:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Max values must be greater than Min values"
                          )))
                return instance.REQUEST[key]

            # Error percentage must be between 0 and 100
            if err < 0 or err > 100:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Error percentage must be between 0 and 100"
                          )))
                return instance.REQUEST[key]

        instance.REQUEST[key] = True
        return True
Example #52
0
    def _analysis_data(self, analysis, decimalmark=None):
        if analysis.UID() in self._cache['_analysis_data']:
            return self._cache['_analysis_data'][analysis.UID()]

        keyword = analysis.getKeyword()
        service = analysis.getService()
        andict = {'obj': analysis,
                  'id': analysis.id,
                  'title': analysis.Title(),
                  'keyword': keyword,
                  'scientific_name': service.getScientificName(),
                  'accredited': service.getAccredited(),
                  'point_of_capture': to_utf8(POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
                  'category': to_utf8(service.getCategoryTitle()),
                  'result': analysis.getResult(),
                  'isnumber': isnumber(analysis.getResult()),
                  'unit': to_utf8(service.getUnit()),
                  'formatted_unit': format_supsub(to_utf8(service.getUnit())),
                  'capture_date': analysis.getResultCaptureDate(),
                  'request_id': analysis.aq_parent.getId(),
                  'formatted_result': '',
                  'uncertainty': analysis.getUncertainty(),
                  'formatted_uncertainty': '',
                  'retested': analysis.getRetested(),
                  'remarks': to_utf8(analysis.getRemarks()),
                  'resultdm': to_utf8(analysis.getResultDM()),
                  'outofrange': False,
                  'type': analysis.portal_type,
                  'reftype': analysis.getReferenceType() \
                            if hasattr(analysis, 'getReferenceType')
                            else None,
                  'worksheet': None,
                  'specs': {},
                  'formatted_specs': ''}

        if analysis.portal_type == 'DuplicateAnalysis':
            andict['reftype'] = 'd'

        ws = analysis.getBackReferences('WorksheetAnalysis')
        andict['worksheet'] = ws[0].id if ws and len(ws) > 0 else None
        andict['worksheet_url'] = ws[0].absolute_url if ws and len(ws) > 0 else None
        andict['refsample'] = analysis.getSample().id \
                            if analysis.portal_type == 'Analysis' \
                            else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

        if analysis.portal_type == 'ReferenceAnalysis':
            # The analysis is a Control or Blank. We might use the
            # reference results instead other specs
            uid = analysis.getServiceUID()
            specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})

        else:
            # Get the specs directly from the analysis. The getResultsRange
            # function already takes care about which are the specs to be used:
            # AR, client or lab.
            specs = analysis.getResultsRange()

        andict['specs'] = specs
        scinot = self.context.bika_setup.getScientificNotationReport()
        fresult = analysis.getFormattedResult(specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

        # We don't apply cgi escaping here because results fields must be
        # rendered using the 'structure' wildcard. The reason is that the
        # result can be expressed in sci notation, which may include
        # <sup></sup> html tags. Please note the default value for the 'html'
        # parameter of getFormattedResult is True, so the service will already
        # take into account LDL and UDL symbols '<' and '>' and escape them if
        # necessary.
        andict['formatted_result'] = fresult

        fs = ''
        if specs.get('min', None) and specs.get('max', None):
            fs = '%s - %s' % (specs['min'], specs['max'])
        elif specs.get('min', None):
            fs = '> %s' % specs['min']
        elif specs.get('max', None):
            fs = '< %s' % specs['max']
        andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
        andict['formatted_uncertainty'] = format_uncertainty(analysis, analysis.getResult(), decimalmark=decimalmark, sciformat=int(scinot))

        # Out of range?
        if specs:
            adapters = getAdapters((analysis, ), IResultOutOfRange)
            bsc = getToolByName(self.context, "bika_setup_catalog")
            for name, adapter in adapters:
                ret = adapter(specification=specs)
                if ret and ret['out_of_range']:
                    andict['outofrange'] = True
                    break
        self._cache['_analysis_data'][analysis.UID()]  = andict
        return andict
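
# A minimal sketch of how the formatted specification string above is built
# from the min/max values of a result range.
def format_specs(specs):
    minv, maxv = specs.get('min'), specs.get('max')
    if minv and maxv:
        return '%s - %s' % (minv, maxv)
    if minv:
        return '> %s' % minv
    if maxv:
        return '< %s' % maxv
    return ''

# format_specs({'min': '5', 'max': '10'})  -> '5 - 10'
# format_specs({'max': '10'})              -> '< 10'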
Example #53
0
    def _analysis_data(self, analysis):
        """ Returns a dict that represents the analysis
        """
        decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
        keyword = analysis.getKeyword()
        service = analysis.getService()
        andict = {'obj': analysis,
                  'id': analysis.id,
                  'title': analysis.Title(),
                  'keyword': keyword,
                  'scientific_name': service.getScientificName(),
                  'accredited': service.getAccredited(),
                  'point_of_capture': to_utf8(POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
                  'category': to_utf8(service.getCategoryTitle()),
                  'result': analysis.getResult(),
                  'unit': to_utf8(service.getUnit()),
                  'formatted_unit': format_supsub(to_utf8(service.getUnit())),
                  'capture_date': analysis.getResultCaptureDate(),
                  'request_id': analysis.aq_parent.getId(),
                  'formatted_result': '',
                  'uncertainty': analysis.getUncertainty(),
                  'formatted_uncertainty': '',
                  'retested': analysis.getRetested(),
                  'remarks': to_utf8(analysis.getRemarks()),
                  'resultdm': to_utf8(analysis.getResultDM()),
                  'outofrange': False,
                  'type': analysis.portal_type,
                  'reftype': analysis.getReferenceType() \
                            if hasattr(analysis, 'getReferenceType')
                            else None,
                  'worksheet': None,
                  'specs': {},
                  'formatted_specs': ''}

        andict['refsample'] = analysis.getSample().id \
                            if analysis.portal_type == 'Analysis' \
                            else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

        # Which analysis specs must be used?
        # Try first with those defined at AR Publish Specs level
        if analysis.portal_type == 'ReferenceAnalysis':
            # The analysis is a Control or Blank. We might use the
            # reference results instead other specs
            uid = analysis.getServiceUID()
            specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})

        else:
            # Get the specs directly from the analysis. The getResultsRange
            # function already takes care about which are the specs to be used:
            # AR, client or lab.
            specs = analysis.getResultsRange()

        andict['specs'] = specs
        scinot = self.context.bika_setup.getScientificNotationReport()
        andict['formatted_result'] = analysis.getFormattedResult(
            specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

        fs = ''
        if specs.get('min', None) and specs.get('max', None):
            fs = '%s - %s' % (specs['min'], specs['max'])
        elif specs.get('min', None):
            fs = '> %s' % specs['min']
        elif specs.get('max', None):
            fs = '< %s' % specs['max']
        andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
        andict['formatted_uncertainty'] = format_uncertainty(
            analysis,
            analysis.getResult(),
            decimalmark=decimalmark,
            sciformat=int(scinot))

        # Out of range?
        if specs:
            adapters = getAdapters((analysis, ), IResultOutOfRange)
            bsc = getToolByName(self.context, "bika_setup_catalog")
            for name, adapter in adapters:
                ret = adapter(specification=specs)
                if ret and ret['out_of_range']:
                    andict['outofrange'] = True
                    break
        return andict
Example #54
0
    def publishFromHTML(self, aruid, results_html):
        # The AR can be published only and only if allowed
        uc = getToolByName(self.context, 'uid_catalog')
        ars = uc(UID=aruid)
        if not ars or len(ars) != 1:
            return []

        ar = ars[0].getObject()
        wf = getToolByName(ar, 'portal_workflow')
        allowed_states = ['verified', 'published']
        # Publish/Republish allowed?
        if wf.getInfoFor(ar, 'review_state') not in allowed_states:
            # Pre-publish allowed?
            if not ar.getAnalyses(review_state=allowed_states):
                return []

        # HTML written to debug file
        debug_mode = App.config.getConfiguration().debug_mode
        if debug_mode:
            tmp_fn = tempfile.mktemp(suffix=".html")
            logger.debug("Writing HTML for %s to %s" % (ar.Title(), tmp_fn))
            open(tmp_fn, "wb").write(results_html)

        # Create the pdf report (will always be attached to the AR)
        # we must supply the file ourself so that createPdf leaves it alone.
        pdf_fn = tempfile.mktemp(suffix=".pdf")
        pdf_report = createPdf(htmlreport=results_html, outfile=pdf_fn)

        # PDF written to debug file
        if debug_mode:
            logger.debug("Writing PDF for %s to %s" % (ar.Title(), pdf_fn))
        else:
            os.remove(pdf_fn)

        recipients = []
        contact = ar.getContact()
        lab = ar.bika_setup.laboratory
        if pdf_report:
            if contact:
                recipients = [{
                    'UID': contact.UID(),
                    'Username': to_utf8(contact.getUsername()),
                    'Fullname': to_utf8(contact.getFullname()),
                    'EmailAddress': to_utf8(contact.getEmailAddress()),
                    'PublicationModes': contact.getPublicationPreference()
                }]
            reportid = ar.generateUniqueId('ARReport')
            report = _createObjectByType("ARReport", ar, reportid)
            report.edit(
                AnalysisRequest=ar.UID(),
                Pdf=pdf_report,
                Html=results_html,
                Recipients=recipients
            )
            report.unmarkCreationFlag()
            renameAfterCreation(report)

            # Set status to prepublished/published/republished
            status = wf.getInfoFor(ar, 'review_state')
            transitions = {'verified': 'publish',
                           'published' : 'republish'}
            transition = transitions.get(status, 'prepublish')
            try:
                wf.doActionFor(ar, transition)
            except WorkflowException:
                pass

            # compose and send email.
            # The managers of the departments for which the current AR has
            # at least one AS must always receive the pdf report by email.
            # https://github.com/bikalabs/Bika-LIMS/issues/1028
            mime_msg = MIMEMultipart('related')
            mime_msg['Subject'] = self.get_mail_subject(ar)[0]
            mime_msg['From'] = formataddr(
                (encode_header(lab.getName()), lab.getEmailAddress()))
            mime_msg.preamble = 'This is a multi-part MIME message.'
            msg_txt = MIMEText(results_html, _subtype='html')
            mime_msg.attach(msg_txt)

            to = []
            mngrs = ar.getResponsible()
            for mngrid in mngrs['ids']:
                name = mngrs['dict'][mngrid].get('name', '')
                email = mngrs['dict'][mngrid].get('email', '')
                if email:
                    to.append(formataddr((encode_header(name), email)))

            if len(to) > 0:
                # Send the email to the managers
                mime_msg['To'] = ','.join(to)
                attachPdf(mime_msg, pdf_report, ar.id)

                try:
                    host = getToolByName(ar, 'MailHost')
                    host.send(mime_msg.as_string(), immediate=True)
                except SMTPServerDisconnected as msg:
                    logger.warn("SMTPServerDisconnected: %s." % msg)
                except SMTPRecipientsRefused as msg:
                    raise WorkflowException(str(msg))

        # Send report to recipients
        recips = self.get_recipients(ar)
        for recip in recips:
            if 'email' not in recip.get('pubpref', []) \
                    or not recip.get('email', ''):
                continue

            title = encode_header(recip.get('title', ''))
            email = recip.get('email')
            formatted = formataddr((title, email))

            # Create a new mime_msg object, because the previous one
            # already has the pdf attached
            mime_msg = MIMEMultipart('related')
            mime_msg['Subject'] = self.get_mail_subject(ar)[0]
            mime_msg['From'] = formataddr(
                (encode_header(lab.getName()), lab.getEmailAddress()))
            mime_msg.preamble = 'This is a multi-part MIME message.'
            msg_txt = MIMEText(results_html, _subtype='html')
            mime_msg.attach(msg_txt)
            mime_msg['To'] = formatted

            # Attach the pdf to the email if requested
            if pdf_report and 'pdf' in recip.get('pubpref'):
                attachPdf(mime_msg, pdf_report, ar.id)

            # For now, simply skip sending mail when running under test.
            if hasattr(self.portal, 'robotframework'):
                continue

            msg_string = mime_msg.as_string()

            # content of outgoing email written to debug file
            if debug_mode:
                tmp_fn = tempfile.mktemp(suffix=".email")
                logger.debug("Writing MIME message for %s to %s" % (ar.Title(), tmp_fn))
                open(tmp_fn, "wb").write(msg_string)

            try:
                host = getToolByName(ar, 'MailHost')
                host.send(msg_string, immediate=True)
            except SMTPServerDisconnected as msg:
                logger.warn("SMTPServerDisconnected: %s." % msg)
            except SMTPRecipientsRefused as msg:
                raise WorkflowException(str(msg))

        return [ar]
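
The mail composition above relies on the Bika helpers attachPdf and encode_header. A minimal, standard-library-only sketch of the same pattern (the function name, subject line and sample addresses below are assumptions for illustration, with MIMEApplication standing in for attachPdf):

from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr


def build_results_message(lab_name, lab_email, recipient, results_html,
                          pdf_data=None, ar_id='AR-0001'):
    """Compose a 'related' MIME message with an HTML body and an
    optional PDF attachment, mirroring the publisher above."""
    msg = MIMEMultipart('related')
    msg['Subject'] = 'Results report %s' % ar_id
    msg['From'] = formataddr((lab_name, lab_email))
    msg['To'] = formataddr((recipient['title'], recipient['email']))
    msg.preamble = 'This is a multi-part MIME message.'
    msg.attach(MIMEText(results_html, _subtype='html'))
    if pdf_data:
        # stand-in for attachPdf: attach the raw PDF bytes
        part = MIMEApplication(pdf_data, _subtype='pdf')
        part.add_header('Content-Disposition', 'attachment',
                        filename='%s.pdf' % ar_id)
        msg.attach(part)
    return msg


# Usage sketch (addresses and PDF bytes are placeholders):
# import smtplib
# msg = build_results_message('My Lab', 'lab@example.com',
#                             {'title': 'Jane Doe', 'email': 'jane@example.com'},
#                             '<p>All results within range.</p>',
#                             pdf_data=b'%PDF-1.4 ...')
# smtplib.SMTP('localhost').sendmail('lab@example.com',
#                                    ['jane@example.com'], msg.as_string())
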
Example #55
0
    def _analysis_data(self, analysis):
        """ Returns a dict that represents the analysis
        """
        decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
        keyword = analysis.getKeyword()
        andict = {
            'obj': analysis,
            'id': analysis.id,
            'title': analysis.Title(),
            'keyword': keyword,
            'scientific_name': analysis.getScientificName(),
            'accredited': analysis.getAccredited(),
            'point_of_capture': to_utf8(
                POINTS_OF_CAPTURE.getValue(analysis.getPointOfCapture())),
            'category': to_utf8(analysis.getCategoryTitle()),
            'result': analysis.getResult(),
            'unit': to_utf8(analysis.getUnit()),
            'formatted_unit': format_supsub(to_utf8(analysis.getUnit())),
            'capture_date': analysis.getResultCaptureDate(),
            'request_id': analysis.aq_parent.getId(),
            'formatted_result': '',
            'uncertainty': analysis.getUncertainty(),
            'formatted_uncertainty': '',
            'retested': analysis.isRetest(),
            'remarks': to_utf8(analysis.getRemarks()),
            'outofrange': False,
            'type': analysis.portal_type,
            'reftype': analysis.getReferenceType()
                if hasattr(analysis, 'getReferenceType') else None,
            'worksheet': None,
            'specs': {},
            'formatted_specs': '',
            'review_state': api.get_workflow_status_of(analysis),
        }

        andict['refsample'] = analysis.getSample().id \
            if IReferenceAnalysis.providedBy(analysis) \
            else analysis.getRequestID()

        specs = analysis.getResultsRange()
        andict['specs'] = specs
        scinot = self.context.bika_setup.getScientificNotationReport()
        andict['formatted_result'] = analysis.getFormattedResult(
            specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

        fs = ''
        if specs.get('min', None) and specs.get('max', None):
            fs = '%s - %s' % (specs['min'], specs['max'])
        elif specs.get('min', None):
            fs = '> %s' % specs['min']
        elif specs.get('max', None):
            fs = '< %s' % specs['max']
        andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
        andict['formatted_uncertainty'] = format_uncertainty(
            analysis,
            analysis.getResult(),
            decimalmark=decimalmark,
            sciformat=int(scinot))

        # Out of range?
        andict['outofrange'] = is_out_of_range(analysis)[0]
        return andict
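
The formatted_specs branch reduces to a small pure function; a sketch of that formatting rule in isolation (formatDecimalMark is a Bika helper, so a plain decimal-mark substitution stands in for it here):

def format_specs(specs, decimalmark='.'):
    """Render a results range as 'min - max', '> min' or '< max'."""
    fs = ''
    if specs.get('min') and specs.get('max'):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min'):
        fs = '> %s' % specs['min']
    elif specs.get('max'):
        fs = '< %s' % specs['max']
    # stand-in for formatDecimalMark: swap the decimal point if needed
    return fs.replace('.', decimalmark) if decimalmark != '.' else fs


assert format_specs({'min': '5', 'max': '10'}) == '5 - 10'
assert format_specs({'min': '0.5'}, decimalmark=',') == '> 0,5'
assert format_specs({'max': '10'}) == '< 10'
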
Example #56
0
    def folderitems(self, full_objects=False):
        """
        >>> portal = layer['portal']
        >>> portal_url = portal.absolute_url()
        >>> from plone.app.testing import SITE_OWNER_NAME
        >>> from plone.app.testing import SITE_OWNER_PASSWORD

        Test page batching https://github.com/bikalabs/Bika-LIMS/issues/1276
        When visiting the second page, the Water sampletype should be displayed:

        >>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
        >>> browser.open(portal_url+"/bika_setup/bika_sampletypes/folder_view?",
        ... "list_pagesize=10&list_review_state=default&list_pagenumber=2")
        >>> browser.contents
        '...Water...'
        """

        #self.contentsMethod = self.context.getFolderContents
        if not hasattr(self, 'contentsMethod'):
            self.contentsMethod = getToolByName(self.context, self.catalog)

        context = aq_inner(self.context)
        plone_layout = getMultiAdapter((context, self.request), name = u'plone_layout')
        plone_utils = getToolByName(context, 'plone_utils')
        plone_view = getMultiAdapter((context, self.request), name = u'plone')
        portal_properties = getToolByName(context, 'portal_properties')
        portal_types = getToolByName(context, 'portal_types')
        workflow = getToolByName(context, 'portal_workflow')
        site_properties = portal_properties.site_properties
        norm = getUtility(IIDNormalizer).normalize
        if self.request.get('show_all', '').lower() == 'true' \
                or self.show_all == True \
                or self.pagesize == 0:
            show_all = True
        else:
            show_all = False

        pagenumber = int(self.request.get('pagenumber', 1) or 1)
        pagesize = self.pagesize
        start = (pagenumber - 1) * pagesize
        end = start + pagesize - 1

        if (hasattr(self, 'And') and self.And) \
           or (hasattr(self, 'Or') and self.Or):
            # if contentsMethod is capable, we do an AdvancedQuery.
            if hasattr(self.contentsMethod, 'makeAdvancedQuery'):
                aq = self.contentsMethod.makeAdvancedQuery(self.contentFilter)
                if hasattr(self, 'And') and self.And:
                    tmpAnd = And()
                    for q in self.And:
                        tmpAnd.addSubquery(q)
                    aq &= tmpAnd
                if hasattr(self, 'Or') and self.Or:
                    tmpOr = Or()
                    for q in self.Or:
                        tmpOr.addSubquery(q)
                    aq &= tmpOr
                brains = self.contentsMethod.evalAdvancedQuery(aq)
            else:
                # otherwise, self.contentsMethod must handle contentFilter
                brains = self.contentsMethod(self.contentFilter)
        else:
            brains = self.contentsMethod(self.contentFilter)

        results = []
        self.page_start_index = 0
        current_index = -1
        for i, obj in enumerate(brains):
            # we don't know yet if it's a brain or an object
            path = hasattr(obj, 'getPath') and obj.getPath() or \
                 "/".join(obj.getPhysicalPath())

            if hasattr(obj, 'getObject'):
                obj = obj.getObject()

            # check if the item must be rendered or not (prevents from
            # doing it later in folderitems) and dealing with paging
            if not self.isItemAllowed(obj):
                continue

            # avoid creating unnecessary info for items outside the current
            # batch;  only the path is needed for the "select all" case...
            # we only take allowed items into account
            current_index += 1
            if not show_all and not (start <= current_index <= end):
                results.append(dict(path = path, uid = obj.UID()))
                continue

            uid = obj.UID()
            title = obj.Title()
            description = obj.Description()
            icon = plone_layout.getIcon(obj)
            url = obj.absolute_url()
            relative_url = obj.absolute_url(relative = True)

            fti = portal_types.get(obj.portal_type)
            if fti is not None:
                type_title_msgid = fti.Title()
            else:
                type_title_msgid = obj.portal_type

            url_href_title = '%s at %s: %s' % (
                t(type_title_msgid),
                path,
                to_utf8(description))

            modified = self.ulocalized_time(obj.modified())

            # element css classes
            type_class = 'contenttype-' + \
                plone_utils.normalizeString(obj.portal_type)

            state_class = ''
            states = {}
            for w in workflow.getWorkflowsFor(obj):
                state = w._getWorkflowStateOf(obj).id
                states[w.state_var] = state
                state_class += "state-%s " % state

            results_dict = dict(
                obj = obj,
                id = obj.getId(),
                title = title,
                uid = uid,
                path = path,
                url = url,
                fti = fti,
                item_data = json.dumps([]),
                url_href_title = url_href_title,
                obj_type = obj.Type,
                size = obj.getObjSize,
                modified = modified,
                icon = icon.html_tag(),
                type_class = type_class,
                # a list of lookups for single-value-select fields
                choices = {},
                state_class = state_class,
                relative_url = relative_url,
                view_url = url,
                table_row_class = "",
                category = 'None',

                # a list of names of fields that may be edited on this item
                allow_edit = [],

                # a list of names of fields that are compulsory (if editable)
                required = [],

                # "before", "after" and replace: dictionary (key is column ID)
                # A snippet of HTML which will be rendered
                # before/after/instead of the table cell content.
                before = {}, # { before : "<a href=..>" }
                after = {},
                replace = {},
            )
            try:
                self.review_state = workflow.getInfoFor(obj, 'review_state')
                state_title = workflow.getTitleForStateOnType(
                    self.review_state, obj.portal_type)
                state_title = t(PMF(state_title))
            except Exception:
                self.review_state = 'active'
                state_title = None
            if self.review_state:
                results_dict['review_state'] = self.review_state
            for state_var, state in states.items():
                if not state_title:
                    state_title = workflow.getTitleForStateOnType(
                        state, obj.portal_type)
                results_dict[state_var] = state
            results_dict['state_title'] = state_title

            # extra classes for individual fields on this item { field_id : "css classes" }
            results_dict['class'] = {}
            for name, adapter in getAdapters((obj, ), IFieldIcons):
                auid = obj.UID() if hasattr(obj, 'UID') and callable(obj.UID) else None
                if not auid:
                    continue
                alerts = adapter()
                # logger.info(str(alerts))
                if alerts and auid in alerts:
                    if auid in self.field_icons:
                        self.field_icons[auid].extend(alerts[auid])
                    else:
                        self.field_icons[auid] = alerts[auid]

            # Search for values for all columns in obj
            for key in self.columns.keys():
                if hasattr(obj, key):
                    # if the key is already in the results dict
                    # then we don't replace its value
                    if key in results_dict:
                        continue
                    value = getattr(obj, key)
                    if callable(value):
                        value = value()
                    results_dict[key] = value
            results.append(results_dict)

        return results
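
The batching done at the top of folderitems is plain index arithmetic; a sketch of just that paging window (parameter names follow the request variables used above):

def paging_window(pagenumber, pagesize, show_all=False):
    """Return the inclusive (start, end) index range rendered on a page.

    Items outside this window only get a minimal dict (path/uid), so that
    'select all' still works across pages.
    """
    if show_all or pagesize == 0:
        return 0, float('inf')  # everything falls inside the window
    start = (pagenumber - 1) * pagesize
    end = start + pagesize - 1
    return start, end


start, end = paging_window(pagenumber=2, pagesize=10)
assert (start, end) == (10, 19)
assert start <= 12 <= end          # index 12 is rendered in full
assert not (start <= 25 <= end)    # index 25 only gets path/uid
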
Example #57
0
    def __call__(self, value, **kwargs):
        if not value:
            return True

        instance = kwargs['instance']
        fieldname = kwargs['field'].getName()
        request = instance.REQUEST

        form = request.form
        form_value = form.get(fieldname)

        translate = getToolByName(instance, 'translation_service').translate

        try:
            degrees = int(form_value['degrees'])
        except ValueError:
            return to_utf8(
                translate(_("Validation failed: degrees must be numeric")))

        try:
            minutes = int(form_value['minutes'])
        except ValueError:
            return to_utf8(
                translate(_("Validation failed: minutes must be numeric")))

        try:
            seconds = int(form_value['seconds'])
        except ValueError:
            return to_utf8(
                translate(_("Validation failed: seconds must be numeric")))

        if not 0 <= minutes <= 59:
            return to_utf8(
                translate(_("Validation failed: minutes must be 0 - 59")))

        if not 0 <= seconds <= 59:
            return to_utf8(
                translate(_("Validation failed: seconds must be 0 - 59")))

        bearing = form_value['bearing']

        if fieldname == 'Latitude':
            if not 0 <= degrees <= 90:
                return to_utf8(
                    translate(_("Validation failed: degrees must be 0 - 90")))
            if degrees == 90:
                if minutes != 0:
                    return to_utf8(
                        translate(
                            _("Validation failed: degrees is 90; "
                              "minutes must be zero")))
                if seconds != 0:
                    return to_utf8(
                        translate(
                            _("Validation failed: degrees is 90; "
                              "seconds must be zero")))
            if bearing.lower() not in 'sn':
                return to_utf8(
                    translate(_("Validation failed: Bearing must be N/S")))

        if fieldname == 'Longitude':
            if not 0 <= degrees <= 180:
                return to_utf8(
                    translate(_("Validation failed: degrees must be 0 - 180")))
            if degrees == 180:
                if minutes != 0:
                    return to_utf8(
                        translate(
                            _("Validation failed: degrees is 180; "
                              "minutes must be zero")))
                if seconds != 0:
                    return to_utf8(
                        translate(
                            _("Validation failed: degrees is 180; "
                              "seconds must be zero")))
            if bearing.lower() not in 'ew':
                return to_utf8(
                    translate(_("Validation failed: Bearing must be E/W")))

        return True
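
The bounds checked by this validator map directly onto a decimal-degrees conversion; a sketch under the same limits (the function and its defaults are illustrative, not part of the validator):

def dms_to_decimal(degrees, minutes, seconds, bearing, fieldname='Latitude'):
    """Convert a validated degrees/minutes/seconds coordinate to signed
    decimal degrees, enforcing the same bounds as the validator above."""
    max_degrees = 90 if fieldname == 'Latitude' else 180
    if not 0 <= degrees <= max_degrees:
        raise ValueError('degrees must be 0 - %d' % max_degrees)
    if not 0 <= minutes <= 59 or not 0 <= seconds <= 59:
        raise ValueError('minutes and seconds must be 0 - 59')
    if degrees == max_degrees and (minutes or seconds):
        raise ValueError('minutes and seconds must be zero at %d degrees'
                         % max_degrees)
    value = degrees + minutes / 60.0 + seconds / 3600.0
    return -value if bearing.upper() in ('S', 'W') else value


assert dms_to_decimal(52, 30, 0, 'N') == 52.5
assert dms_to_decimal(45, 15, 0, 'E', fieldname='Longitude') == 45.25
assert dms_to_decimal(90, 0, 0, 'S') == -90.0
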
Example #58
0
    def _analysis_data(self, analysis, decimalmark=None):
        keyword = analysis.getKeyword()
        service = analysis.getService()
        andict = {'obj': analysis,
                  'id': analysis.id,
                  'title': analysis.Title(),
                  'keyword': keyword,
                  'scientific_name': service.getScientificName(),
                  'accredited': service.getAccredited(),
                  'point_of_capture': to_utf8(POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
                  'category': to_utf8(service.getCategoryTitle()),
                  'result': analysis.getResult(),
                  'isnumber': isnumber(analysis.getResult()),
                  'unit': to_utf8(service.getUnit()),
                  'formatted_unit': format_supsub(to_utf8(service.getUnit())),
                  'capture_date': analysis.getResultCaptureDate(),
                  'request_id': analysis.aq_parent.getId(),
                  'formatted_result': '',
                  'uncertainty': analysis.getUncertainty(),
                  'formatted_uncertainty': '',
                  'retested': analysis.getRetested(),
                  'remarks': to_utf8(analysis.getRemarks()),
                  'resultdm': to_utf8(analysis.getResultDM()),
                  'outofrange': False,
                  'type': analysis.portal_type,
                  'reftype': analysis.getReferenceType()
                            if hasattr(analysis, 'getReferenceType')
                            else None,
                  'worksheet': None,
                  'specs': {},
                  'formatted_specs': ''}

        if analysis.portal_type == 'DuplicateAnalysis':
            andict['reftype'] = 'd'

        ws = analysis.getBackReferences('WorksheetAnalysis')
        andict['worksheet'] = ws[0].id if ws and len(ws) > 0 else None
        andict['worksheet_url'] = ws[0].absolute_url() \
            if ws and len(ws) > 0 else None
        andict['refsample'] = analysis.getSample().id \
                            if analysis.portal_type == 'Analysis' \
                            else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

        # Which analysis specs must be used?
        # Try first with those defined at AR Publish Specs level
        if analysis.portal_type == 'ReferenceAnalysis':
            # The analysis is a Control or Blank. We might use the
            # reference results instead of other specs
            uid = analysis.getServiceUID()
            specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})

        elif analysis.portal_type == 'DuplicateAnalysis':
            specs = analysis.getAnalysisSpecs()

        else:
            ar = analysis.aq_parent
            specs = ar.getPublicationSpecification()
            if not specs or keyword not in specs.getResultsRangeDict():
                specs = analysis.getAnalysisSpecs()
            specs = specs.getResultsRangeDict().get(keyword, {}) \
                    if specs else {}

        andict['specs'] = specs
        scinot = self.context.bika_setup.getScientificNotationReport()
        andict['formatted_result'] = analysis.getFormattedResult(
            specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

        fs = ''
        if specs.get('min', None) and specs.get('max', None):
            fs = '%s - %s' % (specs['min'], specs['max'])
        elif specs.get('min', None):
            fs = '> %s' % specs['min']
        elif specs.get('max', None):
            fs = '< %s' % specs['max']
        andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
        andict['formatted_uncertainty'] = format_uncertainty(
            analysis,
            analysis.getResult(),
            decimalmark=decimalmark,
            sciformat=int(scinot))

        # Return specs of current analysis
        andict['specs_dict'] = analysis.getSpecification() \
            .getResultsRangeDict().get(analysis.id)

        # Out of range?
        if specs:
            adapters = getAdapters((analysis, ), IResultOutOfRange)
            bsc = getToolByName(self.context, "bika_setup_catalog")
            for name, adapter in adapters:
                ret = adapter(specification=specs)
                if ret and ret['out_of_range']:
                    andict['outofrange'] = True
                    break
        return andict
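
The out-of-range decision itself only needs the numeric result and the min/max pair; a sketch of that check outside the IResultOutOfRange adapter machinery (adapter-specific tolerances are ignored, and the function name here is illustrative):

def result_out_of_range(result, specs):
    """Return True if a numeric result falls outside the specs range."""
    try:
        value = float(result)
    except (TypeError, ValueError):
        return False  # non-numeric results are never flagged
    minv = float(specs['min']) if specs.get('min') else None
    maxv = float(specs['max']) if specs.get('max') else None
    if minv is not None and value < minv:
        return True
    if maxv is not None and value > maxv:
        return True
    return False


assert result_out_of_range('12', {'min': '5', 'max': '10'}) is True
assert result_out_of_range('7.5', {'min': '5', 'max': '10'}) is False
assert result_out_of_range('ND', {'min': '5', 'max': '10'}) is False
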
Example #59
0
    def __call__(self, subf_value, *args, **kwargs):

        instance = kwargs['instance']
        request = kwargs.get('REQUEST', {})
        fieldname = kwargs['field'].getName()
        translate = getToolByName(instance, 'translation_service').translate

        # We run through the validator once per form submit and check all
        # values; this flag in the request prevents the validator from
        # running once per subfield value.
        key = instance.id + fieldname
        if instance.REQUEST.get(key, False):
            return True

        for i, value in enumerate(request[fieldname]):

            # Values must be numbers
            try:
                minv = float(value['intercept_min'])
            except ValueError:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Min values must be numeric")))
                return instance.REQUEST[key]
            try:
                maxv = float(value['intercept_max'])
            except ValueError:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Max values must be numeric")))
                return instance.REQUEST[key]

            # values may be percentages; the rest of the numeric validation must
            # still pass once the '%' is stripped off.
            err = value['errorvalue']
            perc = False
            if err.endswith('%'):
                perc = True
                err = err[:-1]
            try:
                err = float(err)
            except ValueError:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Error values must be numeric")))
                return instance.REQUEST[key]

            if perc and (err < 0 or err > 100):
                # Error percentage must be between 0 and 100
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Error percentage must be between 0 "
                          "and 100")))
                return instance.REQUEST[key]

            # Min value must be < max
            if minv > maxv:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Max values must be greater than Min "
                          "values")))
                return instance.REQUEST[key]

            # Error values must be 0 or greater
            if err < 0:
                instance.REQUEST[key] = to_utf8(
                    translate(
                        _("Validation failed: Error value must be 0 or greater"
                          )))
                return instance.REQUEST[key]

        instance.REQUEST[key] = True
        return True
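
The per-row checks in this validator isolate cleanly; a sketch of the same rules for a single row (the subfield names intercept_min, intercept_max and errorvalue follow the form fields used above):

def validate_intercept_row(row):
    """Return an error string for one row, or None if the row is valid."""
    try:
        minv = float(row['intercept_min'])
        maxv = float(row['intercept_max'])
    except ValueError:
        return "Min and Max values must be numeric"

    err = row['errorvalue']
    perc = err.endswith('%')
    if perc:
        err = err[:-1]
    try:
        err = float(err)
    except ValueError:
        return "Error values must be numeric"

    if perc and not (0 <= err <= 100):
        return "Error percentage must be between 0 and 100"
    if minv > maxv:
        return "Max values must be greater than Min values"
    if err < 0:
        return "Error value must be 0 or greater"
    return None


assert validate_intercept_row(
    {'intercept_min': '0', 'intercept_max': '10', 'errorvalue': '5%'}) is None
assert validate_intercept_row(
    {'intercept_min': '10', 'intercept_max': '0', 'errorvalue': '5'}) \
    == "Max values must be greater than Min values"
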
Example #60
0
    def publishFromHTML(self, aruid, results_html):
        # The AR can only be published if allowed
        uc = getToolByName(self.context, 'uid_catalog')
        #ars = uc(UID=aruid)
        ars = [p.getObject() for p in uc(UID=aruid)]
        if not ars or len(ars) != 1:
            return []

        ar = ars[0]
        wf = getToolByName(ar, 'portal_workflow')
        allowed_states = ['verified', 'published']
        # Publish/Republish allowed?
        if wf.getInfoFor(ar, 'review_state') not in allowed_states:
            # Pre-publish allowed?
            if not ar.getAnalyses(review_state=allowed_states):
                return []

        # HTML written to debug file
        debug_mode = App.config.getConfiguration().debug_mode
        if debug_mode:
            tmp_fn = tempfile.mktemp(suffix=".html")
            logger.debug("Writing HTML for %s to %s" % (ar.Title(), tmp_fn))
            open(tmp_fn, "wb").write(results_html)

        # Create the pdf report (will always be attached to the AR).
        # We must supply the file ourselves so that createPdf leaves it alone.
        pdf_fn = tempfile.mktemp(suffix=".pdf")
        pdf_report = createPdf(htmlreport=results_html, outfile=pdf_fn)

        # PDF written to debug file
        if debug_mode:
            logger.debug("Writing PDF for %s to %s" % (ar.Title(), pdf_fn))
        else:
            os.remove(pdf_fn)

        recipients = []
        contact = ar.getContact()
        lab = ar.bika_setup.laboratory

        # BIKA Cannabis hack.  Create the CSV they desire here now
        #csvdata = self.create_cannabis_csv(ars)
        csvdata = self.create_metrc_csv(ars)
        pdf_fn = to_utf8(ar.getRequestID())
        if pdf_report:
            if contact:
                recipients = [{
                    'UID': contact.UID(),
                    'Username': to_utf8(contact.getUsername()),
                    'Fullname': to_utf8(contact.getFullname()),
                    'EmailAddress': to_utf8(contact.getEmailAddress()),
                    'PublicationModes': contact.getPublicationPreference()
                }]
            reportid = ar.generateUniqueId('ARReport')
            report = _createObjectByType("ARReport", ar, reportid)
            report.edit(AnalysisRequest=ar.UID(),
                        Pdf=pdf_report,
                        CSV=csvdata,
                        Html=results_html,
                        Recipients=recipients)
            report.unmarkCreationFlag()
            renameAfterCreation(report)
            # Set blob properties for fields containing file data
            fld = report.getField('Pdf')
            fld.get(report).setFilename(pdf_fn + ".pdf")
            fld.get(report).setContentType('application/pdf')
            fld = report.getField('CSV')
            fld.get(report).setFilename(pdf_fn + ".csv")
            fld.get(report).setContentType('text/csv')

            # Set status to prepublished/published/republished
            status = wf.getInfoFor(ar, 'review_state')
            transitions = {'verified': 'publish', 'published': 'republish'}
            transition = transitions.get(status, 'prepublish')
            try:
                wf.doActionFor(ar, transition)
            except WorkflowException:
                pass

            # Compose and send the email.
            # The managers of the departments for which the current AR has
            # at least one AS must always receive the PDF report by email.
            # https://github.com/bikalabs/Bika-LIMS/issues/1028
            mime_msg = MIMEMultipart('related')
            mime_msg['Subject'] = self.get_mail_subject(ar)[0]
            mime_msg['From'] = formataddr(
                (encode_header(lab.getName()), lab.getEmailAddress()))
            mime_msg.preamble = 'This is a multi-part MIME message.'
            msg_txt = MIMEText(results_html, _subtype='html')
            mime_msg.attach(msg_txt)

            to = []
            #mngrs = ar.getResponsible()
            #for mngrid in mngrs['ids']:
            #    name = mngrs['dict'][mngrid].get('name', '')
            #    email = mngrs['dict'][mngrid].get('email', '')
            #    if (email != ''):
            #        to.append(formataddr((encode_header(name), email)))

            #if len(to) > 0:
            #    # Send the email to the managers
            #    mime_msg['To'] = ','.join(to)
            #    attachPdf(mime_msg, pdf_report, pdf_fn)

            #    # BIKA Cannabis hack.  Create the CSV they desire here now
            #    fn = pdf_fn
            #    attachCSV(mime_msg,csvdata,fn)

            #    try:
            #        host = getToolByName(ar, 'MailHost')
            #        host.send(mime_msg.as_string(), immediate=True)
            #    except SMTPServerDisconnected as msg:
            #        logger.warn("SMTPServerDisconnected: %s." % msg)
            #    except SMTPRecipientsRefused as msg:
            #        raise WorkflowException(str(msg))

        # Send report to recipients
        recips = self.get_recipients(ar)
        for recip in recips:
            if 'email' not in recip.get('pubpref', []) \
                    or not recip.get('email', ''):
                continue

            title = encode_header(recip.get('title', ''))
            email = recip.get('email')
            formatted = formataddr((title, email))

            # Create a new mime_msg object, because the previous one
            # already has the pdf attached
            mime_msg = MIMEMultipart('related')
            mime_msg['Subject'] = self.get_mail_subject(ar)[0]
            mime_msg['From'] = formataddr(
                (encode_header(lab.getName()), lab.getEmailAddress()))
            mime_msg.preamble = 'This is a multi-part MIME message.'
            msg_txt = MIMEText(results_html, _subtype='html')
            mime_msg.attach(msg_txt)
            mime_msg['To'] = formatted

            # Attach the pdf to the email if requested
            if pdf_report and 'pdf' in recip.get('pubpref'):
                attachPdf(mime_msg, pdf_report, pdf_fn)
                # BIKA Cannabis hack.  Create the CSV they desire here now
                fn = pdf_fn
                attachCSV(mime_msg, csvdata, fn)

            # For now, simply skip sending mail when running under test.
            if hasattr(self.portal, 'robotframework'):
                continue

            msg_string = mime_msg.as_string()

            # content of outgoing email written to debug file
            if debug_mode:
                tmp_fn = tempfile.mktemp(suffix=".email")
                logger.debug("Writing MIME message for %s to %s" %
                             (ar.Title(), tmp_fn))
                open(tmp_fn, "wb").write(msg_string)

            try:
                host = getToolByName(ar, 'MailHost')
                host.send(msg_string, immediate=True)
            except SMTPServerDisconnected as msg:
                logger.warn("SMTPServerDisconnected: %s." % msg)
            except SMTPRecipientsRefused as msg:
                raise WorkflowException(str(msg))

        # Save file on the filesystem
        folder = os.environ.get('COAs_FOLDER', '')
        if len(folder) != 0:
            client_path = '{}/{}/'.format(folder, ar.getClientID())
            if not os.path.exists(client_path):
                os.makedirs(client_path)

            today = self.ulocalized_time(DateTime(), long_format=0)
            today_path = '{}{}/'.format(client_path, today)
            if not os.path.exists(today_path):
                os.makedirs(today_path)

            fname = '{}{}.pdf'.format(today_path, pdf_fn)
            with open(fname, 'w') as f:
                f.write(pdf_report)

            csvname = '{}{}.csv'.format(today_path, pdf_fn)
            with open(csvname, 'w') as fcsv:
                fcsv.write(csvdata)

        return [ar]
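
The filesystem export at the end builds a <COAs_FOLDER>/<client id>/<date>/ tree before writing the PDF and CSV; a sketch of just that path handling (the folder layout is taken from the snippet, while the sample client/date values are assumptions):

import os


def coa_paths(base_folder, client_id, datestamp, report_id):
    """Return (pdf_path, csv_path) under base/client/date/, creating the
    directories if they do not exist yet."""
    day_path = os.path.join(base_folder, client_id, datestamp)
    if not os.path.exists(day_path):
        os.makedirs(day_path)
    return (os.path.join(day_path, report_id + '.pdf'),
            os.path.join(day_path, report_id + '.csv'))


# Usage sketch under an assumed temporary base folder:
# import tempfile
# base = tempfile.mkdtemp()
# pdf_path, csv_path = coa_paths(base, 'CLIENT-01', '2016-05-04', 'AR-0001-R01')
# with open(pdf_path, 'wb') as f:
#     f.write(b'%PDF-1.4 ...')  # e.g. the blob created by createPdf above
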