Code Example #1
File: api.py Project: xispa/bika.lims
def get_user_contact(user, contact_types=['Contact', 'LabContact']):
    """Returns the associated contact of a Plone user

    If the user passed in has no contact associated, return None.
    The `contact_types` parameter filters the portal types for the search.

    :param user: Plone user
    :param contact_types: List with the contact portal types to search
    :returns: Contact associated to the Plone user or None
    """
    if not user:
        return None

    query = {'portal_type': contact_types, 'getUsername': user.id}
    brains = search(query, catalog='portal_catalog')
    if not brains:
        return None

    if len(brains) > 1:
        # Oops, the user has multiple contacts assigned, return None
        contacts = map(lambda c: c.Title, brains)
        err_msg = "User '{}' is bound to multiple Contacts '{}'"
        err_msg = err_msg.format(user.id, ','.join(contacts))
        logger.error(err_msg)
        return None

    return get_object(brains[0])
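A minimal usage sketch (hypothetical: `member` stands for any Plone user object; the import location is assumed from the file header above):

# Hypothetical usage -- `member` is any Plone user object
contact = get_user_contact(member, contact_types=['LabContact'])
if contact is None:
    # Either no contact, or more than one, is linked to this user
    print("No unambiguous contact for user %s" % member.id)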
Code Example #2
File: __init__.py Project: xispa/bika.lims
def getTransitionUsers(obj, action_id, last_user=False):
    """
    This function returns a list with the users who have done the transition.
    :action_id: a string with the transition id.
    :last_user: a boolean; if True, return only the last user who triggered
        the transition instead of all of them.
    :returns: a list of user ids.
    """
    workflow = getToolByName(obj, 'portal_workflow')
    users = []
    try:
        # https://jira.bikalabs.com/browse/LIMS-2242:
        # Sometimes the workflow history is inexplicably missing!
        review_history = list(workflow.getInfoFor(obj, 'review_history'))
    except WorkflowException:
        logger.error(
            "workflow history is inexplicably missing."
            " https://jira.bikalabs.com/browse/LIMS-2242")
        return users
    # invert the list, so we always see the most recent matching event
    review_history.reverse()
    for event in review_history:
        if event.get('action', '') == action_id:
            value = event.get('actor', '')
            users.append(value)
            if last_user:
                return users
    return users
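A hedged usage example (illustrative: `sample` stands for any workflow-aware content object, 'verify' for any transition id):

# Illustrative usage
verifiers = getTransitionUsers(sample, 'verify')
last_only = getTransitionUsers(sample, 'verify', last_user=True)  # at most one id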
Code Example #3
    def get_workflow_actions(self):
        """ Compile a list of possible workflow transitions for items
            in this Table.
        """

        # cbb return empty list if we are unable to select items
        if not self.show_select_column:
            return []

        workflow = getToolByName(self.context, 'portal_workflow')

        # get all transitions for all items.
        transitions = {}
        actions = []
        for obj in [i.get('obj', '') for i in self.items]:
            # Unresolved error
            # LIMS-1861 Recurrent error after managing analysis from an AR
            # https://jira.bikalabs.com/browse/LIMS-1861
            # Was:
            #   obj = hasattr(obj, 'getObject') and obj.getObject() or obj
            try:
                obj = hasattr(obj, 'getObject') and obj.getObject() or obj
            except AttributeError as e:
                logger.error("Unresolved error LIMS-1861: %s" % str(e))
                continue

            for it in workflow.getTransitionsFor(obj):
                transitions[it['id']] = it
Code Example #4
File: view.py Project: Espurna/bika.health
 def sendAlertEmail(self):
     # Send an alert email
     laboratory = self.context.bika_setup.laboratory
     subject = self.request.get('subject')
     to = self.request.get('to')
     body = self.request.get('body')
     body = "<br/>".join(body.split("\r\n"))
     mime_msg = MIMEMultipart('related')
     mime_msg['Subject'] = subject
     mime_msg['From'] = formataddr(
                 (encode_header(laboratory.getName()),
                  laboratory.getEmailAddress()))
     mime_msg['To'] = to
     msg_txt = MIMEText(safe_unicode(body).encode('utf-8'),
                        _subtype='html')
     mime_msg.preamble = 'This is a multi-part MIME message.'
     mime_msg.attach(msg_txt)
     succeed = False
     try:
         host = getToolByName(self.context, 'MailHost')
         host.send(mime_msg.as_string(), immediate=True)
     except Exception as msg:
         ar = self.context.id
         logger.error("Panic level email %s: %s" % (ar, str(msg)))
         message = _('Unable to send an email to alert client '
                     'that some results exceeded the panic levels') \
                                          + (": %s" % str(msg))
         self.addMessage(message, 'warning')
Code Example #5
File: worksheet.py Project: AlcyonSuisse/bika.lims
    def removeAnalysis(self, analysis):
        """ delete an analyses from the worksheet and un-assign it
        """
        workflow = getToolByName(self, 'portal_workflow')

        # overwrite saved context UID for event subscriber
        self.REQUEST['context_uid'] = self.UID()
        try:
            workflow.doActionFor(analysis, 'unassign')
        except WorkflowException as e:
            message = str(e)
            logger.error(
                "Cannot use 'unassign' transition on {}: {}".format(
                analysis, message))
        # Note: subscriber might unassign the AR and/or promote the worksheet

        # remove analysis from context.Analyses *after* unassign,
        # (doActionFor requires worksheet in analysis.getBackReferences)
        Analyses = self.getAnalyses()
        if analysis in Analyses:
            Analyses.remove(analysis)
            self.setAnalyses(Analyses)
        layout = [slot for slot in self.getLayout() if slot['analysis_uid'] != analysis.UID()]
        self.setLayout(layout)

        if analysis.portal_type == "DuplicateAnalysis":
            self._delObject(analysis.id)
Code Example #6
File: __init__.py Project: lankesh46/bika.health
    def Import(self):
        folder = self.context.patients
        rows = self.get_rows(3)
        for row in rows:
            if not row['Firstname'] or not row['PrimaryReferrer']:
                continue
            pc = getToolByName(self.context, 'portal_catalog')
            client = pc(portal_type='Client', Title=row['PrimaryReferrer'])
            if len(client) == 0:
                raise IndexError("Primary referrer invalid: '%s'" % row['PrimaryReferrer'])

            client = client[0].getObject()
            _id = folder.invokeFactory('Patient', id=tmpID())
            obj = folder[_id]
            obj.unmarkCreationFlag()
            renameAfterCreation(obj)
            Fullname = (row['Firstname'] + " " + row.get('Surname', '')).strip()
            obj.edit(title=Fullname,
                     ClientPatientID=row.get('ClientPatientID', ''),
                     Salutation=row.get('Salutation', ''),
                     Firstname=row.get('Firstname', ''),
                     Surname=row.get('Surname', ''),
                     PrimaryReferrer=client.UID(),
                     Gender=row.get('Gender', 'dk'),
                     Age=row.get('Age', ''),
                     BirthDate=row.get('BirthDate', ''),
                     BirthDateEstimated=self.to_bool(
                         row.get('BirthDateEstimated', 'False')),
                     BirthPlace=row.get('BirthPlace', ''),
                     Ethnicity=row.get('Ethnicity', ''),
                     Citizenship=row.get('Citizenship', ''),
                     MothersName=row.get('MothersName', ''),
                     CivilStatus=row.get('CivilStatus', ''),
                     Anonymous=self.to_bool(row.get('Anonymous', 'False')))
            self.fill_contactfields(row, obj)
            self.fill_addressfields(row, obj)
            if 'Photo' in row and row['Photo']:
                try:
                    path = resource_filename(
                        "bika.lims",
                        "setupdata/%s/%s" % (self.dataset_name, row['Photo']))
                    with open(path, "rb") as f:
                        obj.setPhoto(f.read())
                except Exception:
                    logger.error("Unable to load Photo %s" % row['Photo'])

            if 'Feature' in row and row['Feature']:
                try:
                    path = resource_filename(
                        "bika.lims",
                        "setupdata/%s/%s" % (self.dataset_name, row['Feature']))
                    with open(path, "rb") as f:
                        obj.setFeature(f.read())
                except Exception:
                    logger.error("Unable to load Feature %s" % row['Feature'])

            obj.unmarkCreationFlag()
            renameAfterCreation(obj)
Code Example #7
File: printform.py Project: AlcyonSuisse/bika.lims
 def _rise_error(self):
     """
     Log the rendering error and set the destination URL
     """
     tbex = traceback.format_exc()
     logger.error(
         'An error occurred while rendering the view: %s' % tbex)
     self.destination_url = self.request.get_header(
         "referer", self.context.absolute_url())
Code Example #8
 def get(self, instance, **kw):
     """Get the value of the field
     """
     # Gracefully avoid programming errors in Computed fields
     try:
         return self._get(instance, **kw)
     except AttributeError:
         logger.error("Could not get the value of the computed field '{}'"
                      .format(self.get_field_name()))
         return None
Code Example #9
File: utils.py Project: RaulKite/Bika-LIMS
def changeWorkflowState(content, wf_id, state_id, acquire_permissions=False,
                        portal_workflow=None, **kw):
    """Change the workflow state of an object
    @param content: Content obj whose state will be changed
    @param wf_id: id of the workflow the state belongs to
    @param state_id: name of the state to put on content
    @param acquire_permissions: True -> all permissions unchecked and on roles
                                and acquired
                                False -> applies new state security map
    @param portal_workflow: Provide workflow tool (optimisation) if known
    @param kw: overrides for same-named keys of the state mapping
    @return: None
    """

    if portal_workflow is None:
        portal_workflow = getToolByName(content, 'portal_workflow')

    # Might raise IndexError if no workflow is associated to this type
    found_wf = 0
    for wf_def in portal_workflow.getWorkflowsFor(content):
        if wf_id == wf_def.getId():
            found_wf = 1
            break
    if not found_wf:
        logger.error("%s: Cannot find workflow id %s" % (content, wf_id))

    wf_state = {
        'action': None,
        'actor': None,
        'comments': "Setting state to %s" % state_id,
        'review_state': state_id,
        'time': DateTime(),
        }

    # Updating wf_state from keyword args
    for k in kw.keys():
        # Remove unknown items
        if not wf_state.has_key(k):
            del kw[k]
    if kw.has_key('review_state'):
        del kw['review_state']
    wf_state.update(kw)

    portal_workflow.setStatusOf(wf_id, content, wf_state)

    if acquire_permissions:
        # Acquire all permissions
        for permission in content.possible_permissions():
            content.manage_permission(permission, acquire=1)
    else:
        # Setting new state permissions
        wf_def.updateRoleMappingsFor(content)

    # Map changes to the catalogs
    content.reindexObject(idxs=['allowedRolesAndUsers', 'review_state'])
    return
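A usage sketch; the object, workflow id and state id below are illustrative, not taken from the code above:

# Force an object into a state without firing transitions or guards
changeWorkflowState(ar, 'bika_ar_workflow', 'verified',
                    actor='admin', comments='State fixed during migration')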
Code Example #10
File: abstractanalysis.py Project: xispa/bika.lims
 def getWorksheet(self):
     """Returns the Worksheet to which this analysis belongs to, or None
     """
     worksheet = self.getBackReferences('WorksheetAnalysis')
     if not worksheet:
         return None
     if len(worksheet) > 1:
         logger.error(
             "Analysis %s is assigned to more than one worksheet."
             % self.getId())
     return worksheet[0]
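Illustrative call (assuming `analysis` is an analysis object):

ws = analysis.getWorksheet()  # None if not assigned to any worksheet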
Code Example #11
    def get(self, instance, **kwargs):
        """ get() returns the list of contained analyses
            By default, return a list of catalog brains.

            If you want objects, pass full_objects = True

            If you want to override "ViewRetractedAnalyses",
            pass retracted=True

            other kwargs are passed to bika_analysis_catalog

        """

        full_objects = False
        if 'full_objects' in kwargs:
            full_objects = kwargs['full_objects']
            del kwargs['full_objects']

        if 'retracted' in kwargs:
            retracted = kwargs['retracted']
            del kwargs['retracted']
        else:
            mtool = getToolByName(instance, 'portal_membership')
            retracted = mtool.checkPermission(ViewRetractedAnalyses,
                                              instance)

        bac = getToolByName(instance, 'bika_analysis_catalog')
        contentFilter = dict([(k, v) for k, v in kwargs.items()
                              if k in bac.indexes()])
        contentFilter['portal_type'] = "Analysis"
        contentFilter['sort_on'] = "sortable_title"
        contentFilter['path'] = {'query': "/".join(instance.getPhysicalPath()),
                                 'level': 0}
        analyses = bac(contentFilter)
        if not retracted:
            analyses = [a for a in analyses if a.review_state != 'retracted']
        if full_objects:
            # Unresolved error
            # LIMS-1861 Recurrent error after managing analysis from an AR
            # https://jira.bikalabs.com/browse/LIMS-1861
            # Was:
            #   analyses = [a.getObject() for a in analyses]
            ans = []
            for a in analyses:
                try:
                    obj = a.getObject()
                except AttributeError as e:
                    logger.error("Unresolved error LIMS-1861: %s" % str(e))
                    continue
                ans.append(obj)
            analyses = ans
Code Example #12
File: to320.py Project: AlcyonSuisse/bika.lims
def migrate_instrument_locations(portal):
    bsc = portal.bika_setup_catalog

    bika_instrumentlocations = portal.bika_setup.get("bika_instrumentlocations")

    if bika_instrumentlocations is None:
        logger.error("bika_instrumentlocations not found in bika_setup!")
        return  # This should not happen

    # move bika_instrumentlocations below bika_instrumenttypes
    panel_ids = portal.bika_setup.objectIds()
    target_idx = panel_ids.index("bika_instrumenttypes")
    current_idx = panel_ids.index("bika_instrumentlocations")
    delta = current_idx - target_idx
    if delta > 1:
        portal.bika_setup.moveObjectsUp("bika_instrumentlocations", delta=delta-1)

    instrument_brains = bsc(portal_type="Instrument")
    for instrument_brain in instrument_brains:
        instrument = instrument_brain.getObject()

        # get the string value of the `location` field
        location = instrument.getLocation()
        if not location:
            continue  # Skip if no location was set

        # make a dictionary with the Titles as keys and the objects as values
        instrument_locations = bika_instrumentlocations.objectValues()
        instrument_location_titles = map(lambda o: o.Title(), instrument_locations)
        locations = dict(zip(instrument_location_titles, instrument_locations))

        instrument_location = None
        if location in locations:
            logger.info("Instrument Location {} exists in bika_instrumentlocations".format(location))
            instrument_location = locations[location]
        else:
            # Create a new location and link it to the instruments InstrumentLocation field
            instrument_location = _createObjectByType("InstrumentLocation", bika_instrumentlocations, tmpID())
            instrument_location.setTitle(location)
            instrument_location._renameAfterCreation()
            instrument_location.reindexObject()
            logger.info("Created Instrument Location {} in bika_instrumentlocations".format(location))

        instrument.setLocation(None)  # flush the old instrument location
        instrument.setInstrumentLocation(instrument_location)
        instrument.reindexObject()
        logger.info("Linked Instrument Location {} to Instrument {}".format(location, instrument.id))
Code Example #13
File: workflow.py Project: AlcyonSuisse/bika.lims
def getTransitionActor(obj, action_id):
    """Returns the identifier of the user who last performed the action
    on the object.
    """
    workflow = api.portal.get_tool("portal_workflow")
    try:
        review_history = list(workflow.getInfoFor(obj, "review_history"))
        review_history.reverse()
        for event in review_history:
            if event.get("action") == action_id:
                return event.get("actor")
        return ''
    except WorkflowException as e:
        message = str(e)
        logger.error("Cannot retrieve review_history on {}: {}".format(
            obj, message))
    return ''
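Illustrative usage ('verify' is an example transition id):

actor_id = getTransitionActor(obj, 'verify')  # '' if never verified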
Code Example #14
File: controller_view.py Project: xispa/bika.lims
 def __call__(self):
     """
     Returns the number of tasks in the sanitation-tasks queue
     """
     try:
         PostOnly(self.context.REQUEST)
     except Exception:
         logger.error(traceback.format_exc())
         return json.dumps({'count': 0})
     try:
         CheckAuthenticator(self.request.form)
     except Exception:
         logger.error(traceback.format_exc())
         return json.dumps({'count': 0})
     task_queue = queryUtility(ITaskQueue, name='sanitation-tasks')
     count = len(task_queue) if task_queue is not None else 0
     return json.dumps({'count': count})
Code Example #15
File: __init__.py Project: xispa/bika.lims
def getReviewHistory(instance):
    """Returns the review history for the instance in reverse order
    :returns: the list of historic events as dicts
    """
    review_history = []
    workflow = getToolByName(instance, 'portal_workflow')
    try:
        # https://jira.bikalabs.com/browse/LIMS-2242:
        # Sometimes the workflow history is inexplicably missing!
        review_history = list(workflow.getInfoFor(instance, 'review_history'))
    except WorkflowException:
        logger.error(
            "workflow history is inexplicably missing."
            " https://jira.bikalabs.com/browse/LIMS-2242")
    # invert the list, so we always see the most recent matching event
    review_history.reverse()
    return review_history
Code Example #16
File: catalog_utilities.py Project: xispa/bika.lims
def _addColumn(cat, col):
    """
    This function adds a metadata column to the acatalog.
    :cat: a catalog object
    :col: a column id as string
    :returns: a boolean as True if the element has been added and
        False otherwise
    """
    # First check if the metadata column already exists
    if col not in cat.schema():
        try:
            cat.addColumn(col)
            logger.info('Column %s added to %s.' % (col, cat.id))
            return True
        except Exception:
            logger.error(
                'Catalog column %s error while adding to %s.' % (col, cat.id))
    return False
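A sketch of how such a helper is typically driven; the catalog and the column names below are illustrative only:

# Hypothetical driver loop: ensure a set of metadata columns exists
pc = getToolByName(portal, 'portal_catalog')
for col in ('getClientTitle', 'getDateReceived'):
    _addColumn(pc, col)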
Code Example #17
File: workflow.py Project: AlcyonSuisse/bika.lims
def getTransitionDate(obj, action_id):
    workflow = api.portal.get_tool("portal_workflow")
    try:
        # https://jira.bikalabs.com/browse/LIMS-2242:
        # Sometimes the workflow history is inexplicably missing!
        review_history = list(workflow.getInfoFor(obj, 'review_history'))
    except WorkflowException as e:
        message = str(e)
        logger.error("Cannot retrieve review_history on {}: {}".format(
                obj, message))
        return None
    # invert the list, so we always see the most recent matching event
    review_history.reverse()
    for event in review_history:
        if event['action'] == action_id:
            value = ulocalized_time(event['time'], long_format=True,
                                    time_only=False, context=obj)
            return value
    return None
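Illustrative usage ('publish' is an example transition id):

published_on = getTransitionDate(ar, 'publish')  # None if never published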
Code Example #18
File: catalog_utilities.py Project: xispa/bika.lims
def _delIndex(catalog, index):
    """
    This function desindexes the index element from the catalog.
    :catalog: a catalog object
    :index: an index id as string
    :returns: a boolean as True if the element has been desindexed and it
    returns False otherwise.
    """
    if index in catalog.indexes():
        try:
            catalog.delIndex(index)
            logger.info(
                'Catalog index %s deleted from %s.' % (index, catalog.id))
            return True
        except Exception:
            logger.error(
                'Catalog index %s error while deleting from %s.'
                % (index, catalog.id))
    return False
Code Example #19
File: catalog_utilities.py Project: xispa/bika.lims
def _delColumn(cat, col):
    """
    This function deletes a metadata column of the acatalog.
    :cat: a catalog object
    :col: a column id as string
    :returns: a boolean as True if the element has been removed and
        False otherwise
    """
    # First check if the metadata column already exists
    if col in cat.schema():
        try:
            cat.delColumn(col)
            logger.info('Column %s deleted from %s.' % (col, cat.id))
            return True
        except Exception:
            logger.error(
                'Catalog column %s error while deleting from %s.'
                % (col, cat.id))
    return False
Code Example #20
File: workflow.py Project: fqblab/bika.lims
def getTransitionDate(obj, action_id):
    workflow = getToolByName(obj, 'portal_workflow')
    try:
        # https://jira.bikalabs.com/browse/LIMS-2242:
        # Sometimes the workflow history is inexplicably missing!
        review_history = list(workflow.getInfoFor(obj, 'review_history'))
    except WorkflowException:
        logger.error(
            "workflow history is inexplicably missing."
            " https://jira.bikalabs.com/browse/LIMS-2242")
        return None
    # invert the list, so we always see the most recent matching event
    review_history.reverse()
    for event in review_history:
        if event['action'] == action_id:
            value = ulocalized_time(event['time'], long_format=True,
                                    time_only=False, context=obj)
            return value
    return None
Code Example #21
File: contact.py Project: AlcyonSuisse/bika.lims
    def getContactByUsername(cls, username):
        """Convenience Classmethod which returns a Contact by a Username
        """

        # Check if the User is linked already
        pc = api.portal.get_tool("portal_catalog")
        contacts = pc(portal_type=cls.portal_type,
                      getUsername=username)

        # No Contact assigned to this username
        if len(contacts) == 0:
            return None

        # Multiple Users assigned, this should never happen
        if len(contacts) > 1:
            logger.error("User '{}' is bound to multiple Contacts '{}'".format(
                username, ",".join(map(lambda c: c.Title, contacts))))
            return map(lambda x: x.getObject(), contacts)

        # Return the found Contact object
        return contacts[0].getObject()
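Since this is a classmethod, a hedged call might look as follows (the username is made up; note the method returns a list when the data is ambiguous):

contact = Contact.getContactByUsername('jdoe')
if isinstance(contact, list):
    # Data integrity problem: several Contacts bound to the same user
    logger.error("Ambiguous contacts for user 'jdoe'")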
Code Example #22
File: bika_listing.py Project: fqblab/bika.lims
 def review_state(self):
     """Get workflow state of object in wf_id.
     First try request: <form_id>_review_state
     Then try 'default': self.default_review_state
     :return: item from self.review_states
     """
     if not self.review_states:
         logger.error("%s.review_states is undefined." % self)
         return None
     # get state_id from (request or default_review_states)
     key = "%s_review_state" % self.form_id
     state_id = self.request.form.get(key, self.default_review_state)
     states = [r for r in self.review_states if r['id'] == state_id]
     if not states:
         logger.error("%s.review_states does not contains id='%s'." %
                      (self, state_id))
         return None
     review_state = states[0] if states else self.review_states[0]
     # set selected state into the request
     self.request['%s_review_state' % self.form_id] = review_state['id']
     return review_state
Code Example #23
File: idserver.py Project: xispa/bika.lims
def generateUniqueId(context, **kw):
    """ Generate pretty content IDs.
    """

    # get the config for this portal type from the system setup
    config = get_config(context, **kw)

    # get the variables map for later string interpolation
    variables = get_variables(context, **kw)

    # The new generate sequence number
    number = 0

    # get the sequence type from the global config
    sequence_type = config.get("sequence_type", "generated")

    # Sequence Type is "Counter", so we use the length of the backreferences or
    # contained objects of the evaluated "context" defined in the config
    if sequence_type == 'counter':
        number = get_counted_number(context, config, variables, **kw)

    # Sequence Type is "Generated", so the ID is constructed according to the
    # configured split length
    if sequence_type == 'generated':
        number = get_generated_number(context, config, variables, **kw)

    # store the new sequence number to the variables map for str interpolation
    variables["seq"] = number

    # The ID formatting template from user config, e.g. {sampleId}-R{seq:02d}
    id_template = config.get("form", "")

    # Interpolate the ID template
    try:
        new_id = id_template.format(**variables)
    except KeyError as e:
        logger.error('KeyError: {} not in id_template {}'.format(
            e, id_template))
        raise
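The interpolation step is plain str.format over the variables map; a self-contained sketch of the template mechanics (values are made up):

id_template = "{sampleId}-R{seq:02d}"
variables = {"sampleId": "H2O-0001", "seq": 3}
print(id_template.format(**variables))  # -> H2O-0001-R03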
Code Example #24
File: catalog_utilities.py Project: xispa/bika.lims
def _addIndex(catalog, index, indextype):
    """
    This function indexes the index element into the catalog if it isn't yet.
    :catalog: a catalog object
    :index: an index id as string
    :indextype: the type of the index as string
    :returns: a boolean as True if the element has been indexed and it returns
    False otherwise.
    """
    if index not in catalog.indexes():
        try:
            if indextype == 'ZCTextIndex':
                addZCTextIndex(catalog, index)
            else:
                catalog.addIndex(index, indextype)
            logger.info('Catalog index %s added to %s.' % (index, catalog.id))
            return True
        except Exception:
            logger.error(
                'Catalog index %s error while adding to %s.'
                % (index, catalog.id))
    return False
Code Example #25
File: api.py Project: andersonsmith/bika.lims
def get_review_history(brain_or_object, rev=True):
    """Get the review history for the given brain or context.

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param rev: if True (default), return the history in reverse order
    :returns: Workflow history
    :rtype: [{}, ...]
    """
    obj = get_object(brain_or_object)
    review_history = []
    try:
        workflow = get_tool("portal_workflow")
        review_history = workflow.getInfoFor(obj, 'review_history')
    except WorkflowException as e:
        message = str(e)
        logger.error("Cannot retrieve review_history on {}: {}".format(
            obj, message))
    if not isinstance(review_history, (list, tuple)):
        logger.error("get_review_history: expected list, recieved {}".format(
            review_history))
        review_history = []
    if rev is True:
        review_history.reverse()
    return review_history
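Hedged usage (illustrative: `sample` may be a catalog brain or a content object):

history = get_review_history(sample)
if history:
    last_event = history[0]  # most recent first, since rev=True by default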
Code Example #26
    def __call__(self):
        # Do generic bika.lims stuff
        BaseClass.__call__(self)
        # Do bika-health specific actions when submit
        action = BaseClass._get_form_workflow_action(self)
        addPortalMessage = self.context.plone_utils.addPortalMessage
        if action[0] == 'submit' and isActive(self.context):
            inpanicanalyses = []
            workflow = getToolByName(self.context, 'portal_workflow')
            translate = self.context.translate
            rc = getToolByName(self.context, REFERENCE_CATALOG)
            uc = getToolByName(self.context, 'uid_catalog')
            # retrieve the results from database and check if
            # the values are exceeding panic levels
            alerts = {}
            for uid in self.request.form['Result'][0].keys():
                analysis = rc.lookupObject(uid)
                analysis = analysis.getObject() if hasattr(
                    analysis, 'getObject') else analysis
                if not analysis:
                    continue
                astate = workflow.getInfoFor(analysis, 'review_state')
                if astate == 'retracted':
                    continue
                alerts.update(ResultOutOfRange(analysis)())
            if alerts:
                message = translate(
                    _('Some results exceeded the '
                      'panic levels that may '
                      'indicate an imminent '
                      'life-threatening condition'))
                addPortalMessage(message, 'warning')
                self.request.response.redirect(self.context.absolute_url())

                # If panic levels alert email enabled, send an email to
                # labmanagers
                bs = self.context.bika_setup
                if hasattr(bs, 'getEnablePanicAlert') \
                        and bs.getEnablePanicAlert():
                    laboratory = self.context.bika_setup.laboratory
                    lab_address = "<br/>".join(laboratory.getPrintAddress())
                    managers = self.context.portal_groups.getGroupMembers(
                        'LabManagers')
                    mime_msg = MIMEMultipart('related')
                    mime_msg['Subject'] = _("Panic alert")
                    mime_msg['From'] = formataddr(
                        (encode_header(laboratory.getName()),
                         laboratory.getEmailAddress()))
                    to = []
                    for manager in managers:
                        user = self.portal.acl_users.getUser(manager)
                        uemail = user.getProperty('email')
                        ufull = user.getProperty('fullname')
                        to.append(formataddr((encode_header(ufull), uemail)))
                    mime_msg['To'] = ','.join(to)
                    strans = []
                    for analysis_uid, alertlist in alerts.items():
                        analysis = uc(analysis_uid).getObject()
                        for alert in alertlist:
                            strans.append("- {0}, {1}: {2}".format(
                                analysis.getService().Title(),
                                translate(_("Result")), analysis.getResult()))
                    stran = "<br/>".join(strans)
                    text = translate(
                        _(
                            "Some results from ${items} exceeded the panic levels "
                            "that may indicate an imminent life-threatening "
                            "condition: <br/><br/>{analysisresults}<br/><br/>"
                            "<b>Please, check the Analysis Request if you "
                            "want to re-test the analysis or immediately "
                            "alert the client.</b><br/><br/>{lab_address}",
                            mapping={
                                'items': self.context.getId(),
                                'analysisresults': stran,
                                'lab_address': lab_address
                            }))
                    msg_txt = MIMEText(safe_unicode(text).encode('utf-8'),
                                       _subtype='html')
                    mime_msg.preamble = 'This is a multi-part MIME message.'
                    mime_msg.attach(msg_txt)
                    try:
                        host = getToolByName(self.context, 'MailHost')
                        host.send(mime_msg.as_string(), immediate=True)
                    except Exception as msg:
                        ar = inpanicanalyses[0].getRequestID()
                        logger.error("Panic level email %s: %s" %
                                     (ar, str(msg)))
                        message = self.context.translate(
                            _('Unable to send an email to alert lab '
                              'managers that some analyses exceeded the '
                              'panic levels') + (": %s" % str(msg)))
                        self.context.plone_utils.addPortalMessage(
                            message, 'warning')
Code Example #27
File: __init__.py Project: mstroehle/senaite.core
def doActionFor(instance, action_id, idxs=None):
    """Tries to perform the transition to the instance.
    Object is reindexed after the transition takes place, but only if succeeds.
    If idxs is set, only these indexes will be reindexed. Otherwise, will try
    to use the indexes defined in ACTIONS_TO_INDEX mapping if any.
    :param instance: Object to be transitioned
    :param action_id: transition id
    :param idxs: indexes to be reindexed after the transition
    :returns: True if the transition has been performed, together with message
    :rtype: tuple (bool,str)
    """
    if not instance:
        return False, ""

    if isinstance(instance, list):
        # TODO Workflow . Check if this is strictly necessary
        # This check is here because sometimes Plone creates a list
        # from submitted form elements.
        logger.warn("Got a list of obj in doActionFor!")
        if len(instance) > 1:
            logger.warn(
                "doActionFor is getting an instance parameter which is a list "
                "with more than one item. Instance: '{}', action_id: '{}'"
                .format(instance, action_id)
            )

        return doActionFor(instance=instance[0], action_id=action_id, idxs=idxs)

    # Since a given transition can cascade or promote to other objects, we want
    # to reindex all objects for which the transition succeed at once, at the
    # end of process. Otherwise, same object will be reindexed multiple times
    # unnecessarily. Also, ActionsHandlerPool ensures the same transition is not
    # applied twice to the same object due to cascade/promote recursions.
    pool = ActionHandlerPool.get_instance()
    if pool.succeed(instance, action_id):
        return False, "Transition {} for {} already done"\
             .format(action_id, instance.getId())

    # Return False if transition is not permitted
    if not isTransitionAllowed(instance, action_id):
        return False, "Transition {} for {} is not allowed"\
            .format(action_id, instance.getId())

    # Add this batch process to the queue
    pool.queue_pool()
    succeed = False
    message = ""
    workflow = getToolByName(instance, "portal_workflow")
    try:
        workflow.doActionFor(instance, action_id)
        succeed = True
    except WorkflowException as e:
        message = str(e)
        curr_state = getCurrentState(instance)
        clazz_name = instance.__class__.__name__
        logger.warning(
            "Transition '{0}' not allowed: {1} '{2}' ({3})"\
            .format(action_id, clazz_name, instance.getId(), curr_state))
        logger.error(message)

    # If no indexes to reindex have been defined, try to use those defined in
    # the ACTIONS_TO_INDEXES mapping. Reindexing only those indexes that might
    # be affected by the transition boosts the overall performance!.
    if idxs is None:
        portal_type = instance.portal_type
        idxs = ACTIONS_TO_INDEXES.get(portal_type, {}).get(action_id, [])

    # Add the current object to the pool and resume
    pool.push(instance, action_id, succeed, idxs=idxs)
    pool.resume()

    return succeed, message
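Hedged usage (illustrative: `analysis` is any transitionable object):

succeed, message = doActionFor(analysis, 'submit')
if not succeed:
    logger.warn("Could not submit %s: %s" % (analysis.getId(), message))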
Code Example #28
File: reflexrule.py Project: xispa/bika.lims
def doActionToAnalysis(base, action):
    """
    This functions executes the action against the analysis.
    :base: a full analysis object. The new analyses will be cloned from it.
    :action: a dictionary representing an action row.
        [{'action': 'duplicate', ...}, {,}, ...]
    :returns: the new analysis
    """
    # If the analysis has already been retracted, just duplicate it
    workflow = getToolByName(base, "portal_workflow")
    state = workflow.getInfoFor(base, 'review_state')
    action_rule_name = ''
    if action.get('action', '') == 'setvisibility':
        action_rule_name = 'Visibility set'
        target_analysis = action.get('setvisibilityof', '')
        if target_analysis == "original":
            analysis = base
        else:
            analysis = _fetch_analysis_for_local_id(base, target_analysis)
    elif action.get('action', '') == 'repeat' and state != 'retracted':
        # Repeating an analysis consists of retracting it and then creating
        # a new analysis with the same analysis service used for the
        # retracted one (always working with the same sample). It'll do a
        # retract action
        doActionFor(base, 'retract')
        analysis = base.aq_parent.getAnalyses(
            sort_on='created')[-1].getObject()
        action_rule_name = 'Repeated'
        analysis.setResult('')
    elif action.get('action', '') == 'duplicate' or state == 'retracted':
        analysis = duplicateAnalysis(base)
        action_rule_name = 'Duplicated'
        analysis.setResult('')
    elif action.get('action', '') == 'setresult':
        target_analysis = action.get('setresulton', '')
        action_rule_name = 'Result set'
        result_value = action['setresultdiscrete'] if \
            action.get('setresultdiscrete', '') else action['setresultvalue']
        if target_analysis == 'original':
            original = base.getOriginalReflexedAnalysis()
            analysis = original
            original.setResult(result_value)
        elif target_analysis == 'new':
            # Create a new analysis
            analysis = duplicateAnalysis(base)
            analysis.setResult(result_value)
            doActionFor(analysis, 'submit')
    else:
        logger.error(
            "Not known Reflex Rule action %s." % (action.get('action', '')))
        return 0
    analysis.setReflexRuleAction(action.get('action', ''))
    analysis.setIsReflexAnalysis(True)
    analysis.setReflexAnalysisOf(base)
    analysis.setReflexRuleActionsTriggered(
        base.getReflexRuleActionsTriggered()
    )
    if action.get('showinreport', '') == "invisible":
        analysis.setHidden(True)
    elif action.get('showinreport', '') == "visible":
        analysis.setHidden(False)
    # Setting the original reflected analysis
    if base.getOriginalReflexedAnalysis():
        analysis.setOriginalReflexedAnalysis(
            base.getOriginalReflexedAnalysis())
    else:
        analysis.setOriginalReflexedAnalysis(base)
    analysis.setReflexRuleLocalID(action.get('an_result_id', ''))
    # Setting the remarks to base analysis
    time = datetime.now().strftime('%Y-%m-%d %H:%M')
    rule_num = action.get('rulenumber', 0)
    rule_name = action.get('rulename', '')
    base_remark = "Reflex rule number %s of '%s' applied at %s." % \
        (rule_num, rule_name, time)
    base_remark = base.getRemarks() + base_remark + '||'
    base.setRemarks(base_remark)
    # Setting the remarks to new analysis
    analysis_remark = "%s due to reflex rule number %s of '%s' at %s" % \
        (action_rule_name, rule_num, rule_name, time)
    analysis_remark = analysis.getRemarks() + analysis_remark + '||'
    analysis.setRemarks(analysis_remark)
    return analysis
Code Example #29
File: idserver.py Project: xispa/bika.lims
def get_variables(context, **kw):
    """Prepares a dictionary of key->value pairs usable for ID formatting
    """

    # allow portal_type override
    portal_type = kw.get("portal_type") or api.get_portal_type(context)

    # The variables map holds the values that might get into the constructed id
    variables = {
        'context': context,
        'id': api.get_id(context),
        'portal_type': portal_type,
        'year': get_current_year(),
        'parent': api.get_parent(context),
        'seq': 0,
    }

    # Augment the variables map depending on the portal type
    if portal_type == "AnalysisRequest":
        variables.update({
            'sampleId': context.getSample().getId(),
            'sample': context.getSample(),
        })

    elif portal_type == "SamplePartition":
        variables.update({
            'sampleId': context.aq_parent.getId(),
            'sample': context.aq_parent,
        })

    elif portal_type == "Sample":
        # get the prefix of the assigned sample type
        sample_id = context.getId()
        sample_type = context.getSampleType()
        sampletype_prefix = sample_type.getPrefix()

        date_now = DateTime()
        sampling_date = context.getSamplingDate()
        date_sampled = context.getDateSampled()

        # Try to get the date sampled and sampling date
        if sampling_date:
            samplingDate = DT2dt(sampling_date)
        else:
            # No Sample Date?
            logger.error("Sample {} has no sample date set".format(sample_id))
            # fall back to current date
            samplingDate = DT2dt(date_now)

        if date_sampled:
            dateSampled = DT2dt(date_sampled)
        else:
            # No Sample Date?
            logger.error("Sample {} has no sample date set".format(sample_id))
            dateSampled = DT2dt(date_now)

        variables.update({
            'clientId': context.aq_parent.getClientID(),
            'dateSampled': dateSampled,
            'samplingDate': samplingDate,
            'sampleType': sampletype_prefix,
        })

    return variables
Code Example #30
File: bika_listing.py Project: fqblab/bika.lims
    def _process_request(self):
        """Scan request for parameters and configure class attributes
        accordingly.  Setup AdvancedQuery or catalog contentFilter.

        Request parameters:
        <form_id>_limit_from:       index of the first item to display
        <form_id>_rows_only:        returns only the rows
        <form_id>_sort_on:          list items are sorted on this key
        <form_id>_manual_sort_on:   no index - sort with python
        <form_id>_pagesize:         number of items
        <form_id>_filter:           A string, will be regex matched against
                                    indexes in <form_id>_filter_indexes
        <form_id>_filter_indexes:   list of index names which will be searched
                                    for the value of <form_id>_filter

        <form_id>_<index_name>:     Any index name can be used after <form_id>_.

            any request variable named ${form_id}_{index_name} will pass its
            value to that index in self.contentFilter.

            All conditions using ${form_id}_{index_name} are searched with AND.

            The parameter value will be matched with regexp if a FieldIndex or
            TextIndex.  Else, AdvancedQuery.Generic is used.
        """
        form_id = self.form_id
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        catalog = getToolByName(self.context, self.catalog)

        # Some ajax calls duplicate form values?  I have not figured out why!
        if self.request.form:
            for key, value in self.request.form.items():
                if isinstance(value, list):
                    self.request.form[key] = self.request.form[key][0]

        # If table_only specifies another form_id, then we abort.
        # this way, a single table among many can request a redraw,
        # and only its content will be rendered.
        if form_id not in self.request.get('table_only', form_id) \
            or form_id not in self.request.get('rows_only', form_id):
            return ''

        self.rows_only = self.request.get('rows_only', '') == form_id
        self.limit_from = int(self.request.get(form_id + '_limit_from', 0))

        # contentFilter is allowed in every self.review_state.
        for k, v in self.review_state.get('contentFilter', {}).items():
            self.contentFilter[k] = v

        # sort on
        self.sort_on = self.sort_on \
            if hasattr(self, 'sort_on') and self.sort_on \
            else None
        self.sort_on = self.request.get(form_id + '_sort_on', self.sort_on)
        self.sort_order = self.request.get(form_id + '_sort_order', 'ascending')
        self.manual_sort_on = self.request.get(form_id + '_manual_sort_on', None)

        if self.sort_on:
            if self.sort_on in self.columns.keys():
                if self.columns[self.sort_on].get('index', None):
                    self.request.set(form_id + '_sort_on', self.sort_on)
                    # The column can be sorted directly using an index
                    idx = self.columns[self.sort_on]['index']
                    self.sort_on = idx
                    # Don't sort manually!
                    self.manual_sort_on = None
                else:
                    # The column must be manually sorted using python
                    self.manual_sort_on = self.sort_on
            else:
                # We cannot sort for a column that doesn't exist!
                msg = "{}: sort_on is '{}', not a valid column".format(
                    self, self.sort_on)
                logger.error(msg)
                self.sort_on = None

        if self.manual_sort_on:
            self.manual_sort_on = self.manual_sort_on[0] \
                                if type(self.manual_sort_on) in (list, tuple) \
                                else self.manual_sort_on
            if self.manual_sort_on not in self.columns.keys():
                # We cannot sort for a column that doesn't exist!
                msg = "{}: manual_sort_on is '{}', not a valid column".format(
                    self, self.manual_sort_on)
                logger.error(msg)
                self.manual_sort_on = None

        if self.sort_on or self.manual_sort_on:
            # By default, if sort_on is set, sort the items ASC
            # Trick to allow 'descending' keyword instead of 'reverse'
            self.sort_order = 'reverse' if self.sort_order \
                                        and self.sort_order[0] in ['d','r'] \
                                        else 'ascending'
        else:
            # By default, sort on created
            self.sort_order = 'reverse'
            self.sort_on = 'created'

        self.contentFilter['sort_order'] = self.sort_order
        if self.sort_on:
            self.contentFilter['sort_on'] = self.sort_on

        # pagesize
        pagesize = self.request.get(form_id + '_pagesize', self.pagesize)
        if type(pagesize) in (list, tuple):
            pagesize = pagesize[0]
        try:
            pagesize = int(pagesize)
        except (TypeError, ValueError):
            pagesize = self.pagesize = 10
        self.pagesize = pagesize
        # Plone's batching wants this variable:
        self.request.set('pagesize', self.pagesize)
        # and we want to make our choice remembered in bika_listing also
        self.request.set(self.form_id + '_pagesize', self.pagesize)

        # index filters.
        self.And = []
        self.Or = []
        ##logger.info("contentFilter: %s"%self.contentFilter)
        for k, v in self.columns.items():
            if 'index' not in v \
               or v['index'] == 'review_state' \
               or v['index'] in self.filter_indexes:
                continue
            self.filter_indexes.append(v['index'])
        ##logger.info("Filter indexes: %s"%self.filter_indexes)

        # any request variable named ${form_id}_{index_name}
        # will pass its value to that index in self.contentFilter.
        # all conditions using ${form_id}_{index_name} are searched with AND
        for index in self.filter_indexes:
            idx = catalog.Indexes.get(index, None)
            if not idx:
                logger.debug("index named '%s' not found in %s.  "
                             "(Perhaps the index is still empty)." %
                            (index, self.catalog))
                continue
            request_key = "%s_%s" % (form_id, index)
            value = self.request.get(request_key, '')
            if len(value) > 1:
                ##logger.info("And: %s=%s"%(index, value))
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.And.append(MatchRegexp(index, value))
                elif idx.meta_type == 'DateIndex':
                    logger.info("Unhandled DateIndex search on '%s'"%index)
                    continue
                else:
                    self.Or.append(Generic(index, value))

        # if there's a ${form_id}_filter in request, then all indexes
        # are searched for its value.
        # ${form_id}_filter is searched with OR against all indexes
        request_key = "%s_filter" % form_id
        value = self.request.get(request_key, '')
        if type(value) in (list, tuple):
            value = value[0]
        if len(value) > 1:
            for index in self.filter_indexes:
                idx = catalog.Indexes.get(index, None)
                if not idx:
                    logger.debug("index named '%s' not found in %s.  "
                                 "(Perhaps the index is still empty)." %
                                 (index, self.catalog))
                    continue
                ##logger.info("Or: %s=%s"%(index, value))
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.Or.append(MatchRegexp(index, value))
                    self.expand_all_categories = True
                    # https://github.com/bikalabs/Bika-LIMS/issues/1069
                    vals = value.split('-')
                    if len(vals) > 2:
                        valroot = vals[0]
                        for i in range(1, len(vals)):
                            valroot = '%s-%s' % (valroot, vals[i])
                            self.Or.append(MatchRegexp(index, valroot+'-*'))
                            self.expand_all_categories = True
                elif idx.meta_type == 'DateIndex':
                    if type(value) in (list, tuple):
                        value = value[0]
                    if value.find(":") > -1:
                        try:
                            lohi = [DateTime(x) for x in value.split(":")]
                            self.Or.append(Between(index, lohi[0], lohi[1]))
                            self.expand_all_categories = True
                        except Exception:
                            logger.info("Error (And, DateIndex='%s', term='%s')" % (index, value))
                    else:
                        try:
                            self.Or.append(Eq(index, DateTime(value)))
                            self.expand_all_categories = True
                        except Exception:
                            logger.info("Error (Or, DateIndex='%s', term='%s')" % (index, value))
                else:
                    self.Or.append(Generic(index, value))
                    self.expand_all_categories = True
            self.Or.append(MatchRegexp('review_state', value))

        # get toggle_cols cookie value
        # and modify self.columns[]['toggle'] to match.
        toggle_cols = self.get_toggle_cols()
        for col in self.columns.keys():
            if col in toggle_cols:
                self.columns[col]['toggle'] = True
            else:
                self.columns[col]['toggle'] = False
Code Example #31
File: idserver.py Project: scottwedge/LIMSCUA
def get_variables(context, **kw):
    """Prepares a dictionary of key->value pairs usable for ID formatting
    """

    # allow portal_type override
    portal_type = kw.get("portal_type") or api.get_portal_type(context)

    # The variables map holds the values that might get into the constructed id
    variables = {
        'context': context,
        'id': api.get_id(context),
        'portal_type': portal_type,
        'year': get_current_year(),
        'parent': api.get_parent(context),
        'seq': 0,
    }

    # Augment the variables map depending on the portal type
    if portal_type == "AnalysisRequest":
        variables.update({
            'sampleId': context.getSample().getId(),
            'sample': context.getSample(),
        })

    elif portal_type == "SamplePartition":
        variables.update({
            'sampleId': context.aq_parent.getId(),
            'sample': context.aq_parent,
        })

    elif portal_type == "Sample":
        # get the prefix of the assigned sample type
        sample_id = context.getId()
        sample_type = context.getSampleType()
        sampletype_prefix = sample_type.getPrefix()

        date_now = DateTime()
        sampling_date = context.getSamplingDate()
        date_sampled = context.getDateSampled()

        # Try to get the date sampled and sampling date
        if sampling_date:
            samplingDate = DT2dt(sampling_date)
        else:
            # No Sample Date?
            logger.error("Sample {} has no sample date set".format(sample_id))
            # fall back to current date
            samplingDate = DT2dt(date_now)

        if date_sampled:
            dateSampled = DT2dt(date_sampled)
        else:
            # No Sample Date?
            logger.error("Sample {} has no sample date set".format(sample_id))
            dateSampled = DT2dt(date_now)

        variables.update({
            'clientId': context.aq_parent.getClientID(),
            'dateSampled': dateSampled,
            'samplingDate': samplingDate,
            'sampleType': sampletype_prefix,
        })

    return variables
Code Example #32
def doActionToAnalysis(base, action):
    """
    This functions executes the action against the analysis.
    :base: a full analysis object. The new analyses will be cloned from it.
    :action: a dictionary representing an action row.
        [{'action': 'duplicate', ...}, {,}, ...]
    :returns: the new analysis
    """
    # If the analysis has already been retracted, just duplicate it
    workflow = getToolByName(base, "portal_workflow")
    state = workflow.getInfoFor(base, 'review_state')
    action_rule_name = ''
    if action.get('action', '') == 'setvisibility':
        action_rule_name = 'Visibility set'
        target_analysis = action.get('setvisibilityof', '')
        if target_analysis == "original":
            analysis = base
        else:
            analysis = _fetch_analysis_for_local_id(base, target_analysis)
    elif action.get('action', '') == 'repeat' and state != 'retracted':
        # Repeating an analysis consists of retracting it and then creating
        # a new analysis with the same analysis service used for the
        # retracted one (always working with the same sample). It'll do a
        # retract action
        doActionFor(base, 'retract')
        analysis = base.aq_parent.getAnalyses(
            sort_on='created')[-1].getObject()
        action_rule_name = 'Repeated'
        analysis.setResult('')
    elif action.get('action', '') == 'duplicate' or state == 'retracted':
        analysis = duplicateAnalysis(base)
        action_rule_name = 'Duplicated'
        analysis.setResult('')
    elif action.get('action', '') == 'setresult':
        target_analysis = action.get('setresulton', '')
        action_rule_name = 'Result set'
        result_value = action['setresultdiscrete'] if \
            action.get('setresultdiscrete', '') else action['setresultvalue']
        if target_analysis == 'original':
            original = base.getOriginalReflexedAnalysis()
            analysis = original
            original.setResult(result_value)
        elif target_analysis == 'new':
            # Create a new analysis
            analysis = duplicateAnalysis(base)
            analysis.setResult(result_value)
            doActionFor(analysis, 'submit')
    else:
        logger.error("Not known Reflex Rule action %s." %
                     (action.get('action', '')))
        return 0
    analysis.setReflexRuleAction(action.get('action', ''))
    analysis.setIsReflexAnalysis(True)
    analysis.setReflexAnalysisOf(base)
    analysis.setReflexRuleActionsTriggered(
        base.getReflexRuleActionsTriggered())
    if action.get('showinreport', '') == "invisible":
        analysis.setHidden(True)
    elif action.get('showinreport', '') == "visible":
        analysis.setHidden(False)
    # Setting the original reflected analysis
    if base.getOriginalReflexedAnalysis():
        analysis.setOriginalReflexedAnalysis(
            base.getOriginalReflexedAnalysis())
    else:
        analysis.setOriginalReflexedAnalysis(base)
    analysis.setReflexRuleLocalID(action.get('an_result_id', ''))
    # Setting the remarks to base analysis
    time = datetime.now().strftime('%Y-%m-%d %H:%M')
    rule_num = action.get('rulenumber', 0)
    rule_name = action.get('rulename', '')
    base_remark = "Reflex rule number %s of '%s' applied at %s." % \
        (rule_num, rule_name, time)
    base_remark = base.getRemarks() + base_remark + '||'
    base.setRemarks(base_remark)
    # Setting the remarks to new analysis
    analysis_remark = "%s due to reflex rule number %s of '%s' at %s" % \
        (action_rule_name, rule_num, rule_name, time)
    analysis_remark = analysis.getRemarks() + analysis_remark + '||'
    analysis.setRemarks(analysis_remark)
    return analysis
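A minimal usage sketch (hypothetical values; base_analysis stands for an existing analysis object): an action row is a plain dictionary, so triggering a duplicate for a reflexed analysis could look like this. The keys shown are the ones the function reads above; 'rulenumber' and 'rulename' only feed the remarks text, and 'an_result_id' becomes the local id of the new analysis.

action = {
    'action': 'duplicate',      # setvisibility, repeat, duplicate or setresult
    'an_result_id': 'rep-1',    # local id assigned to the new analysis
    'showinreport': 'visible',  # or 'invisible' to hide it in reports
    'rulenumber': 1,
    'rulename': 'Glucose retest',
}
new_analysis = doActionToAnalysis(base_analysis, action)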
Code example #33
File: bika_listing.py Project: NovaVic/bika.lims
    def _process_request(self):
        """Scan request for parameters and configure class attributes
        accordingly.  Setup AdvancedQuery or catalog contentFilter.

        Request parameters:
        <form_id>_limit_from:       index of the first item to display
        <form_id>_rows_only:        returns only the rows
        <form_id>_sort_on:          list items are sorted on this key
        <form_id>_manual_sort_on:   no index - sort with python
        <form_id>_pagesize:         number of items
        <form_id>_filter:           A string, will be regex matched against
                                    indexes in <form_id>_filter_indexes
        <form_id>_filter_indexes:   list of index names which will be searched
                                    for the value of <form_id>_filter

        <form_id>_<index_name>:     Any index name can be used after <form_id>_.

            any request variable named ${form_id}_{index_name} will pass its
            value to that index in self.contentFilter.

            All conditions using ${form_id}_{index_name} are searched with AND.

            The parameter value will be matched with regexp if a FieldIndex or
            TextIndex.  Else, AdvancedQuery.Generic is used.
        """
        form_id = self.form_id
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        catalog = getToolByName(self.context, self.catalog)

        # Some ajax calls duplicate form values?  I have not figured out why!
        if self.request.form:
            for key, value in self.request.form.items():
                if isinstance(value, list):
                    self.request.form[key] = self.request.form[key][0]

        # If table_only specifies another form_id, then we abort.
        # This way, a single table among many can request a redraw,
        # and only its content will be rendered.
        if form_id not in self.request.get('table_only', form_id) \
            or form_id not in self.request.get('rows_only', form_id):
            return ''

        self.rows_only = self.request.get('rows_only', '') == form_id
        self.limit_from = int(self.request.get(form_id + '_limit_from', 0))

        # contentFilter is allowed in every self.review_state.
        for k, v in self.review_state.get('contentFilter', {}).items():
            self.contentFilter[k] = v

        # sort on
        self.sort_on = self.sort_on \
            if hasattr(self, 'sort_on') and self.sort_on \
            else None
        self.sort_on = self.request.get(form_id + '_sort_on', self.sort_on)
        self.sort_order = self.request.get(form_id + '_sort_order',
                                           'ascending')
        self.manual_sort_on = self.request.get(form_id + '_manual_sort_on',
                                               None)

        if self.sort_on:
            if self.sort_on in self.columns.keys():
                if self.columns[self.sort_on].get('index', None):
                    self.request.set(form_id + '_sort_on', self.sort_on)
                    # The column can be sorted directly using an index
                    idx = self.columns[self.sort_on]['index']
                    self.sort_on = idx
                    # Don't sort manually!
                    self.manual_sort_on = None
                else:
                    # The column must be manually sorted using python
                    self.manual_sort_on = self.sort_on
            else:
                # We cannot sort on a column that doesn't exist!
                msg = "{}: sort_on is '{}', not a valid column".format(
                    self, self.sort_on)
                logger.error(msg)
                self.sort_on = None

        if self.manual_sort_on:
            self.manual_sort_on = self.manual_sort_on[0] \
                                if type(self.manual_sort_on) in (list, tuple) \
                                else self.manual_sort_on
            if self.manual_sort_on not in self.columns.keys():
                # We cannot sort on a column that doesn't exist!
                msg = "{}: manual_sort_on is '{}', not a valid column".format(
                    self, self.manual_sort_on)
                logger.error(msg)
                self.manual_sort_on = None

        if self.sort_on or self.manual_sort_on:
            # By default, if sort_on is set, sort the items ASC
            # Trick to allow 'descending' keyword instead of 'reverse'
            self.sort_order = 'reverse' if self.sort_order \
                                        and self.sort_order[0] in ['d','r'] \
                                        else 'ascending'
        else:
            # By default, sort on created
            self.sort_order = 'reverse'
            self.sort_on = 'created'

        self.contentFilter['sort_order'] = self.sort_order
        if self.sort_on:
            self.contentFilter['sort_on'] = self.sort_on

        # pagesize
        pagesize = self.request.get(form_id + '_pagesize', self.pagesize)
        if type(pagesize) in (list, tuple):
            pagesize = pagesize[0]
        try:
            pagesize = int(pagesize)
        except (ValueError, TypeError):
            pagesize = self.pagesize = 10
        self.pagesize = pagesize
        # Plone's batching wants this variable:
        self.request.set('pagesize', self.pagesize)
        # and we want to make our choice remembered in bika_listing also
        self.request.set(self.form_id + '_pagesize', self.pagesize)

        # index filters.
        self.And = []
        self.Or = []
        ##logger.info("contentFilter: %s"%self.contentFilter)
        for k, v in self.columns.items():
            if not v.has_key('index') \
               or v['index'] == 'review_state' \
               or v['index'] in self.filter_indexes:
                continue
            self.filter_indexes.append(v['index'])
        ##logger.info("Filter indexes: %s"%self.filter_indexes)

        # any request variable named ${form_id}_{index_name}
        # will pass its value to that index in self.contentFilter.
        # all conditions using ${form_id}_{index_name} are searched with AND
        for index in self.filter_indexes:
            idx = catalog.Indexes.get(index, None)
            if not idx:
                logger.debug("index named '%s' not found in %s.  "
                             "(Perhaps the index is still empty)." %
                             (index, self.catalog))
                continue
            request_key = "%s_%s" % (form_id, index)
            value = self.request.get(request_key, '')
            if len(value) > 1:
                ##logger.info("And: %s=%s"%(index, value))
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.And.append(MatchRegexp(index, value))
                elif idx.meta_type == 'DateIndex':
                    logger.info("Unhandled DateIndex search on '%s'" % index)
                    continue
                else:
                    self.Or.append(Generic(index, value))

        # if there's a ${form_id}_filter in request, then all indexes
        # are searched for its value.
        # ${form_id}_filter is searched with OR against all indexes
        request_key = "%s_filter" % form_id
        value = self.request.get(request_key, '')
        if type(value) in (list, tuple):
            value = value[0]
        if len(value) > 1:
            for index in self.filter_indexes:
                idx = catalog.Indexes.get(index, None)
                if not idx:
                    logger.debug("index named '%s' not found in %s.  "
                                 "(Perhaps the index is still empty)." %
                                 (index, self.catalog))
                    continue
                ##logger.info("Or: %s=%s"%(index, value))
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.Or.append(MatchRegexp(index, value))
                    self.expand_all_categories = True
                    # https://github.com/bikalabs/Bika-LIMS/issues/1069
                    vals = value.split('-')
                    if len(vals) > 2:
                        valroot = vals[0]
                        for i in range(1, len(vals)):
                            valroot = '%s-%s' % (valroot, vals[i])
                            self.Or.append(MatchRegexp(index, valroot + '-*'))
                            self.expand_all_categories = True
                elif idx.meta_type == 'DateIndex':
                    if type(value) in (list, tuple):
                        value = value[0]
                    if value.find(":") > -1:
                        try:
                            lohi = [DateTime(x) for x in value.split(":")]
                        except Exception:
                            logger.info(
                                "Error (Or, DateIndex='%s', term='%s')" %
                                (index, value))
                            # 'lohi' is unusable; skip this index
                            continue
                        self.Or.append(Between(index, lohi[0], lohi[1]))
                        self.expand_all_categories = True
                    else:
                        try:
                            self.Or.append(Eq(index, DateTime(value)))
                            self.expand_all_categories = True
                        except Exception:
                            logger.info(
                                "Error (Or, DateIndex='%s', term='%s')" %
                                (index, value))
                else:
                    self.Or.append(Generic(index, value))
                    self.expand_all_categories = True
            self.Or.append(MatchRegexp('review_state', value))

        # get toggle_cols cookie value
        # and modify self.columns[]['toggle'] to match.
        toggle_cols = self.get_toggle_cols()
        for col in self.columns.keys():
            if col in toggle_cols:
                self.columns[col]['toggle'] = True
            else:
                self.columns[col]['toggle'] = False
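For illustration, a redraw request for a listing whose form id is 'analyses' might carry form values like these (the column and index names are hypothetical); every key follows the <form_id>_<parameter> convention parsed above.

form = {
    'table_only': 'analyses',           # redraw only this table
    'analyses_sort_on': 'getDueDate',   # column key, mapped to its index if one exists
    'analyses_sort_order': 'descending',
    'analyses_pagesize': '50',
    'analyses_filter': 'H2O',           # regex-matched with OR against filter_indexes
}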
Code example #34
File: __init__.py Project: zhangjian0111/bika.health
    def Import(self):
        folder = self.context.patients
        rows = self.get_rows(3)
        for row in rows:
            if not row['Firstname'] or not row['PrimaryReferrer']:
                continue
            pc = getToolByName(self.context, 'portal_catalog')
            client = pc(portal_type='Client', Title=row['PrimaryReferrer'])
            if len(client) == 0:
                error = "Primary referrer invalid: '%s'. Patient '%s %s' will not be uploaded"
                logger.error(error, row['PrimaryReferrer'], row['Firstname'],
                             row.get('Surname', ''))
                continue

            client = client[0].getObject()

            # Getting an existing ethnicity
            bsc = getToolByName(self.context, 'bika_setup_catalog')
            ethnicity = bsc(portal_type='Ethnicity',
                            Title=row.get('Ethnicity', ''))
            if len(ethnicity) == 0:
                raise IndexError("Invalid ethnicity: '%s'" % row['Ethnicity'])
            ethnicity = ethnicity[0].getObject()

            _id = folder.invokeFactory('Patient', id=tmpID())
            obj = folder[_id]
            obj.unmarkCreationFlag()
            renameAfterCreation(obj)
            Fullname = (row['Firstname'] + " " +
                        row.get('Surname', '')).strip()
            obj.edit(
                PatientID=row.get('PatientID'),
                title=Fullname,
                ClientPatientID=row.get('ClientPatientID', ''),
                Salutation=row.get('Salutation', ''),
                Firstname=row.get('Firstname', ''),
                Surname=row.get('Surname', ''),
                PrimaryReferrer=client.UID(),
                Gender=row.get('Gender', 'dk'),
                Age=row.get('Age', ''),
                BirthDate=row.get('BirthDate', ''),
                BirthDateEstimated=self.to_bool(
                    row.get('BirthDateEstimated', 'False')),
                BirthPlace=row.get('BirthPlace', ''),
                # TODO Ethnicity_Obj -> Ethnicity on health v319
                Ethnicity_Obj=ethnicity.UID(),
                Citizenship=row.get('Citizenship', ''),
                MothersName=row.get('MothersName', ''),
                CivilStatus=row.get('CivilStatus', ''),
                Anonymous=self.to_bool(row.get('Anonymous', 'False')))
            self.fill_contactfields(row, obj)
            self.fill_addressfields(row, obj)
            if 'Photo' in row and row['Photo']:
                try:
                    path = resource_filename(self.dataset_project,
                                             "setupdata/%s/%s" \
                                             % (self.dataset_name, row['Photo']))
                    file_data = open(path, "rb").read() if os.path.isfile(path) \
                        else open(path+'.jpg', "rb").read()
                    obj.setPhoto(file_data)
                except Exception:
                    logger.error("Unable to load Photo %s" % row['Photo'])

            if 'Feature' in row and row['Feature']:
                try:
                    path = resource_filename(self.dataset_project,
                                             "setupdata/%s/%s" \
                                             % (self.dataset_name, row['Feature']))
                    file_data = open(path, "rb").read() if os.path.isfile(path) \
                        else open(path+'.pdf', "rb").read()
                    obj.setFeature(file_data)
                except Exception:
                    logger.error("Unable to load Feature %s" % row['Feature'])

            obj.unmarkCreationFlag()
            transaction.savepoint(optimistic=True)
            if row.get('PatientID'):
                # To maintain the patient spreadsheet's IDs, we cannot do a
                # renameAfterCreation()
                if obj.getPatientID() != row.get('PatientID'):
                    transaction.savepoint(optimistic=True)
                    obj.aq_inner.aq_parent.manage_renameObject(
                        obj.id, row.get('PatientID'))
            else:
                renameAfterCreation(obj)
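A sketch of the minimal spreadsheet row this importer consumes (the column names are taken from the row.get() calls above; the values are made up):

row = {
    'Firstname': 'Jane',                      # required
    'Surname': 'Doe',
    'PrimaryReferrer': 'Happy Hills Clinic',  # required, must match a Client title
    'Ethnicity': 'Other',                     # must match an existing Ethnicity title
    'Gender': 'female',
    'BirthDate': '1980-01-01',
    'Photo': 'jane_doe',                      # optional, resolved under setupdata/<dataset>/
}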
Code example #35
File: __init__.py Project: xispa/bika.lims
def doActionFor(instance, action_id, active_only=True, allowed_transition=True):
    """Performs the transition (action_id) to the instance.

    The transition will only be triggered if the current state of the object
    allows the action_id passed in (delegate to isTransitionAllowed) and the
    instance hasn't been flagged as to be skipped previously.
    If active_only is set to True, the instance will only be transitioned if
    it's current state is active (not cancelled nor inactive)

    :param instance: Object to be transitioned
    :param action_id: transition id
    :param active_only: True if transition must apply to active objects
    :param allowed_transition: True to check whether the transition is
        allowed before triggering it
    :returns: whether the transition has been performed, and a message
    :rtype: tuple
    """
    actionperformed = False
    message = ''
    if isinstance(instance, list):
        # This check is here because sometimes Plone creates a list
        # from submitted form elements.
        if len(instance) > 1:
            logger.error(
                "doActionFor is getting an instance paramater which is alist  "
                "with more than one item. Instance: '{}', action_id: '{}'"
                .format(instance, action_id)
            )
        instance = instance[0]
    if not instance:
        return actionperformed, message

    workflow = getToolByName(instance, "portal_workflow")
    skipaction = skip(instance, action_id, peek=True)
    if skipaction:
        #clazzname = instance.__class__.__name__
        #msg = "Skipping transition '{0}': {1} '{2}'".format(action_id,
        #                                                    clazzname,
        #                                                    instance.getId())
        #logger.info(msg)
        return actionperformed, message

    if allowed_transition:
        allowed = isTransitionAllowed(instance, action_id, active_only)
        if not allowed:
            transitions = workflow.getTransitionsFor(instance)
            transitions = [trans['id'] for trans in transitions]
            transitions = ', '.join(transitions)
            currstate = getCurrentState(instance)
            clazzname = instance.__class__.__name__
            msg = "Transition '{0}' not allowed: {1} '{2}' ({3}). " \
                  "Available transitions: {4}".format(action_id, clazzname,
                                                      instance.getId(),
                                                      currstate, transitions)
            logger.warning(msg)
            _logTransitionFailure(instance, action_id)
            return actionperformed, message
    else:
        logger.warning(
            "doActionFor should never (ever) be called with allowed_transition"
            " set to False, as it skips the permission checks.")
    try:
        workflow.doActionFor(instance, action_id)
        actionperformed = True
    except WorkflowException as e:
        message = str(e)
        logger.error(message)
    return actionperformed, message
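A typical call pattern (illustrative; 'analysis' stands for any transitionable object): callers unpack the (performed, message) tuple and react when the transition did not fire.

performed, message = doActionFor(analysis, 'submit')
if not performed:
    logger.warning("Could not submit %s: %s" % (analysis.getId(), message))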
Code example #36
def remove_cascaded_analyses_of_root_samples(portal):
    """Removes Analyses from Root Samples that belong to Partitions

    https://github.com/senaite/senaite.core/issues/1504
    """
    logger.info("Removing cascaded analyses from Root Samples...")

    # Query all root Samples
    query = {
        "isRootAncestor": True,
        "sort_on": "created",
        "sort_order": "ascending",
    }
    root_samples = api.search(query, "bika_catalog_analysisrequest_listing")
    total = len(root_samples)
    logger.info("{} Samples to check... ".format(total))

    to_clean = []

    for num, brain in enumerate(root_samples):
        logger.debug("Checking Root Sample {}/{}".format(num+1, total))

        # No Partitions, continue...
        if not brain.getDescendantsUIDs:
            continue

        # get the root sample
        root_sample = api.get_object(brain)
        # get the contained analyses of the root sample
        root_analyses = root_sample.objectIds(spec=["Analysis"])

        # Mapping of cascaded Analysis -> Partition
        analysis_mapping = {}

        # check if a root analysis is located as well in one of the partitions
        for partition in root_sample.getDescendants():
            # get the contained analyses of the partition
            part_analyses = partition.objectIds(spec=["Analysis"])
            # filter analyses that cascade root analyses
            cascaded = filter(lambda an: an in root_analyses, part_analyses)
            # keep a mapping of analysis -> partition
            for analysis in cascaded:
                analysis_mapping[analysis] = partition

        if analysis_mapping:
            to_clean.append((root_sample, analysis_mapping))

    # count the cases for each condition
    case_counter = defaultdict(int)

    # cleanup cascaded analyses
    # mapping maps the analysis id -> partition
    for sample, mapping in to_clean:

        # go through the cascaded analyses and decide if the cascaded analysis
        # should be removed from (a) the root sample or (b) the partition.

        for analysis_id, partition in mapping.items():

            # analysis from the root sample
            root_an = sample[analysis_id]
            # WF state from the root sample analysis
            root_an_state = api.get_workflow_status_of(root_an)

            # analysis from the partition sample
            part_an = partition[analysis_id]
            # WF state from the partition sample analysis
            part_an_state = api.get_workflow_status_of(part_an)

            case_counter["{}_{}".format(root_an_state, part_an_state)] += 1

            # both analyses have the same WF state
            if root_an_state == part_an_state:
                # -> remove the analysis from the root sample
                sample._delObject(analysis_id)
                logger.info(
                    "Remove analysis '{}' in state '{}' from sample {}: {}"
                    .format(analysis_id, root_an_state,
                            api.get_id(sample), api.get_url(sample)))

            # both are in verified/published state
            elif IVerified.providedBy(root_an) and IVerified.providedBy(part_an):
                root_an_result = root_an.getResult()
                part_an_result = part_an.getResult()
                if root_an_result == part_an_result:
                    # remove the root analysis
                    sample._delObject(analysis_id)
                    logger.info(
                        "Remove analysis '{}' in state '{}' from sample {}: {}"
                        .format(analysis_id, root_an_state,
                                api.get_id(sample), api.get_url(sample)))
                else:
                    # -> unsolvable edge case
                    #    display an error message
                    logger.error(
                        "Analysis '{}' of root sample in state '{}' "
                        "and Analysis of partition in state {}. "
                        "Please fix manually: {}"
                        .format(analysis_id, root_an_state, part_an_state,
                                api.get_url(sample)))

            # root analysis is in invalid state
            elif root_an_state in ["rejected", "retracted"]:
                # -> probably the retest was automatically created in the
                #    parent instead of the partition
                pass

            # partition analysis is in invalid state
            elif part_an_state in ["rejected", "retracted"]:
                # -> probably the retest was automatically created in the
                #    parent instead of the partition
                pass

            # root analysis was submitted, but not the partition analysis
            elif ISubmitted.providedBy(root_an) and not ISubmitted.providedBy(part_an):
                # -> remove the analysis from the partition
                partition._delObject(analysis_id)
                logger.info(
                    "Remove analysis '{}' in state '{}' from partition {}: {}"
                    .format(analysis_id, part_an_state,
                            api.get_id(partition), api.get_url(partition)))

            # partition analysis was submitted, but not the root analysis
            elif ISubmitted.providedBy(part_an) and not ISubmitted.providedBy(root_an):
                # -> remove the analysis from the root sample
                sample._delObject(analysis_id)
                logger.info(
                    "Remove analysis '{}' in state '{}' from sample {}: {}"
                    .format(analysis_id, root_an_state,
                            api.get_id(sample), api.get_url(sample)))

            # inconsistent state
            else:
                logger.warning(
                    "Can not handle analysis '{}' located in '{}' (state {}) and '{}' (state {})"
                    .format(analysis_id,
                            repr(sample), root_an_state,
                            repr(partition), part_an_state))

    logger.info("Removing cascaded analyses from Root Samples... [DONE]")

    logger.info("State Combinations (root_an_state, part_an_state): {}"
                .format(sorted(case_counter.items(), key=itemgetter(1), reverse=True)))
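This helper is meant to run once as an upgrade step; a minimal invocation sketch (the portal object is normally handed in by the upgrade machinery, but it can also be fetched through the api used elsewhere in the function):

from bika.lims import api

portal = api.get_portal()
remove_cascaded_analyses_of_root_samples(portal)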
Code example #37
    def handle_http_request(self):
        request = self.request
        form = request.form

        submitted = form.get("submitted", False)
        send = form.get("send", False)
        cancel = form.get("cancel", False)

        if submitted and send:
            logger.info("*** SENDING EMAIL ***")

            # Parse user defined values from the request form
            recipients = form.get("recipients", [])
            responsibles = form.get("responsibles", [])
            subject = form.get("subject")
            body = form.get("body")
            reports = self.get_reports()

            # Merge recipients and responsibles
            recipients = set(recipients + responsibles)

            # sanity checks
            if not recipients:
                message = _("No email recipients selected")
                self.add_status_message(message, "error")
            if not subject:
                message = _("Please add an email subject")
                self.add_status_message(message, "error")
            if not body:
                message = _("Please add an email text")
                self.add_status_message(message, "error")
            if not reports:
                message = _("No attachments")
                self.add_status_message(message, "error")

            success = False
            if all([recipients, subject, body, reports]):
                attachments = []

                # report pdfs
                for report in reports:
                    pdf = self.get_pdf(report)
                    if pdf is None:
                        logger.error("Skipping empty PDF for report {}"
                                     .format(report.getId()))
                        continue
                    ar = report.getAnalysisRequest()
                    filename = "{}.pdf".format(ar.getId())
                    filedata = pdf.data
                    attachments.append(
                        self.to_email_attachment(filename, filedata))

                # additional attachments
                for attachment in self.get_attachments():
                    af = attachment.getAttachmentFile()
                    filedata = af.data
                    filename = af.filename
                    attachments.append(
                        self.to_email_attachment(filename, filedata))

                success = self.send_email(
                    recipients, subject, body, attachments=attachments)

            if success:
                # selected name, email pairs which received the email
                pairs = map(self.parse_email, recipients)
                send_to_names = map(lambda p: p[0], pairs)

                # set recipients to the reports
                for report in reports:
                    ar = report.getAnalysisRequest()
                    # publish the AR
                    self.publish(ar)

                    # Publish all linked ARs of this report
                    # N.B. `ContainedAnalysisRequests` is an extended field
                    field = report.getField("ContainedAnalysisRequests")
                    contained_ars = field.get(report) or []
                    for obj in contained_ars:
                        self.publish(obj)

                    # add new recipients to the AR Report
                    new_recipients = filter(
                        lambda r: r.get("Fullname") in send_to_names,
                        self.get_recipients(ar))
                    self.set_report_recipients(report, new_recipients)

                message = _(u"Message sent to {}"
                            .format(", ".join(send_to_names)))
                self.add_status_message(message, "info")
                return request.response.redirect(self.exit_url)
            else:
                message = _("Failed to send Email(s)")
                self.add_status_message(message, "error")

        if submitted and cancel:
            logger.info("*** EMAIL CANCELLED ***")
            message = _("Email cancelled")
            self.add_status_message(message, "info")
            return request.response.redirect(self.exit_url)

        # get the selected ARReport objects
        reports = self.get_reports()
        attachments = self.get_attachments()

        # calculate the total size of all PDFs
        self.total_size = self.get_total_size(reports, attachments)
        if self.total_size > self.max_email_size:
            # don't allow to send oversized emails
            self.allow_send = False
            message = _("Total size of email exceeded {:.1f} MB ({:.2f} MB)"
                        .format(self.max_email_size / 1024,
                                self.total_size / 1024))
            self.add_status_message(message, "error")

        # prepare the data for the template
        self.reports = map(self.get_report_data, reports)
        self.recipients = self.get_recipients_data(reports)
        self.responsibles = self.get_responsibles_data(reports)

        # inform the user about invalid recipients
        if not all(map(lambda r: r.get("valid"), self.recipients)):
            message = _(
                "Not all contacts are equal for the selected Reports. "
                "Please manually select recipients for this email.")
            self.add_status_message(message, "warning")

        return self.template()
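The handler reacts to a POST roughly like the following (the field names match the form.get() calls above; the addresses are made up):

form = {
    'submitted': True,
    'send': True,
    'recipients': ['Jane Doe <jane@example.com>'],
    'responsibles': ['Lab Manager <manager@example.com>'],
    'subject': 'Analysis Results',
    'body': 'Please find the reports attached.',
}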
Code example #38
File: workflow.py Project: afrinshaik/bika.health
    def __call__(self):
        # Do generic bika.lims stuff
        BaseClass.__call__(self)
        # Do bika-health specific actions when submit
        action = BaseClass._get_form_workflow_action(self)
        addPortalMessage = self.context.plone_utils.addPortalMessage
        if action[0] == 'submit' and isActive(self.context):
            workflow = getToolByName(self.context, 'portal_workflow')
            translate = self.context.translate
            rc = getToolByName(self.context, REFERENCE_CATALOG)
            uc = getToolByName(self.context, 'uid_catalog')
            # retrieve the results from database and check if
            # the values are exceeding panic levels
            alerts = {}
            for uid in self.request.form.get('Result', [{}])[0].keys():
                analysis = rc.lookupObject(uid)
                analysis = analysis.getObject() if hasattr(analysis, 'getObject') else analysis
                if not analysis:
                    continue
                astate = workflow.getInfoFor(analysis, 'review_state')
                if astate == 'retracted':
                    continue
                alerts.update(ResultOutOfRange(analysis)())
            if alerts:
                message = translate(_('Some results exceeded the '
                                      'panic levels that may '
                                      'indicate an imminent '
                                      'life-threatening condition'
                                      ))
                addPortalMessage(message, 'warning')
                self.request.response.redirect(self.context.absolute_url())

                # If panic levels alert email enabled, send an email to
                # labmanagers
                bs = self.context.bika_setup
                if hasattr(bs, 'getEnablePanicAlert') \
                        and bs.getEnablePanicAlert():
                    laboratory = self.context.bika_setup.laboratory
                    lab_address = "<br/>".join(laboratory.getPrintAddress())
                    managers = self.context.portal_groups.getGroupMembers('LabManagers')
                    mime_msg = MIMEMultipart('related')
                    mime_msg['Subject'] = _("Panic alert")
                    mime_msg['From'] = formataddr(
                        (encode_header(laboratory.getName()),
                         laboratory.getEmailAddress()))
                    to = []
                    for manager in managers:
                        user = self.portal.acl_users.getUser(manager)
                        uemail = user.getProperty('email')
                        ufull = user.getProperty('fullname')
                        to.append(formataddr((encode_header(ufull), uemail)))
                    mime_msg['To'] = ','.join(to)
                    strans = []
                    ars = {}
                    for analysis_uid, alertlist in alerts.items():
                        analysis = uc(analysis_uid).getObject()
                        for alert in alertlist:
                            ars[analysis.aq_parent.Title()] = 1
                            strans.append("- {0}, {1}: {2}".format(
                                          analysis.getService().Title(),
                                          translate(_("Result")),
                                          analysis.getResult()))
                    ars = ", ".join(ars.keys())
                    stran = "<br/>".join(strans)
                    text = translate(_(
                        "Some results from ${items} exceeded the panic levels "
                        "that may indicate an imminent life-threatening "
                        "condition: <br/><br/>${analysisresults}<br/><br/>"
                        "<b>Please, check the Analysis Request if you "
                        "want to re-test the analysis or immediately "
                        "alert the client.</b><br/><br/>${lab_address}",
                        mapping={'items': ars,
                                 'analysisresults': stran,
                                 'lab_address': lab_address}))
                    msg_txt = MIMEText(safe_unicode(text).encode('utf-8'),
                                       _subtype='html')
                    mime_msg.preamble = 'This is a multi-part MIME message.'
                    mime_msg.attach(msg_txt)
                    try:
                        host = getToolByName(self.context, 'MailHost')
                        host.send(mime_msg.as_string(), immediate=True)
                    except Exception as msg:
                        logger.error("Panic level email: %s" % str(msg))
                        message = translate(
                            _('Unable to send an email to alert lab '
                              'managers that some analyses exceeded the '
                              'panic levels') + (": %s" % str(msg)))
                        addPortalMessage(message, 'warning')
Code example #39
    def _process_request(self):
        # Use this function from a template that uses bika_listing_table,
        # so that the table_only request var is used to update the table
        # in place.
        form_id = self.form_id
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        catalog = getToolByName(self.context, self.catalog)

        # If table_only specifies another form_id, then we abort.
        # This way, a single table among many can request a redraw,
        # and only its content will be rendered.
        if form_id not in self.request.get('table_only', form_id):
            return ''

        ## review_state_selector
        cookie = json.loads(self.request.get("review_state", '{}'))
        cookie_key = "%s%s" % (self.context.portal_type, form_id)
        # first check POST
        selected_state = self.request.get("%s_review_state" % form_id, '')
        if not selected_state:
            # then check cookie
            selected_state = cookie.get(cookie_key, 'default')
        # get review_state id=selected_state
        states = [r for r in self.review_states if r['id'] == selected_state]
        review_state = states and states[0] or self.review_states[0]
        # set request and cookie to currently selected state id
        if not selected_state:
            selected_state = self.review_states[0]['id']

        self.review_state = cookie[cookie_key] = selected_state
        cookie = json.dumps(cookie)
        self.request['review_state'] = cookie
        self.request.response.setCookie('review_state', cookie, path="/")

        # contentFilter is expected in every review_state.
        for k, v in review_state['contentFilter'].items():
            self.contentFilter[k] = v

        # sort on
        sort_on = self.request.get(form_id + '_sort_on', '')
        # manual_sort_on: only sort the current batch of items
        # this is a compromise for sorting without column indexes
        self.manual_sort_on = None
        if sort_on \
           and sort_on in self.columns.keys() \
           and self.columns[sort_on].get('index', None):
            idx = self.columns[sort_on].get('index', sort_on)
            self.contentFilter['sort_on'] = idx
        else:
            if sort_on:
                self.manual_sort_on = sort_on
                if 'sort_on' in self.contentFilter:
                    del self.contentFilter['sort_on']

        # sort order
        self.sort_order = self.request.get(form_id + '_sort_order', '')
        if self.sort_order:
            self.contentFilter['sort_order'] = self.sort_order
        else:
            if 'sort_order' not in self.contentFilter:
                self.sort_order = 'ascending'
                self.contentFilter['sort_order'] = 'ascending'
                self.request.set(form_id + '_sort_order', 'ascending')
            else:
                self.sort_order = self.contentFilter['sort_order']
        if self.manual_sort_on:
            del self.contentFilter['sort_order']

        # pagesize
        pagesize = self.request.get(form_id + '_pagesize', self.pagesize)
        if type(pagesize) in (list, tuple):
            pagesize = pagesize[0]
        try:
            pagesize = int(pagesize)
        except (ValueError, TypeError):
            pagesize = self.pagesize
        self.pagesize = pagesize
        # Plone's batching wants this variable:
        self.request.set('pagesize', self.pagesize)

        # pagenumber
        self.pagenumber = int(
            self.request.get(form_id + '_pagenumber', self.pagenumber))
        # Plone's batching wants this variable:
        self.request.set('pagenumber', self.pagenumber)

        # index filters.
        self.And = []
        self.Or = []
        ##logger.info("contentFilter: %s"%self.contentFilter)
        for k, v in self.columns.items():
            if not v.has_key('index') \
               or v['index'] == 'review_state' \
               or v['index'] in self.filter_indexes:
                continue
            self.filter_indexes.append(v['index'])
        ##logger.info("Filter indexes: %s"%self.filter_indexes)

        # any request variable named ${form_id}_{index_name}
        # will pass its value to that index in self.contentFilter.
        # all conditions using ${form_id}_{index_name} are searched with AND
        for index in self.filter_indexes:
            idx = catalog.Indexes.get(index, None)
            if not idx:
                logger.debug("index named '%s' not found in %s.  "
                             "(Perhaps the index is still empty)." %
                             (index, self.catalog))
                continue
            request_key = "%s_%s" % (form_id, index)
            value = self.request.get(request_key, '')
            if len(value) > 1:
                ##logger.info("And: %s=%s"%(index, value))
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.And.append(MatchRegexp(index, value))
                elif idx.meta_type == 'DateIndex':
                    logger.error("Unhandled DateIndex search on '%s'" % index)
                    continue
                else:
                    self.Or.append(Generic(index, value))

        # if there's a ${form_id}_filter in request, then all indexes
        # are searched for its value.
        # ${form_id}_filter is searched with OR against all indexes
        request_key = "%s_filter" % form_id
        value = self.request.get(request_key, '')
        if len(value) > 1:
            for index in self.filter_indexes:
                idx = catalog.Indexes.get(index, None)
                if not idx:
                    logger.debug("index named '%s' not found in %s.  "
                                 "(Perhaps the index is still empty)." %
                                 (index, self.catalog))
                    continue
                ##logger.info("Or: %s=%s"%(index, value))
                if idx.meta_type in ('ZCTextIndex', 'FieldIndex'):
                    self.Or.append(MatchRegexp(index, value))
                elif idx.meta_type == 'DateIndex':
                    if value.find(":") > -1:
                        try:
                            lohi = [DateTime(x) for x in value.split(":")]
                        except Exception:
                            logger.error(
                                "Error (Or, DateIndex='%s', term='%s')" %
                                (index, value))
                            # 'lohi' is unusable; skip this index
                            continue
                        self.Or.append(Between(index, lohi[0], lohi[1]))
                    else:
                        try:
                            self.Or.append(Eq(index, DateTime(value)))
                        except Exception:
                            logger.error(
                                "Error (Or, DateIndex='%s', term='%s')" %
                                (index, value))
                else:
                    self.Or.append(Generic(index, value))
            self.Or.append(MatchRegexp('review_state', value))

        # get toggle_cols cookie value
        # and modify self.columns[]['toggle'] to match.
        toggle_cols = self.get_toggle_cols()
        for col in self.columns.keys():
            if col in toggle_cols:
                self.columns[col]['toggle'] = True
            else:
                self.columns[col]['toggle'] = False