Ejemplo n.º 1
0
 def getConfig(self):
     """Return the querystring configuration for the bika catalog query.

     Merges the base plone.app.querystring operation definitions with the
     bika.lims.bika_catalog_query field definitions, then groups the
     enabled indexes alphabetically by their "group" attribute.
     """
     registry = getUtility(IRegistry)
     # First grab the base config, so we can use the operations
     registryreader = IQuerystringRegistryReader(registry)
     registryreader.prefix = "plone.app.querystring.operation"
     op_config = registryreader.parseRegistry()
     # Then combine our fields
     registryreader = IQuerystringRegistryReader(registry)
     registryreader.prefix = "bika.lims.bika_catalog_query"
     config = registryreader.parseRegistry()
     config = registryreader.getVocabularyValues(config)
     config.update(op_config)
     registryreader.mapOperations(config)
     registryreader.mapSortableIndexes(config)
     config = {
         'indexes': config.get('bika.lims.bika_catalog_query.field'),
         'sortable_indexes': config.get('sortable'),
     }
     # Group enabled indexes by "group"; setdefault avoids the manual
     # "if group not in ..." initialisation dance
     groupedIndexes = {}
     for indexName, index in config['indexes'].items():
         if index['enabled']:
             groupedIndexes.setdefault(index['group'], []).append(
                 (index['title'], indexName))
     # Sort each group's entries alphabetically by (title, index name);
     # a plain loop, not a list comprehension used for side effects
     for entries in groupedIndexes.values():
         entries.sort()
     config['groupedIndexes'] = groupedIndexes
     return config
Ejemplo n.º 2
0
 def getConfig(self):
     """Return the querystring configuration for the bika catalog query.

     Merges the base plone.app.querystring operation definitions with the
     bika.lims.bika_catalog_query field definitions, then groups the
     enabled indexes alphabetically by their "group" attribute.
     """
     registry = getUtility(IRegistry)
     # First grab the base config, so we can use the operations
     registryreader = IQuerystringRegistryReader(registry)
     registryreader.prefix = "plone.app.querystring.operation"
     op_config = registryreader.parseRegistry()
     # Then combine our fields
     registryreader = IQuerystringRegistryReader(registry)
     registryreader.prefix = "bika.lims.bika_catalog_query"
     config = registryreader.parseRegistry()
     config = registryreader.getVocabularyValues(config)
     config.update(op_config)
     registryreader.mapOperations(config)
     registryreader.mapSortableIndexes(config)
     config = {
         'indexes': config.get('bika.lims.bika_catalog_query.field'),
         'sortable_indexes': config.get('sortable'),
     }
     # Group enabled indexes by "group"; setdefault avoids the manual
     # "if group not in ..." initialisation dance
     groupedIndexes = {}
     for indexName, index in config['indexes'].items():
         if index['enabled']:
             groupedIndexes.setdefault(index['group'], []).append(
                 (index['title'], indexName))
     # Sort each group's entries alphabetically by (title, index name);
     # a plain loop, not a list comprehension used for side effects
     for entries in groupedIndexes.values():
         entries.sort()
     config['groupedIndexes'] = groupedIndexes
     return config
Ejemplo n.º 3
0
    def __call__(self, analyses):
        """Export the current worksheet layout as a semicolon-delimited CSV
        and stream it to the browser.

        After a header row of [list name, method], one row is written per
        parent (AR) found in the layout, carrying the tray number, cup
        (slot) position, parent/container UIDs and the dilute factor.
        """
        tray = 1
        now = DateTime().strftime('%Y%m%d-%H%M')
        uc = getToolByName(self.context, 'uid_catalog')
        instrument = self.context.getInstrument()
        norm = getUtility(IIDNormalizer).normalize
        filename = '%s-%s.csv' % (self.context.getId(),
                                  norm(instrument.getDataInterface()))
        listname = '%s_%s_%s' % (self.context.getId(), norm(
            instrument.Title()), now)
        options = {'dilute_factor': 1, 'method': 'F SO2 & T SO2'}
        for k, v in instrument.getDataInterfaceOptions():
            options[k] = v

        # for looking up "cup" number (= slot) of ARs
        parent_to_slot = {}
        layout = self.context.getLayout()
        for slot in layout:
            p_uid = uc(UID=slot['analysis_uid'])[0].getObject().aq_parent.UID()
            slot['parent_uid'] = p_uid
            # first slot seen for a parent wins
            if p_uid not in parent_to_slot:
                parent_to_slot[p_uid] = int(slot['position'])

        # write rows, one per PARENT
        rows = [[listname, options['method']]]
        tmprows = []
        ars_seen = set()
        for slot in layout:
            # create one batch row per parent AR
            p_uid = slot['parent_uid']
            if p_uid in ars_seen:
                continue
            tmprows.append([tray, parent_to_slot[p_uid], p_uid,
                            slot['container_uid'], options['dilute_factor'],
                            ""])
            ars_seen.add(p_uid)
        # sort batch rows by cup number; key= works on both Python 2 and 3
        # (the original cmp-based sort is Python-2 only)
        tmprows.sort(key=lambda r: r[1])
        rows += tmprows

        ramdisk = StringIO()
        writer = csv.writer(ramdisk, delimiter=';')
        writer.writerows(rows)
        result = ramdisk.getvalue()
        ramdisk.close()

        # stream file to browser
        setheader = self.request.RESPONSE.setHeader
        setheader('Content-Length', len(result))
        setheader('Content-Type', 'text/comma-separated-values')
        setheader('Content-Disposition', 'inline; filename=%s' % filename)
        self.request.RESPONSE.write(result)
Ejemplo n.º 4
0
def parseFormquery(context,
                   formquery,
                   sort_on=None,
                   sort_order=None,
                   catalog_name='portal_catalog',
                   **kwargs):
    """Convert a plone.app.querystring formquery into a catalog query dict.

    :param context: acquisition context used to resolve tools and registry
    :param formquery: sequence of {'i': index, 'o': operation, 'v': value}
        mappings as produced by the querystring widget
    :param sort_on: optional index name to sort the results on
    :param sort_order: optional sort order (e.g. 'reverse')
    :param catalog_name: catalog whose indexes are used to validate the query
    :param kwargs: extra defaults merged into the final query
    :returns: a catalog query dict; {} when formquery is empty or when none
        of the resulting indexes are valid for the catalog
    """
    if not formquery:
        return {}
    reg = getUtility(IRegistry)

    # Make sure the things in formquery are dictionaries
    formquery = map(dict, formquery)

    query = {}
    for row in formquery:
        operator = row.get('o', None)
        function_path = reg["%s.operation" % operator]

        # The functions expect this pattern of object, so lets give it to
        # them in a named tuple instead of jamming things onto the request
        row = Row(index=row.get('i', None),
                  operator=function_path,
                  values=row.get('v', None))

        # BUGFIX: use a dedicated local instead of rebinding `kwargs`; the
        # original clobbered the caller-supplied **kwargs here, so they were
        # lost before the final query.update(kwargs) below.
        parser = resolve(row.operator)
        parsed = parser(context, row)
        query.update(parsed)

    if not query:
        # If the query is empty fall back onto the equality query
        query = _equal(context, row)

    # Check for valid indexes
    catalog = getToolByName(context, catalog_name)
    valid_indexes = [index for index in query if index in catalog.indexes()]

    # We'll ignore any invalid index, but will return an empty set if none of
    # the indexes are valid.
    if not valid_indexes:
        logger.warning(
            "Using empty query because there are no valid indexes used.")
        return {}

    # Add sorting (sort_on and sort_order) to the query
    if sort_on:
        query['sort_on'] = sort_on
    if sort_order:
        query['sort_order'] = sort_order

    # And also, add whatever kwargs have been passed - some querybuilder
    # may want to add defaults here.
    query.update(kwargs)

    return query
Ejemplo n.º 5
0
def parseFormquery(context, formquery, sort_on=None, sort_order=None,
                   catalog_name='portal_catalog', **kwargs):
    """Convert a plone.app.querystring formquery into a catalog query dict.

    :param context: acquisition context used to resolve tools and registry
    :param formquery: sequence of {'i': index, 'o': operation, 'v': value}
        mappings as produced by the querystring widget
    :param sort_on: optional index name to sort the results on
    :param sort_order: optional sort order (e.g. 'reverse')
    :param catalog_name: catalog whose indexes are used to validate the query
    :param kwargs: extra defaults merged into the final query
    :returns: a catalog query dict; {} when formquery is empty or when none
        of the resulting indexes are valid for the catalog
    """
    if not formquery:
        return {}
    reg = getUtility(IRegistry)

    # Make sure the things in formquery are dictionaries
    formquery = map(dict, formquery)

    query = {}
    for row in formquery:
        operator = row.get('o', None)
        function_path = reg["%s.operation" % operator]

        # The functions expect this pattern of object, so lets give it to
        # them in a named tuple instead of jamming things onto the request
        row = Row(index=row.get('i', None),
                  operator=function_path,
                  values=row.get('v', None))

        # BUGFIX: use a dedicated local instead of rebinding `kwargs`; the
        # original clobbered the caller-supplied **kwargs here, so they were
        # lost before the final query.update(kwargs) below.
        parser = resolve(row.operator)
        parsed = parser(context, row)
        query.update(parsed)

    if not query:
        # If the query is empty fall back onto the equality query
        query = _equal(context, row)

    # Check for valid indexes
    catalog = getToolByName(context, catalog_name)
    valid_indexes = [index for index in query if index in catalog.indexes()]

    # We'll ignore any invalid index, but will return an empty set if none of
    # the indexes are valid.
    if not valid_indexes:
        logger.warning(
            "Using empty query because there are no valid indexes used.")
        return {}

    # Add sorting (sort_on and sort_order) to the query
    if sort_on:
        query['sort_on'] = sort_on
    if sort_order:
        query['sort_order'] = sort_order

    # And also, add whatever kwargs have been passed - some querybuilder
    # may want to add defaults here.
    query.update(kwargs)

    return query
Ejemplo n.º 6
0
 def __call__(self):
     """Return the registry configuration in JSON format"""
     registry = getUtility(IRegistry)
     # Read the shared plone.app.querystring operation definitions first
     op_reader = IQuerystringRegistryReader(registry)
     op_reader.prefix = "plone.app.querystring.operation"
     operations = op_reader.parseRegistry()
     # Read our own field definitions, then merge the operations in
     field_reader = IQuerystringRegistryReader(registry)
     field_reader.prefix = self.prefix
     cfg = field_reader.parseRegistry()
     cfg = field_reader.getVocabularyValues(cfg)
     cfg.update(operations)
     field_reader.mapOperations(cfg)
     field_reader.mapSortableIndexes(cfg)
     # Expose only the fields and the sortable-index mapping
     return {
         'indexes': cfg.get(self.prefix + '.field'),
         'sortable_indexes': cfg.get('sortable'),
     }
Ejemplo n.º 7
0
 def __call__(self):
     """Return the registry configuration in JSON format"""
     registry = getUtility(IRegistry)
     # Read the shared plone.app.querystring operation definitions first
     op_reader = IQuerystringRegistryReader(registry)
     op_reader.prefix = "plone.app.querystring.operation"
     operations = op_reader.parseRegistry()
     # Read our own field definitions, then merge the operations in
     field_reader = IQuerystringRegistryReader(registry)
     field_reader.prefix = self.prefix
     cfg = field_reader.parseRegistry()
     cfg = field_reader.getVocabularyValues(cfg)
     cfg.update(operations)
     field_reader.mapOperations(cfg)
     field_reader.mapSortableIndexes(cfg)
     # Expose only the fields and the sortable-index mapping
     return {
         'indexes': cfg.get(self.prefix + '.field'),
         'sortable_indexes': cfg.get('sortable'),
     }
Ejemplo n.º 8
0
    def getConfig(self):
        """Return the querystring configuration for this view.

        When self.registry_prefix is set, merges the base
        plone.app.querystring operation definitions with the custom field
        definitions under that prefix; otherwise falls back to the stock
        plone.app.querystring configuration.  Enabled indexes are then
        grouped alphabetically by their "group" attribute.
        """
        registry = getUtility(IRegistry)
        prefix = self.registry_prefix
        if prefix is not None:
            # First grab the base config's operations
            registryreader = IQuerystringRegistryReader(registry)
            registryreader.prefix = "plone.app.querystring.operation"
            plone_config = registryreader.parseRegistry()
            # then merge custom fields
            registryreader = IQuerystringRegistryReader(registry)
            registryreader.prefix = prefix
            config = registryreader.parseRegistry()
            config = registryreader.getVocabularyValues(config)
            config.update(plone_config)
            config = registryreader.mapOperations(config)
            config = registryreader.mapSortableIndexes(config)
            config = {
                'indexes': config.get(prefix + '.field'),
                'sortable_indexes': config.get('sortable'),
            }
        else:
            # No custom prefix: use the stock plone.app.querystring config
            registryreader = IQuerystringRegistryReader(registry)
            registryreader.prefix = "plone.app.querystring"
            config = registryreader()

        # Group enabled indexes by "group"; setdefault avoids the manual
        # "if group not in ..." initialisation dance
        groupedIndexes = {}
        for indexName, index in config['indexes'].items():
            if index['enabled']:
                groupedIndexes.setdefault(index['group'], []).append(
                    (index['title'], indexName))

        # Sort each group's entries alphabetically by (title, index name);
        # a plain loop, not a list comprehension used for side effects
        for entries in groupedIndexes.values():
            entries.sort()

        config['groupedIndexes'] = groupedIndexes
        return config
Ejemplo n.º 9
0
    def getConfig(self):
        """Return the querystring configuration for this view.

        When self.registry_prefix is set, merges the base
        plone.app.querystring operation definitions with the custom field
        definitions under that prefix; otherwise falls back to the stock
        plone.app.querystring configuration.  Enabled indexes are then
        grouped alphabetically by their "group" attribute.
        """
        registry = getUtility(IRegistry)
        prefix = self.registry_prefix
        if prefix is not None:
            # First grab the base config's operations
            registryreader = IQuerystringRegistryReader(registry)
            registryreader.prefix = "plone.app.querystring.operation"
            plone_config = registryreader.parseRegistry()
            # then merge custom fields
            registryreader = IQuerystringRegistryReader(registry)
            registryreader.prefix = prefix
            config = registryreader.parseRegistry()
            config = registryreader.getVocabularyValues(config)
            config.update(plone_config)
            config = registryreader.mapOperations(config)
            config = registryreader.mapSortableIndexes(config)
            config = {
                'indexes': config.get(prefix + '.field'),
                'sortable_indexes': config.get('sortable'),
            }
        else:
            # No custom prefix: use the stock plone.app.querystring config
            registryreader = IQuerystringRegistryReader(registry)
            registryreader.prefix = "plone.app.querystring"
            config = registryreader()

        # Group enabled indexes by "group"; setdefault avoids the manual
        # "if group not in ..." initialisation dance
        groupedIndexes = {}
        for indexName, index in config['indexes'].items():
            if index['enabled']:
                groupedIndexes.setdefault(index['group'], []).append(
                    (index['title'], indexName))

        # Sort each group's entries alphabetically by (title, index name);
        # a plain loop, not a list comprehension used for side effects
        for entries in groupedIndexes.values():
            entries.sort()

        config['groupedIndexes'] = groupedIndexes
        return config
Ejemplo n.º 10
0
    def folderitems(self):
        """Build the client listing items, decorating each with a link to the
        client's landing page plus email, phone and fax columns.
        """
        self.filter_indexes = None
        self.contentsMethod = self.getClientList
        items = BikaListingView.folderitems(self)
        registry = getUtility(IRegistry)
        if 'bika.lims.client.default_landing_page' in registry:
            landing_page = registry['bika.lims.client.default_landing_page']
        else:
            landing_page = 'analysisrequests'
        for item in items:
            # skip rows that carry no object (e.g. separator/category rows);
            # `in` replaces the Python-2-only dict.has_key()
            if 'obj' not in item:
                continue
            obj = item['obj']

            item['replace']['title'] = "<a href='%s/%s'>%s</a>" % \
                (item['url'], landing_page.encode('ascii'), item['title'])

            # fetch the email once instead of three separate calls
            email = obj.getEmailAddress()
            item['EmailAddress'] = email
            item['replace']['EmailAddress'] = "<a href='%s'>%s</a>" % \
                ('mailto:%s' % email, email)
            item['Phone'] = obj.getPhone()
            item['Fax'] = obj.getFax()

        return items
Ejemplo n.º 11
0
    def folderitems(self):
        """Build the client listing items, decorating each with a link to the
        client's landing page plus email, phone and fax columns.
        """
        self.filter_indexes = None
        self.contentsMethod = self.getClientList
        items = BikaListingView.folderitems(self)
        registry = getUtility(IRegistry)
        if 'bika.lims.client.default_landing_page' in registry:
            landing_page = registry['bika.lims.client.default_landing_page']
        else:
            landing_page = 'analysisrequests'
        for item in items:
            # skip rows that carry no object (e.g. separator/category rows);
            # `in` replaces the Python-2-only dict.has_key()
            if 'obj' not in item:
                continue
            obj = item['obj']

            item['replace']['title'] = "<a href='%s/%s'>%s</a>" % \
                (item['url'], landing_page.encode('ascii'), item['title'])

            # fetch the email once instead of three separate calls
            email = obj.getEmailAddress()
            item['EmailAddress'] = email
            item['replace']['EmailAddress'] = "<a href='%s'>%s</a>" % \
                ('mailto:%s' % email, email)
            item['Phone'] = obj.getPhone()
            item['Fax'] = obj.getFax()

        return items
Ejemplo n.º 12
0
 def _DEFAULT_TEMPLATE(self):
     """Return the configured default AR report template name,
     falling back to 'default.pt' when the registry record is absent."""
     key = 'bika.lims.analysisrequest.default_arreport_template'
     return getUtility(IRegistry).get(key, 'default.pt')
Ejemplo n.º 13
0
def generateUniqueId(context):
    """ Generate pretty content IDs.
        - context is used to find portal_type; in case there is no
          prefix specified for the type, the normalized portal_type is
          used as a prefix instead.
    """

    # Two normalizers: fn_normalize for filename-safe prefixes,
    # id_normalize for the lowercased portal_type fallback prefix.
    fn_normalize = getUtility(IFileNameNormalizer).normalize
    id_normalize = getUtility(IIDNormalizer).normalize
    prefixes = context.bika_setup.getPrefixes()

    # Optional two-digit year that gets inserted right after the prefix
    # (empty string when "year in prefix" is disabled in bika_setup).
    year = context.bika_setup.getYearInPrefix() and \
        DateTime().strftime("%Y")[2:] or ''
    # Default separator is '-'; a per-portal_type prefix entry may override it
    # (missing 'separator' keys are normalized to the empty string first).
    separator = '-'
    for e in prefixes:
        if 'separator' not in e:
            e['separator'] = ''
        if e['portal_type'] == context.portal_type:
            separator = e['separator']

    # Analysis Request IDs
    if context.portal_type == "AnalysisRequest":
        # AR ids derive from the parent sample id: <prefix><sample#><sep>R<ar#>
        sample = context.getSample()
        s_prefix = fn_normalize(sample.getSampleType().getPrefix())
        sample_padding = context.bika_setup.getSampleIDPadding()
        ar_padding = context.bika_setup.getARIDPadding()
        sample_id = sample.getId()
        # everything after the prefix in the sample id is the sample number
        sample_number = sample_id.split(s_prefix)[1]
        # next AR number for this sample (1 when none exist yet)
        ar_number = sample.getLastARNumber()
        ar_number = ar_number and ar_number + 1 or 1
        # NOTE(review): zfill expects an int — assumes the *IDPadding
        # accessors return ints; confirm against bika_setup schema.
        return fn_normalize(
            ("%s%s" + separator + "R%s") % (s_prefix,
                          str(sample_number).zfill(sample_padding),
                          str(ar_number).zfill(ar_padding))
        )

    # Sample Partition IDs
    if context.portal_type == "SamplePartition":
        # We do not use prefixes.  There are actually codes that require the 'P'.
        # matches = [p for p in prefixes if p['portal_type'] == 'SamplePartition']
        # prefix = matches and matches[0]['prefix'] or 'samplepartition'
        # padding = int(matches and matches[0]['padding'] or '0')

        # at this time the part exists, so +1 would be 1 too many
        partnr = str(len(context.aq_parent.objectValues('SamplePartition')))
        # parent id is normalized already
        return ("%s" + separator + "P%s") % (context.aq_parent.id, partnr)

    if context.bika_setup.getExternalIDServer():

        # if using external server

        for d in prefixes:
            # Sample ID comes from SampleType
            if context.portal_type == "Sample":
                # Samples ignore the prefix table entry `d` entirely and take
                # their prefix from the SampleType.
                prefix = context.getSampleType().getPrefix()
                padding = context.bika_setup.getSampleIDPadding()
                new_id = str(idserver_generate_id(context, "%s%s-" % (prefix, year)))
                if padding:
                    new_id = new_id.zfill(int(padding))
                return ('%s%s' + separator + '%s') % (prefix, year, new_id)
            elif d['portal_type'] == context.portal_type:
                prefix = d['prefix']
                padding = d['padding']
                new_id = str(idserver_generate_id(context, "%s%s-" % (prefix, year)))
                if padding:
                    new_id = new_id.zfill(int(padding))
                return ('%s%s' + separator + '%s') % (prefix, year, new_id)
        # no prefix; use portal_type
        # year is not inserted here
        # portal_type is be normalized to lowercase
        npt = id_normalize(context.portal_type)
        new_id = str(idserver_generate_id(context, npt + "-"))
        return ('%s' + separator + '%s') % (npt, new_id)

    else:

        # No external id-server.

        def next_id(prefix):
            # Scan the catalog's 'id' index for existing ids of the form
            # <prefix><separator><number> and return max+1 as a string.
            # normalize before anything
            prefix = fn_normalize(prefix)
            plone = context.portal_url.getPortalObject()
            # grab the first catalog we are indexed in.
            at = getToolByName(plone, 'archetype_tool')
            if context.portal_type in at.catalog_map:
                catalog_name = at.catalog_map[context.portal_type][0]
            else:
                catalog_name = 'portal_catalog'
            catalog = getToolByName(plone, catalog_name)

            # get all IDS that start with prefix
            # this must specifically exclude AR IDs (two -'s)
            # NOTE(review): the character class [\d+]+ also matches literal
            # '+' characters — '\d+' was probably intended; confirm before
            # changing, since int() below would raise on a '+' in practice.
            rr = re.compile("^"+prefix+separator+"[\d+]+$")
            ids = [int(i.split(prefix+separator)[1]) \
                   for i in catalog.Indexes['id'].uniqueValues() \
                   if rr.match(i)]

            #plone_tool = getToolByName(context, 'plone_utils')
            #if not plone_tool.isIDAutoGenerated(l.id):
            ids.sort()
            _id = ids and ids[-1] or 0
            new_id = _id + 1
            return str(new_id)

        for d in prefixes:
            if context.portal_type == "Sample":
                # Special case for Sample IDs
                prefix = fn_normalize(context.getSampleType().getPrefix())
                padding = context.bika_setup.getSampleIDPadding()
                sequence_start = context.bika_setup.getSampleIDSequenceStart()
                new_id = next_id(prefix+year)
                # If sequence_start is greater than new_id. Set
                # sequence_start as new_id. (Jira LIMS-280)
                if sequence_start > int(new_id):
                    new_id = str(sequence_start)
                if padding:
                    new_id = new_id.zfill(int(padding))
                return ('%s%s' + separator + '%s') % (prefix, year, new_id)
            elif d['portal_type'] == context.portal_type:
                prefix = d['prefix']
                padding = d['padding']
                sequence_start = d.get("sequence_start", None)
                new_id = next_id(prefix+year)
                # Jira-tracker LIMS-280
                if sequence_start and int(sequence_start) > int(new_id):
                    new_id = str(sequence_start)
                if padding:
                    new_id = new_id.zfill(int(padding))
                return ('%s%s' + separator + '%s') % (prefix, year, new_id)

        # no prefix; use portal_type
        # no year inserted here
        # use "IID" normalizer, because we want portal_type to be lowercased.
        prefix = id_normalize(context.portal_type);
        new_id = next_id(prefix)
        return ('%s' + separator + '%s') % (prefix, new_id)
Ejemplo n.º 14
0
    def __call__(self, analyses):
        """Export the current worksheet layout as a semicolon-delimited CSV
        and stream it to the browser.

        After a header row of [list name, method], one row is written per
        parent (AR) found in the layout, carrying the tray number, cup
        (slot) position, parent/container UIDs and the dilute factor.
        """
        tray = 1
        now = DateTime().strftime('%Y%m%d-%H%M')
        uc = getToolByName(self.context, 'uid_catalog')
        instrument = self.context.getInstrument()
        norm = getUtility(IIDNormalizer).normalize
        filename = '%s-%s.csv' % (self.context.getId(),
                                  norm(instrument.getDataInterface()))
        listname = '%s_%s_%s' % (self.context.getId(), norm(
            instrument.Title()), now)
        options = {'dilute_factor': 1, 'method': 'F SO2 & T SO2'}
        for k, v in instrument.getDataInterfaceOptions():
            options[k] = v

        # for looking up "cup" number (= slot) of ARs
        parent_to_slot = {}
        layout = self.context.getLayout()
        for slot in layout:
            p_uid = uc(UID=slot['analysis_uid'])[0].getObject().aq_parent.UID()
            slot['parent_uid'] = p_uid
            # first slot seen for a parent wins
            if p_uid not in parent_to_slot:
                parent_to_slot[p_uid] = int(slot['position'])

        # write rows, one per PARENT
        rows = [[listname, options['method']]]
        tmprows = []
        ars_seen = set()
        for slot in layout:
            # create one batch row per parent AR
            p_uid = slot['parent_uid']
            if p_uid in ars_seen:
                continue
            tmprows.append([tray, parent_to_slot[p_uid], p_uid,
                            slot['container_uid'], options['dilute_factor'],
                            ""])
            ars_seen.add(p_uid)
        # sort batch rows by cup number; key= works on both Python 2 and 3
        # (the original cmp-based sort is Python-2 only)
        tmprows.sort(key=lambda r: r[1])
        rows += tmprows

        ramdisk = StringIO()
        writer = csv.writer(ramdisk, delimiter=';')
        writer.writerows(rows)
        result = ramdisk.getvalue()
        ramdisk.close()

        # stream file to browser
        setheader = self.request.RESPONSE.setHeader
        setheader('Content-Length', len(result))
        setheader('Content-Type', 'text/comma-separated-values')
        setheader('Content-Disposition', 'inline; filename=%s' % filename)
        self.request.RESPONSE.write(result)
Ejemplo n.º 15
0
    def folderitems(self, full_objects = False):
        """
        >>> portal = layer['portal']
        >>> portal_url = portal.absolute_url()
        >>> from plone.app.testing import SITE_OWNER_NAME
        >>> from plone.app.testing import SITE_OWNER_PASSWORD

        Test page batching https://github.com/bikalabs/Bika-LIMS/issues/1276
        When visiting the second page, the Water sampletype should be displayed:

        >>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
        >>> browser.open(portal_url+"/bika_setup/bika_sampletypes/folder_view?",
        ... "list_pagesize=10&list_review_state=default&list_pagenumber=2")
        >>> browser.contents
        '...Water...'
        """
        # NOTE(review): the `full_objects` parameter is never read in this
        # body — confirm whether subclasses/callers rely on it.

        # Resolve the callable used to fetch contents; default to the
        # catalog tool named by self.catalog.
        #self.contentsMethod = self.context.getFolderContents
        if not hasattr(self, 'contentsMethod'):
            self.contentsMethod = getToolByName(self.context, self.catalog)

        context = aq_inner(self.context)
        plone_layout = getMultiAdapter((context, self.request), name = u'plone_layout')
        plone_utils = getToolByName(context, 'plone_utils')
        plone_view = getMultiAdapter((context, self.request), name = u'plone')
        portal_properties = getToolByName(context, 'portal_properties')
        portal_types = getToolByName(context, 'portal_types')
        workflow = getToolByName(context, 'portal_workflow')
        site_properties = portal_properties.site_properties
        norm = getUtility(IIDNormalizer).normalize
        # Paging is disabled when requested explicitly, when the view opts
        # in via show_all, or when pagesize is 0.
        if self.request.get('show_all', '').lower() == 'true' \
                or self.show_all == True \
                or self.pagesize == 0:
            show_all = True
        else:
            show_all = False

        # start/end are inclusive indexes of the current page's slice
        pagenumber = int(self.request.get('pagenumber', 1) or 1)
        pagesize = self.pagesize
        start = (pagenumber - 1) * pagesize
        end = start + pagesize - 1

        # Use AdvancedQuery when the view defines And/Or subqueries and the
        # contents method supports it; otherwise pass contentFilter directly.
        if (hasattr(self, 'And') and self.And) \
           or (hasattr(self, 'Or') and self.Or):
            # if contentsMethod is capable, we do an AdvancedQuery.
            if hasattr(self.contentsMethod, 'makeAdvancedQuery'):
                aq = self.contentsMethod.makeAdvancedQuery(self.contentFilter)
                if hasattr(self, 'And') and self.And:
                    tmpAnd = And()
                    for q in self.And:
                        tmpAnd.addSubquery(q)
                    aq &= tmpAnd
                if hasattr(self, 'Or') and self.Or:
                    tmpOr = Or()
                    for q in self.Or:
                        tmpOr.addSubquery(q)
                    aq &= tmpOr
                brains = self.contentsMethod.evalAdvancedQuery(aq)
            else:
                # otherwise, self.contentsMethod must handle contentFilter
                brains = self.contentsMethod(self.contentFilter)
        else:
            brains = self.contentsMethod(self.contentFilter)

        results = []
        self.page_start_index = 0
        current_index = -1
        for i, obj in enumerate(brains):
            # we don't know yet if it's a brain or an object
            path = hasattr(obj, 'getPath') and obj.getPath() or \
                 "/".join(obj.getPhysicalPath())

            # wake the brain up into a full object when possible
            if hasattr(obj, 'getObject'):
                obj = obj.getObject()

            # check if the item must be rendered or not (prevents from
            # doing it later in folderitems) and dealing with paging
            if not self.isItemAllowed(obj):
                continue

            # avoid creating unnecessary info for items outside the current
            # batch;  only the path is needed for the "select all" case...
            # we only take allowed items into account
            current_index += 1
            if not show_all and not (start <= current_index <= end):
                results.append(dict(path = path, uid = obj.UID()))
                continue

            uid = obj.UID()
            title = obj.Title()
            description = obj.Description()
            icon = plone_layout.getIcon(obj)
            url = obj.absolute_url()
            relative_url = obj.absolute_url(relative = True)

            fti = portal_types.get(obj.portal_type)
            if fti is not None:
                type_title_msgid = fti.Title()
            else:
                type_title_msgid = obj.portal_type

            url_href_title = '%s at %s: %s' % (
                t(type_title_msgid),
                path,
                to_utf8(description))

            # NOTE(review): the trailing comma makes `modified` a 1-tuple,
            # not a string — looks unintended; confirm template usage
            # before changing.
            modified = self.ulocalized_time(obj.modified()),

            # element css classes
            type_class = 'contenttype-' + \
                plone_utils.normalizeString(obj.portal_type)

            # collect the current state of every workflow attached to obj,
            # both as css classes and as a state_var -> state mapping
            state_class = ''
            states = {}
            for w in workflow.getWorkflowsFor(obj):
                state = w._getWorkflowStateOf(obj).id
                states[w.state_var] = state
                state_class += "state-%s " % state

            # the base per-row dictionary consumed by the listing template
            results_dict = dict(
                obj = obj,
                id = obj.getId(),
                title = title,
                uid = uid,
                path = path,
                url = url,
                fti = fti,
                item_data = json.dumps([]),
                url_href_title = url_href_title,
                obj_type = obj.Type,
                size = obj.getObjSize,
                modified = modified,
                icon = icon.html_tag(),
                type_class = type_class,
                # a list of lookups for single-value-select fields
                choices = {},
                state_class = state_class,
                relative_url = relative_url,
                view_url = url,
                table_row_class = "",
                category = 'None',

                # a list of names of fields that may be edited on this item
                allow_edit = [],

                # a list of names of fields that are compulsory (if editable)
                required = [],

                # "before", "after" and replace: dictionary (key is column ID)
                # A snippet of HTML which will be rendered
                # before/after/instead of the table cell content.
                before = {}, # { before : "<a href=..>" }
                after = {},
                replace = {},
            )
            # NOTE(review): bare except silently maps any failure to the
            # 'active' state — consider narrowing to WorkflowException.
            try:
                rs = workflow.getInfoFor(obj, 'review_state')
                st_title = workflow.getTitleForStateOnType(rs, obj.portal_type)
                st_title = t(PMF(st_title))
            except:
                rs = 'active'
                st_title = None
            if rs:
                results_dict['review_state'] = rs
            # expose every workflow state var on the row; derive a state
            # title from the first one when review_state gave none
            for state_var, state in states.items():
                if not st_title:
                    st_title = workflow.getTitleForStateOnType(
                        state, obj.portal_type)
                results_dict[state_var] = state
            results_dict['state_title'] = st_title

            # extra classes for individual fields on this item { field_id : "css classes" }
            results_dict['class'] = {}
            # collect field-alert icons from IFieldIcons adapters into
            # self.field_icons, keyed by object UID
            for name, adapter in getAdapters((obj, ), IFieldIcons):
                auid = obj.UID() if hasattr(obj, 'UID') and callable(obj.UID) else None
                if not auid:
                    continue
                alerts = adapter()
                # logger.info(str(alerts))
                if alerts and auid in alerts:
                    if auid in self.field_icons:
                        self.field_icons[auid].extend(alerts[auid])
                    else:
                        self.field_icons[auid] = alerts[auid]

            # Search for values for all columns in obj
            for key in self.columns.keys():
                if hasattr(obj, key):
                    # if the key is already in the results dict
                    # then we don't replace it's value
                    if results_dict.has_key(key):
                        continue
                    value = getattr(obj, key)
                    if callable(value):
                        value = value()
                    results_dict[key] = value
            results.append(results_dict)

        return results
Ejemplo n.º 16
0
 def _DEFAULT_TEMPLATE(self):
     """Return the configured default AR report template name,
     falling back to 'default.pt' when the registry record is absent."""
     key = 'bika.lims.analysisrequest.default_arreport_template'
     return getUtility(IRegistry).get(key, 'default.pt')
Ejemplo n.º 17
0
def generateUniqueId(context):
    """Generate pretty content IDs.

    - context is used to find portal_type; in case there is no
      prefix specified for the type, the normalized portal_type is
      used as a prefix instead.
    """

    fn_normalize = getUtility(IFileNameNormalizer).normalize
    id_normalize = getUtility(IIDNormalizer).normalize
    prefixes = context.bika_setup.getPrefixes()

    # Two-digit year fragment (e.g. "25"), or '' when disabled in setup
    year = context.bika_setup.getYearInPrefix() and \
        DateTime().strftime("%Y")[2:] or ''
    separator = '-'
    for e in prefixes:
        if 'separator' not in e:
            e['separator'] = ''
        if e['portal_type'] == context.portal_type:
            separator = e['separator']

    # Analysis Request IDs: <sampletype-prefix><sample#><sep>R<ar#>
    if context.portal_type == "AnalysisRequest":
        sample = context.getSample()
        s_prefix = fn_normalize(sample.getSampleType().getPrefix())
        sample_padding = context.bika_setup.getSampleIDPadding()
        ar_padding = context.bika_setup.getARIDPadding()
        sample_id = sample.getId()
        sample_number = sample_id.split(s_prefix)[1]
        # next secondary AR number for this sample (1-based)
        ar_number = sample.getLastARNumber()
        ar_number = ar_number and ar_number + 1 or 1
        return fn_normalize(
            ("%s%s" + separator + "R%s") %
            (s_prefix, str(sample_number).zfill(sample_padding),
             str(ar_number).zfill(ar_padding)))

    # Sample Partition IDs: <parent-sample-id><sep>P<part#>
    if context.portal_type == "SamplePartition":
        # We do not use prefixes.  There are actually codes that require the 'P'.
        # at this time the part exists, so +1 would be 1 too many
        partnr = str(len(context.aq_parent.objectValues('SamplePartition')))
        # parent id is normalized already
        return ("%s" + separator + "P%s") % (context.aq_parent.id, partnr)

    if context.bika_setup.getExternalIDServer():

        # if using external server

        for d in prefixes:
            # Sample ID comes from SampleType
            # NOTE(review): this branch ignores 'd', so Samples only reach
            # it when at least one prefix is configured — confirm intended
            if context.portal_type == "Sample":
                prefix = context.getSampleType().getPrefix()
                padding = context.bika_setup.getSampleIDPadding()
                new_id = str(
                    idserver_generate_id(context, "%s%s-" % (prefix, year)))
                if padding:
                    new_id = new_id.zfill(int(padding))
                return ('%s%s' + separator + '%s') % (prefix, year, new_id)
            elif d['portal_type'] == context.portal_type:
                prefix = d['prefix']
                padding = d['padding']
                new_id = str(
                    idserver_generate_id(context, "%s%s-" % (prefix, year)))
                if padding:
                    new_id = new_id.zfill(int(padding))
                return ('%s%s' + separator + '%s') % (prefix, year, new_id)
        # no prefix; use portal_type
        # year is not inserted here
        # portal_type is normalized to lowercase
        npt = id_normalize(context.portal_type)
        new_id = str(idserver_generate_id(context, npt + "-"))
        return ('%s' + separator + '%s') % (npt, new_id)

    else:

        # No external id-server: scan the catalog for the highest
        # existing number with this prefix and add one.

        def next_id(prefix):
            # normalize before anything
            prefix = fn_normalize(prefix)
            plone = context.portal_url.getPortalObject()
            # grab the first catalog we are indexed in.
            at = getToolByName(plone, 'archetype_tool')
            if context.portal_type in at.catalog_map:
                catalog_name = at.catalog_map[context.portal_type][0]
            else:
                catalog_name = 'portal_catalog'
            catalog = getToolByName(plone, catalog_name)

            # get all IDs that are exactly prefix+separator+digits;
            # this must specifically exclude AR IDs (two -'s).
            # FIX: the previous pattern "[\d+]+$" was a character class
            # that also accepted literal '+' (e.g. "WS-12+"), making the
            # int() below raise ValueError; the prefix/separator were
            # also interpolated unescaped.  Escape them and match \d+.
            rr = re.compile("^" + re.escape(prefix + separator) + r"\d+$")
            ids = [int(i.split(prefix + separator)[1])
                   for i in catalog.Indexes['id'].uniqueValues()
                   if rr.match(i)]

            # highest existing number (0 when none), plus one
            _id = max(ids) if ids else 0
            new_id = _id + 1
            return str(new_id)

        for d in prefixes:
            if context.portal_type == "Sample":
                # Special case for Sample IDs
                prefix = fn_normalize(context.getSampleType().getPrefix())
                padding = context.bika_setup.getSampleIDPadding()
                sequence_start = context.bika_setup.getSampleIDSequenceStart()
                new_id = next_id(prefix + year)
                # If sequence_start is greater than new_id, use
                # sequence_start as new_id. (Jira LIMS-280)
                if sequence_start > int(new_id):
                    new_id = str(sequence_start)
                if padding:
                    new_id = new_id.zfill(int(padding))
                return ('%s%s' + separator + '%s') % (prefix, year, new_id)
            elif d['portal_type'] == context.portal_type:
                prefix = d['prefix']
                padding = d['padding']
                sequence_start = d.get("sequence_start", None)
                new_id = next_id(prefix + year)
                # Jira-tracker LIMS-280
                if sequence_start and int(sequence_start) > int(new_id):
                    new_id = str(sequence_start)
                if padding:
                    new_id = new_id.zfill(int(padding))
                return ('%s%s' + separator + '%s') % (prefix, year, new_id)

        # no prefix configured; use the "IID"-normalized (lowercased)
        # portal_type, without the year fragment
        prefix = id_normalize(context.portal_type)
        new_id = next_id(prefix)
        return ('%s' + separator + '%s') % (prefix, new_id)