Example #1
0
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
#
##############################################################################

from zLOG import LOG, INFO, WARNING
from ZODB.POSException import ConflictError
from Products.CMFActivity.ActivityRuntimeEnvironment import \
  getActivityRuntimeEnvironment

try:
    from sklearn.externals.joblib import register_parallel_backend
    from sklearn.externals.joblib.parallel import ParallelBackendBase, parallel_backend
    from sklearn.externals.joblib.parallel import FallbackToBackend, SequentialBackend
    from sklearn.externals.joblib.hashing import hash as joblib_hash
except ImportError:
    LOG(__name__, WARNING, "Joblib cannot be imported, support disabled")
else:

    class JoblibResult(object):
        """Wrap an asynchronous result object and fire an optional
        callback the first time the value is retrieved.

        ``result`` is expected to expose the final value as its
        ``.result`` attribute; ``callback``, when not None, is invoked
        with that value on every ``get()`` call.
        """

        def __init__(self, result, callback):
            self.result = result
            self.callback = callback

        def get(self, timeout=None):
            """Return the wrapped value.

            ``timeout`` is accepted for API compatibility with joblib
            result objects but is ignored here.
            """
            value = self.result.result
            if self.callback is not None:
                self.callback(value)
            return value

    class JoblibDispatch(object):
Example #2
0
 def _getWorklistActionList():
   is_anonymous = portal.portal_membership.isAnonymousUser()
   portal_catalog = portal.portal_catalog
   sql_catalog = portal_catalog.getSQLCatalog()
   catalog_security_uid_groups_columns_dict = \
     sql_catalog.getSQLCatalogSecurityUidGroupsColumnsDict()
   getSecurityUidDictAndRoleColumnDict = \
     portal_catalog.getSecurityUidDictAndRoleColumnDict
   search_result = getattr(self, "Base_getCountFromWorklistTable", None)
   use_cache = search_result is not None
   if use_cache:
     ignored_security_column_id_set = self._getWorklistIgnoredSecurityColumnSet()
     ignored_security_uid_parameter_set = set(x
       for x, y in catalog_security_uid_groups_columns_dict.iteritems()
       if y in ignored_security_column_id_set
     )
     _getSecurityUidDictAndRoleColumnDict = getSecurityUidDictAndRoleColumnDict
     def getSecurityUidDictAndRoleColumnDict(**kw):
       security_uid_dict, role_column_dict, local_role_column_dict = \
         _getSecurityUidDictAndRoleColumnDict(**kw)
       for ignored_security_column_id in ignored_security_column_id_set:
         role_column_dict.pop(ignored_security_column_id, None)
         local_role_column_dict.pop(ignored_security_column_id, None)
       for ignored_security_uid_parameter in \
           ignored_security_uid_parameter_set:
         security_uid_dict.pop(ignored_security_uid_parameter)
       return security_uid_dict, role_column_dict, local_role_column_dict
     select_expression_prefix = 'sum(`%s`) as %s' % (COUNT_COLUMN_TITLE, COUNT_COLUMN_TITLE)
     # Prevent catalog from trying to join
     getQuery = SimpleQuery
   else:
     search_result = portal_catalog.unrestrictedSearchResults
     select_expression_prefix = 'count(*) as %s' % (COUNT_COLUMN_TITLE, )
     # Let catalog join as needed
     getQuery = lambda comparison_operator=None, **kw: AutoQuery(
       operator=comparison_operator,
       **kw
     )
   worklist_result_dict = {}
   # Get a list of dict of WorklistVariableMatchDict grouped by compatible
   # conditions
   (worklist_list_grouped_by_condition, worklist_metadata) = \
     groupWorklistListByCondition(
       worklist_dict=worklist_dict,
       sql_catalog=sql_catalog,
       getSecurityUidDictAndRoleColumnDict=\
         getSecurityUidDictAndRoleColumnDict,
       catalog_security_uid_groups_columns_dict=\
         catalog_security_uid_groups_columns_dict,
     )
   if src__:
     action_list = []
   for grouped_worklist_dict in worklist_list_grouped_by_condition:
     # Generate the query for this worklist_list
     (total_criterion_id_list, query) = \
       getWorklistListQuery(
         getQuery=getQuery,
         grouped_worklist_dict=grouped_worklist_dict,
       )
     group_by_expression = ', '.join(total_criterion_id_list)
     assert COUNT_COLUMN_TITLE not in total_criterion_id_list
     # If required mapping method is not present on the query, assume it
     # handles column mapping properly, and build a bare select
     # expression.
     select_expression = select_expression_prefix + ', ' \
                         + group_by_expression
     catalog_brain_result = []
     try:
       catalog_brain_result = search_result(
                                   select_expression=select_expression,
                                   group_by_expression=group_by_expression,
                                   query=query,
                                   limit=None,
                                   src__=src__)
     except Unauthorized:
       if not is_anonymous:
         raise
       LOG('WorkflowTool.listActions', WARNING,
           'Exception while computing worklists: %s'
           % grouped_worklist_dict.keys(),
           error=sys.exc_info())
       continue
     except ProgrammingError, error_value:
       # 1146 = table does not exist
       if not use_cache or error_value[0] != 1146:
         raise
       try:
         self.Base_zCreateWorklistTable()
       except ProgrammingError, error_value:
         # 1050 = table exists (alarm run just a bit too late)
         if error_value[0] != 1050:
           raise
Example #3
0
    def getContentInformation(self):
        """Returns the information about the PDF document with pdfinfo.
    """
        if not self.hasData():
            return {}
        try:
            return self._content_information.copy()  # pylint: disable=access-member-before-definition
        except AttributeError:
            pass
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(self.getData())
        tmp.seek(0)
        command_result = None
        try:

            # First, we use pdfinfo to get standard metadata
            command = ['pdfinfo', '-meta', '-box', tmp.name]
            try:
                command_result = Popen(command, stdout=PIPE).communicate()[0]
            except OSError, e:
                if e.errno == errno.ENOENT:
                    raise ConversionError('pdfinfo was not found')
                raise

            result = {}
            for line in command_result.splitlines():
                item_list = line.split(':')
                key = item_list[0].strip()
                value = ':'.join(item_list[1:]).strip()
                result[key] = value

            # Then we use PyPDF2 to get extra metadata
            try:
                from PyPDF2 import PdfFileReader
                from PyPDF2.utils import PdfReadError
            except ImportError:
                # if PyPDF2 not found, pass
                pass
            else:
                try:
                    pdf_file = PdfFileReader(tmp)
                    for info_key, info_value in (pdf_file.getDocumentInfo()
                                                 or {}).iteritems():
                        info_key = info_key.lstrip("/")
                        if isinstance(info_value, unicode):
                            info_value = info_value.encode("utf-8")

                        # Ignore values that cannot be pickled ( such as AAPL:Keywords )
                        try:
                            pickle.dumps(info_value)
                        except pickle.PicklingError:
                            LOG(
                                "PDFDocument.getContentInformation", INFO,
                                "Ignoring non picklable document info on %s: %s (%r)"
                                %
                                (self.getRelativeUrl(), info_key, info_value))
                        else:
                            result.setdefault(info_key, info_value)
                except (PdfReadError, AssertionError):
                    LOG("PDFDocument.getContentInformation", PROBLEM,
                      "PyPDF2 is Unable to read PDF, probably corrupted PDF here : %s" % \
                      (self.getRelativeUrl(),))
                except Exception:
                    # an exception of Exception class will be raised when the
                    # document is encrypted.
                    pass
Example #4
0
    def _mapColumns(self, column_table_map, table_usage_dict, column_name_set,
                    group, vote_result_dict):
        """Assign each pending column to exactly one catalog table.

        Columns with vote results are mapped first, each to its
        highest-scored table; the remaining columns are then mapped
        greedily, preferring tables that are already in use so that no
        extra join is introduced.

        column_table_map -- maps a column name to the tables it may belong to
        table_usage_dict -- maps a table name to the set of columns used on it
            (the sets are mutated in place)
        column_name_set -- columns still to be resolved (mutated in place;
            empty on success)
        group -- join group the mapping applies to
        vote_result_dict -- maps a column name to a {table_name: score} dict

        Raises ValueError when at least one column cannot be mapped.
        """
        mapping_dict = {}
        catalog_table_name = self.catalog_table_name

        # Map all columns to tables decided by vote.
        for column_name, candidate_dict in vote_result_dict.iteritems():
            # candidate_dict is never empty
            max_score = 0
            # NOTE(review): if every score in candidate_dict is <= 0,
            # best_count/best_choice stay unbound and the code below raises
            # NameError -- presumably scores are always positive; confirm
            # with the voting code.
            for table_name, score in candidate_dict.iteritems():
                if score > max_score:
                    max_score = score
                    best_count = 0
                    best_choice = table_name
                elif score == max_score:
                    best_count += 1
            # A non-zero best_count means several tables tied at max_score.
            if best_count:
                LOG(
                    'ColumnMap', WARNING,
                    'Mapping vote led to a tie. Mapping to %r' %
                    (best_choice, ))
            if MAPPING_TRACE:
                LOG('ColumnMap', INFO,
                    'Mapping by vote %r to %r' % (column_name, best_choice))
            mapping_dict[column_name] = best_choice
            column_name_set.remove(column_name)
            # Drop the column from every other table's usage set so it no
            # longer inflates their weight below.
            for table_name, column_set in table_usage_dict.iteritems():
                if table_name != best_choice:
                    column_set.discard(column_name)

        # Map all remaining columns.
        def table_weight(a):
            """Return the sort weight of a (table_name, column_set) pair:
            aliased tables first, then the catalog table, then by the
            number of columns used on the table.
            """
            if (group, a[0]) in self.table_alias_dict:
                result = (2, )
            elif a[0] == catalog_table_name:
                result = (1, )
            else:
                result = (0, len(a[1]))
            return result

        # Sort table name list, first has the most required columns
        weighted_table_list = sorted(table_usage_dict.iteritems(),
                                     key=table_weight)
        while len(weighted_table_list):
            table_name, column_set = weighted_table_list.pop()
            if len(column_set):
                common_column_set = column_name_set.intersection(column_set)
                if len(common_column_set):
                    # Only allow usage of this table if any of those is true:
                    # - current table is the catalog  (if any catalog was provided)
                    # - there are column used on that table which are already mapped
                    #   (this does not include columns mapped by this code)
                    #   If columns are mapped to this table in current group, then using
                    #   it will not require a new join, so it should be allowed.
                    #   Note: it would be good to take indexes into account when there
                    #   are multiple candidate tables.
                    # - any of those columns belongs exclusively to this table
                    #   Although the list of tables those columns belong to is known
                    #   earlier (in "build"), mapping them here
                    #   - avoids code duplication (registerTable, resolveColumn,
                    #     _addJoinTableForColumn)
                    #   - offers user to vote for an unknown table, overriding this
                    #     forced mapping.
                    use_allowed = table_name == catalog_table_name or \
                                  len(common_column_set) < len(column_set)
                    if not use_allowed:
                        for column_name in column_set:
                            if len(column_table_map.get(column_name, [])) == 1:
                                # There is no alternative, mark as required
                                use_allowed = True
                                break
                    if use_allowed:
                        for column_name in common_column_set:
                            if MAPPING_TRACE:
                                LOG('ColumnMap', INFO, 'Mapping by default %r to %r' % \
                                    (column_name, table_name))
                            mapping_dict[column_name] = table_name
                            # This column must not be resolved any longer
                            column_name_set.remove(column_name)
                            # Remove this column from sets containing it. This prevents from
                            # giving a high score to a table which columns would already have
                            # been mapped to another table.
                            for ignored, other_column_set in weighted_table_list:
                                other_column_set.discard(column_name)
                        # Re-sort: usage sets changed, so weights changed too.
                        weighted_table_list.sort(key=table_weight)
                    else:
                        # All column which are mappable on that table are to-be-mapped
                        # columns. This means that this table was not explicitely used, and
                        # as each table contain a different amount of lines, we should not
                        # join with any non-explicit table. Hence, we skip this mapping.
                        LOG('ColumnMap', INFO, 'Skipping possible map of %r on %r as that table' \
                            ' is not explicitely used.' % (common_column_set, table_name))

        # Detect incomplete mappings
        if len(column_name_set):
            raise ValueError, 'Could not map those columns: %r' % (
                column_name_set, )

        # Do the actual mapping
        for column_name, table_name in mapping_dict.iteritems():
            # Mark this column as resolved
            if MAPPING_TRACE:
                LOG(
                    'ColumnMap', INFO, 'Mapping column %s to table %s' %
                    (column_name, table_name))
            self.registerTable(table_name, group=group)
            self.resolveColumn(column_name, table_name, group=group)
            if table_name != catalog_table_name:
                self._addJoinTableForColumn(table_name, column_name, group)
Example #5
0
    def prepareContents(self, registry, register_subdirs=0):
        """Create an object for each allowable entry of this directory.

        registry -- directory registry used to look up directory info and
            to pick an object type from a meta type or a file extension
        register_subdirs -- when true, subdirectories not yet known to the
            registry are registered on the fly

        Returns a (data, objects) pair: ``data`` maps object id to the
        created object, ``objects`` is a tuple of
        {'id': ..., 'meta_type': ...} dicts.
        """
        # Creates objects for each file.
        data = {}
        objects = []
        types = self._readTypesFile()
        for entry in _filtered_listdir(self._filepath):
            if not self._isAllowableFilename(entry):
                continue
            entry_minimal_fp = '/'.join((self._minimal_fp, entry))
            entry_filepath = path.join(self._filepath, entry)
            if path.isdir(entry_filepath):
                # Add a subdirectory only if it was previously registered,
                # unless register_subdirs is set.
                info = registry.getDirectoryInfo(entry_minimal_fp)
                if info is None and register_subdirs:
                    # Register unknown subdirs
                    registry.registerDirectoryByPath(entry_filepath)
                    info = registry.getDirectoryInfo(entry_minimal_fp)
                if info is not None:
                    # Use the meta type from the types file when available,
                    # falling back to a plain DirectoryView.
                    mt = types.get(entry)
                    t = None
                    if mt is not None:
                        t = registry.getTypeByMetaType(mt)
                    if t is None:
                        t = DirectoryView
                    ob = t(entry, entry_minimal_fp)
                    ob_id = ob.getId()
                    data[ob_id] = ob
                    objects.append({'id': ob_id, 'meta_type': ob.meta_type})
            else:
                # Regular file: split "name.ext".
                pos = entry.rfind('.')
                if pos >= 0:
                    name = entry[:pos]
                    ext = path.normcase(entry[pos + 1:])
                else:
                    name = entry
                    ext = ''
                if not name or name == 'REQUEST':
                    # Not an allowable id.
                    continue
                mo = bad_id(name)
                if mo is not None and mo != -1:  # Both re and regex formats
                    # Not an allowable id.
                    continue
                # Resolve the object type: full entry name first, then the
                # bare name in the types file, finally the file extension.
                t = None
                mt = types.get(entry, None)
                if mt is None:
                    mt = types.get(name, None)
                if mt is not None:
                    t = registry.getTypeByMetaType(mt)
                if t is None:
                    t = registry.getTypeByExtension(ext)

                if t is not None:
                    metadata = FSMetadata(entry_filepath)
                    metadata.read()
                    try:
                        ob = t(name,
                               entry_minimal_fp,
                               fullname=entry,
                               properties=metadata.getProperties())
                    except:
                        # Instantiation failed: log the traceback and expose
                        # a BadFile placeholder instead of breaking the view.
                        import traceback
                        typ, val, tb = exc_info()
                        try:
                            exc_lines = traceback.format_exception(
                                typ, val, tb)
                            LOG('DirectoryView', ERROR, '\n'.join(exc_lines))

                            ob = BadFile(name,
                                         entry_minimal_fp,
                                         exc_str='\r\n'.join(exc_lines),
                                         fullname=entry)
                        finally:
                            tb = None  # Avoid leaking frame!

                    # FS-based security
                    permissions = metadata.getSecurity()
                    if permissions is not None:
                        for name in permissions.keys():
                            acquire, roles = permissions[name]
                            try:
                                ob.manage_permission(name, roles, acquire)
                            except ValueError:
                                LOG('DirectoryView',
                                    ERROR,
                                    'Error setting permissions',
                                    error=exc_info())

                    # only DTML Methods can have proxy roles
                    if hasattr(ob, '_proxy_roles'):
                        try:
                            ob._proxy_roles = tuple(metadata.getProxyRoles())
                        except:
                            LOG('DirectoryView',
                                ERROR,
                                'Error setting proxy role',
                                error=exc_info())

                    ob_id = ob.getId()
                    data[ob_id] = ob
                    objects.append({'id': ob_id, 'meta_type': ob.meta_type})

        return data, tuple(objects)
Example #6
0
  def _asXML(self):
    """Serialize this sale order transaction as an XML string.

    Builds a <transaction> element holding the header tags, one
    <movement> sub-element per transaction line (plus optional discount
    and delivery movements), and returns the pretty-printed UTF-8 XML.
    Returns None when a payment-mode or arrow mapping is missing.
    """
    transaction = etree.Element('transaction', type="Sale Order")
    tiosafe_sync_list = self.getTioSafeSynchronizationObjectList(object_type='Product')
    erp5_sync_list = self.getERP5SynchronizationObjectList(object_type='Product')
    integration_site = self.getIntegrationSite()

    # marker for checking property existency
    MARKER = object()

    # specific value
    self.stop_date = self.start_date
    #self.reference = self.id
    # list of possible tags for a sale order
    tag_list = (
        'title', 'start_date', 'stop_date', 'reference', 'currency',
    )
    self._setTagList(self, transaction, tag_list)
    self._setTagList(self, transaction, ['category', ], SEPARATOR)
    # set arrow list
    try:
      self._setPaymentMode(transaction)
      self._setArrowTagList(self, transaction)
    except ValueError:
      # A mapping must be missing
      return None

    # order the movement list
    movement_list = []

    # build a list of 2-tuple
    # the first key contains the sort element
    # the second part of the tuple contains a dict which contains all the data
    # of the transaction line
    method_id = self.getPortalType().replace(' ', '')
    portal_type = self.getPortalType().replace(' ', '_').lower()

    module_id = "%s_module" %(portal_type)
    module = getattr(integration_site, module_id)


    # e.g. getSaleOrderLineList on the integration site module; MARKER
    # lets us distinguish "method absent" from any falsy return value.
    getter_line_method = getattr(
        module,
        'get%sLineList' % (method_id,),
        MARKER,
    )
    if getter_line_method is not MARKER:
      # browse each transaction lines, build the sort element and set data
      parameter_kw = {'%s_id' % portal_type: str(self.getId()), }
      for line in getter_line_method(**parameter_kw):
        key_list = ['title', 'resource', 'reference', 'quantity', 'gross_price', 'vat', 'vat_price', 'net_price']
        value_list = [getattr(line, x, '') for x in key_list]
        movement_dict = {'context': line,}
        # set to None the '' value of the list
        for k, v in zip(key_list, value_list):
          movement_dict[k] = v or None

        # Retrieve the gid of the resource
        # NOTE(review): if tiosafe_sync_list is empty, resource_gid stays
        # unbound below -- presumably at least one sync object always
        # exists; confirm.
        for tiosafe_sync in tiosafe_sync_list:
          try:
            brain_node = tiosafe_sync.getObjectFromId(line.product_id)
            resource_gid = brain_node.getGid()
            break
          except (ValueError, AttributeError):
            resource_gid = " Unknown"

        for erp5_sync in erp5_sync_list:
          try:
            resource = erp5_sync.getDocumentFromGid(b16encode(resource_gid))
            break
          except (ValueError, AttributeError):
            resource = None


        # after the work on the line set the resource value which will be
        # render in the xml
        movement_dict['resource'] = resource_gid

        # Work on vat
        if movement_dict['vat']:
          movement_dict['VAT'] = self.getVATCategory(movement_dict['vat'])

        # Lines without a quantity are not rendered at all.
        if movement_dict['quantity'] is None:
          continue

        movement_dict['price'] = movement_dict['net_price']

        # build the element which allows to sort
        movement_list.append(movement_dict)

    # Add Discount
    # NOTE(review): erp5_sync is the loop variable of the loop above and is
    # unbound when erp5_sync_list is empty or no line was processed; verify.
    if self.discount_price > 0:
      discount_gid = b16decode(erp5_sync.getGidFromObject(integration_site.getSourceCarrierValue()))
      discount_dict =  {'price': self.discount_price,
                        'quantity' : -1,
                        'title' : '%s' % (self.discount_title),
                        'reference' : '%s' % (self.discount_title),
                        'resource' : discount_gid,
                        'VAT' : self.getVATCategory(self.discount_tax_rate)
                        }

      movement_list.append(discount_dict)
    # Add delivery
    if self.delivery_price > 0:
      delivery_gid = b16decode(erp5_sync.getGidFromObject(integration_site.getDestinationCarrierValue()))
      delivery_dict =  {'price': self.delivery_price,
                        'quantity' : 1,
                        'title' : self.delivery_title,
                        'reference' : self.delivery_title,
                        'resource' : delivery_gid,
                        'VAT' : self.getVATCategory(self.delivery_tax_rate)
                        }

      movement_list.append(delivery_dict)

    # Order movements by resource gid (Python 2 cmp-style sort).
    def cmp_resource(a,b):
      return cmp(a['resource'], b['resource'])

    movement_list.sort(cmp=cmp_resource)

    # the second part build the XML of the transaction
    # browse the ordered movement list and build the movement list as a result
    # the xml through of the line data in the dict
    for movement_dict in movement_list:
      movement = etree.SubElement(transaction, 'movement')
      # set arrow list on the movement
      if movement_dict.get("context", None) is not None:
        self._setArrowTagList(movement_dict['context'], movement)
      # if exist the following tags in the line dict, add them in the xml
      tag_list = ('resource', 'title', 'reference', 'quantity', 'price', 'VAT')
      for tag in tag_list:
        if tag in movement_dict:
          if movement_dict[tag] is not None:
            element = etree.SubElement(movement, tag)
            if tag == "price":
              element.text = "%.6f" % (float(movement_dict.get(tag, 0.0)),)
            elif tag == "quantity":
              element.text = "%.2f" % (float(movement_dict.get(tag, 0.0)),)
            else:
              element.text = movement_dict[tag]
      # add the categories to the movement
      #for category_value in movement_dict['category']:
      for category_value in movement_dict.get('category', []):
        LOG("category_value %s" %(category_value), 300, "")
        category = etree.SubElement(movement, 'category')
        category.text = category_value

    xml = etree.tostring(transaction, pretty_print=True, encoding='utf-8')
    LOG("asXML returns transaction %s" %(xml,), 300, "")
    return xml
Example #7
0
def synchronizeDynamicModules(context, force=False):
    """
  Allow resetting all classes to ghost state, most likely done after
  adding and removing mixins on the fly

  Most of the time, this reset is only hypothetic:
  * with force=False, the reset is only done if another node resetted
    the classes since the last reset on this node.
  * with force=True, forcefully reset the classes on the current node
    and send out an invalidation to other nodes

  context -- any object from which the portal can be reached
  """
    portal = context.getPortalObject()

    # last_sync caches the 'dynamic_classes' cookie value last seen by this
    # node; a different cookie means another node invalidated the classes.
    global last_sync
    if force:
        # hard invalidation to force sync between nodes
        portal.newCacheCookie('dynamic_classes')
        last_sync = portal.getCacheCookie('dynamic_classes')
    else:
        cookie = portal.getCacheCookie('dynamic_classes')
        if cookie == last_sync:
            # up to date, nothing to do
            return
        last_sync = cookie

    import erp5
    with aq_method_lock:
        # Thanks to TransactionalResource, the '_bootstrapped' global variable
        # is updated in a transactional way. Without it, it would be required to
        # restart the instance if anything went wrong.
        # XXX: In fact, TransactionalResource does not solve anything here, because
        #      portal cookie is unlikely to change and this function will return
        #      immediately, forcing the user to restart.
        #      This may not be so bad after all: it enables the user to do easily
        #      some changes that are required for the migration.
        if portal.id not in _bootstrapped and \
           TransactionalResource.registerOnce(__name__, 'bootstrap', portal.id):
            migrate = False
            from Products.ERP5Type.Tool.PropertySheetTool import PropertySheetTool
            from Products.ERP5Type.Tool.TypesTool import TypesTool
            from Products.ERP5Type.Tool.ComponentTool import ComponentTool
            from Products.ERP5Catalog.Tool.ERP5CatalogTool import ERP5CatalogTool
            try:
                for tool_class in TypesTool, PropertySheetTool, ComponentTool, ERP5CatalogTool:
                    # if the instance has no property sheet tool, or incomplete
                    # property sheets, we need to import some data to bootstrap
                    # (only likely to happen on the first run ever)
                    tool_id = tool_class.id
                    tool = getattr(portal, tool_id, None)

                    if tool is None:
                        if tool_class == ERP5CatalogTool:
                            # Wait till we find that SQL Catalog Tool is installed
                            # Simpy said, we don't want  ERP5 Catalog Tool to be installed
                            # from here. So, we come to 2 cases:
                            # 1. Running ERP5Site with sql catalog_tool : In that case, we end up
                            # running _bootstrap from here, leading to migration.
                            # 2. New ERP5Site : In this case, we don't do anything here, cause
                            # the catalog_tool would be ERP5CatalogTool, so this would just pass.
                            continue
                        tool = tool_class()
                        portal._setObject(tool_id,
                                          tool,
                                          set_owner=False,
                                          suppress_events=True)
                        tool = getattr(portal, tool_id)
                    elif tool._isBootstrapRequired():
                        migrate = True
                    else:
                        continue
                    tool._bootstrap()
                    # Swap the freshly bootstrapped tool to its dynamic
                    # portal-type class.
                    tool.__class__ = getattr(erp5.portal_type,
                                             tool.portal_type)
                # TODO: Create portal_activities here, and even before portal_types:
                #       - all code in ActiveObject could assume that it always exists
                #       - currently, some objects created very early are not indexed
                #         and this would fix this issue
                try:
                    portal.portal_activities.initialize()
                except AttributeError:
                    pass  # no Activity Tool yet

                if migrate:
                    portal.migrateToPortalTypeClass()
                    portal.portal_skins.changeSkin(None)
                    # Record the portal as bootstrapped only once the
                    # transaction actually commits.
                    TransactionalResource(
                        tpc_finish=lambda txn: _bootstrapped.add(portal.id))
                    transaction.get().note('Site migrated')
                    LOG(
                        'ERP5Site', INFO,
                        'Transition successful, please update your'
                        ' business templates')
                else:
                    _bootstrapped.add(portal.id)
            except:
                # Required because the exception may be silently dropped by the caller.
                transaction.doom()
                LOG('ERP5Site',
                    PANIC,
                    "Automatic migration of core tools failed",
                    error=True)
                raise

        LOG("ERP5Type.dynamic", 0, "Resetting dynamic classes")
        try:
            for _, klass in inspect.getmembers(erp5.portal_type,
                                               inspect.isclass):
                # Zope Interface is implemented through __implements__,
                # __implemented__ (both implementedBy instances) and __provides__
                # (ClassProvides instance) attributes set on the class by
                # zope.interface.declarations.implementedByFallback.
                #
                # However both implementedBy and ClassProvides instances keep a
                # reference to the class itself, thus creating a circular references.
                for k in klass.mro():
                    module_name = k.__module__
                    if (module_name.startswith('erp5.') and
                            # Components are reset independently of Portal Types classes
                            not module_name.startswith('erp5.component.')):
                        for attr in ('__implements__', '__implemented__',
                                     '__provides__'):
                            if k.__dict__.get(attr) is not None:
                                delattr(k, attr)

                klass.restoreGhostState()

            # Clear accessor holders of ZODB Property Sheets and Portal Types
            erp5.accessor_holder.clear()
            erp5.accessor_holder.property_sheet.clear()

            for name in erp5.accessor_holder.portal_type.__dict__.keys():
                if name[0] != '_':
                    delattr(erp5.accessor_holder.portal_type, name)

        except Exception:
            # Allow easier debugging when the code is wrong as this
            # exception is catched later and re-raised as a BadRequest
            import traceback
            traceback.print_exc()
            raise

        # It's okay for classes to keep references to old methods - maybe.
        # but we absolutely positively need to clear the workflow chains
        # stored in WorkflowMethod objects: our generation of workflow
        # methods adds/registers/wraps existing methods, but does not
        # remove old chains. Do it now.
        resetRegisteredWorkflowMethod()

        # Some method generations are based on portal methods, and portal
        # methods cache results. So it is safer to invalidate the cache.
        cache_tool = getattr(portal, 'portal_caches', None)
        if cache_tool is not None:
            cache_tool.clearCache()

        # Clear Zope Component Registries (Zope Adapters/Utilities cache lookup)
        # because it contains references to reset dynamic classes (which prevents
        # them from being GC and may create inconsistencies when Interfaces have
        # been changed)
        import zope.component
        gsm = zope.component.getGlobalSiteManager()
        gsm.adapters.changed(gsm)
        gsm.utilities.changed(gsm)
Example #8
0
def initialize(context):
    """Zope product entry point for ERP5Type.

    Registers the product's documents, tools, classes and constructors with
    Zope, installs the workflow factories, then runs every local/product
    registry initialization step in strict dependency order, optionally
    logging each step when DISPLAY_BOOT_PROCESS is set.
    """
    # Import Product Components
    from Tool import (CacheTool, MemcachedTool, SessionTool, TypesTool,
                      WebServiceTool, PropertySheetTool, ComponentTool)
    import Document
    from Base import Base
    import XMLObject
    from ERP5Type import ERP5TypeInformation
    import CodingStyle
    # Define documents, classes, constructors and tools
    object_classes = ()
    content_constructors = ()
    content_classes = (Base, XMLObject.XMLObject, ERP5TypeInformation)
    portal_tools = (CacheTool.CacheTool, MemcachedTool.MemcachedTool,
                    SessionTool.SessionTool, TypesTool.TypesTool,
                    WebServiceTool.WebServiceTool,
                    PropertySheetTool.PropertySheetTool,
                    ComponentTool.ComponentTool)
    # Do initialization step
    initializeProduct(context,
                      this_module,
                      globals(),
                      document_module=Document,
                      document_classes=document_classes,
                      object_classes=object_classes,
                      portal_tools=portal_tools,
                      content_constructors=content_constructors,
                      content_classes=content_classes)

    # Register our Workflow factories directly (if on CMF 2)
    Products.ERP5Type.Workflow.registerAllWorkflowFactories(context)

    # Registry bootstrap steps, executed in this exact order:
    # constraints, property sheets, product documents, local documents,
    # product interactors, local interactors, and finally the installation
    # of the interactor class registry.
    registry_step_list = (
        ('Products.ERP5Type.Utils', 'initializeLocalConstraintRegistry'),
        ('Products.ERP5Type.Utils', 'initializeLocalPropertySheetRegistry'),
        ('Products.ERP5Type.InitGenerator', 'initializeProductDocumentRegistry'),
        ('Products.ERP5Type.Utils', 'initializeLocalDocumentRegistry'),
        ('Products.ERP5Type.InitGenerator', 'initializeProductInteractorRegistry'),
        ('Products.ERP5Type.Utils', 'initializeLocalInteractorRegistry'),
        ('Products.ERP5Type.InitGenerator', 'installInteractorClassRegistry'),
    )
    for module_name, step_name in registry_step_list:
        # Import each step lazily, right before running it, exactly as the
        # straight-line version did with per-step "from ... import ...".
        step = getattr(__import__(module_name, fromlist=[step_name]), step_name)
        if DISPLAY_BOOT_PROCESS:
            LOG('ERP5Type.__init__', INFO, step_name)
        step()
Exemple #9
0
                            object.manage_setLocalRoles(userid, ['Owner'])

        object.manage_afterAdd(object, self)
        return id

    def _delObject(self, id, dp=1):
        """Delete the sub-object ``id`` after notifying it via manage_beforeDelete.

        BeforeDeleteException (an explicit veto by the object) and ZODB
        ConflictError propagate and abort the deletion; any other exception
        from manage_beforeDelete is only logged, so that broken objects can
        still be removed.  ``dp`` is accepted for API compatibility and is
        not used here.
        """
        object = self._getOb(id)
        try:
            object.manage_beforeDelete(object, self)
        except BeforeDeleteException, ob:
            # The object vetoed its own deletion: honor it.
            raise
        except ConflictError:
            # Never swallow ZODB conflicts: the transaction must be retried.
            raise
        except:
            # A broken manage_beforeDelete must not prevent deletion;
            # log the traceback and carry on.
            LOG('Zope',
                ERROR,
                'manage_beforeDelete() threw',
                error=sys.exc_info())
        self._delOb(id)

    # Aliases so a folder instance supports mapping-style access
    # (len(folder), folder.keys(), folder.values(), folder.items()),
    # delegating to the object* API defined on the class.
    __len__ = objectCount
    keys = objectIds
    values = objectValues
    items = objectItems

    # Backward compatibility: old callers test containment via hasObject().
    hasObject = has_key

    # Declare the permission for the mapping-style get() accessor.
    security.declareProtected(access_contents_information, 'get')

    def get(self, name, default=None):
Exemple #10
0
    def checkConflict(self, tag, document, previous_value, new_value, domain,
                      xml, signature):
        """Detect and resolve a synchronization conflict for a single tag.

        Compares the value currently stored on ``document`` with the
        previously synchronized value and the incoming one.  Returns a list
        of conflict objects (possibly empty).  When there is no conflict the
        incoming change is applied: sub-objects are deleted when
        ``new_value`` is None for phone/fax/email tags, otherwise the
        document is edited.  ``relation`` tags are delegated entirely to
        self._setRelation.
        """
        if tag == "relation":
            # Relations have dedicated conflict/merge handling.
            return self._setRelation(document, previous_value, new_value,
                                     domain, xml, signature)
        else:
            # Read the value currently stored on the document for this tag.
            # The "x and y" form yields a falsy value when the sub-object is
            # missing, otherwise the telephone number string.
            if tag == "phone":
                current_value = document.get('default_telephone', None) and \
                                document.default_telephone.getTelephoneNumber("")
            elif tag == "cellphone":
                current_value = document.get('mobile_telephone', None) and \
                                document.mobile_telephone.getTelephoneNumber("")
            elif tag == "fax":
                current_value = document.get('default_fax', None) and \
                                document.default_fax.getTelephoneNumber("")
            elif tag == "birthday":
                current_value = str(document.getStartDate(""))
            elif tag == "email":
                current_value = str(document.getDefaultEmailText(""))
            else:
                # Generic tags map directly onto document attributes.
                try:
                    current_value = getattr(document, tag)
                except AttributeError:
                    current_value = None

            if current_value:
                current_value = current_value.encode('utf-8')
            # A conflict exists when the stored value matches neither the
            # incoming value, nor the previously synchronized one, nor None.
            if current_value not in [new_value, previous_value, None]:
                LOG(
                    "ERP5NodeConduit.checkConflict", ERROR,
                    "Generating a conflict for tag %s, current is %s, previous is %s, new is %s"
                    % (tag, current_value, previous_value, new_value))
                return [
                    self._generateConflict(document.getPhysicalPath(), tag,
                                           xml, current_value, new_value,
                                           signature),
                ]
            else:
                if new_value is None:
                    # We are deleting some properties: drop the matching
                    # sub-object if it exists, otherwise clear the property.
                    if tag == "fax":
                        if getattr(document, "default_fax", None):
                            document.manage_delObjects("default_fax")
                    elif tag == "phone":
                        if getattr(document, "default_telephone", None):
                            document.manage_delObjects("default_telephone")
                    elif tag == "cellphone":
                        if getattr(document, "mobile_telephone", None):
                            document.manage_delObjects("mobile_telephone")
                    elif tag == "email":
                        if getattr(document, "default_email", None):
                            document.manage_delObjects("default_email")
                    else:
                        kw = {tag: new_value}
                        self.editDocument(object=document, **kw)
                else:
                    # Birthdays arrive as strings but are stored as DateTime.
                    if tag == 'birthday' and isinstance(new_value, str) \
                           and len(new_value):
                        new_value = DateTime(new_value)
                    kw = {tag: new_value}
                    self.editDocument(object=document, **kw)
            return []
Exemple #11
0
def fixSkinNames(self, REQUEST=None, file=None, dry_run=0):
    """
    Fix bad skin names.

    This method does:

      - Check all the contents of all skins.

      - Check immediate_view, constructor_path and actions in all portal types.

      - Check skins of all business templates.

      - Check actbox_url in transitions and worklists and scripts of all workflows.

      - Rename skins.

    Parameters:
      REQUEST -- current request, fetched with get_request() when None.
      file -- name of a CSV file in the data directory describing renames as
              (folder, old name, new name, meta type[, removal marker]).
      dry_run -- when true, only report what would change, modify nothing.

    Returns a human-readable report of every (would-be) modification.
    """
    if REQUEST is None:
        REQUEST = get_request()

    # Without a CSV file there is nothing to do: explain how to use the API.
    if file is None:
        msg = 'You must put a CSV file inside the data directory, and specify %s/ERP5Site_fixSkinNames?file=NAME \n\n' % self.absolute_url(
        )
        msg += 'The template of a CSV file is available via %s/ERP5Site_checkSkinNames?csv=1 \n\n' % self.absolute_url(
        )
        msg += 'This does not modify anything by default. If you really want to fix skin names, specify %s/ERP5Site_fixSkinNames?file=NAME&dry_run=0 \n\n' % self.absolute_url(
        )
        return msg

    with open(os.path.join(data_dir, file)) as file:

        # Simple record holder for one rename/removal entry.
        class NamingInformation:
            pass

        info_list = []
        reader = csv.reader(file)
        for row in reader:
            folder, name, new_name, meta_type = row[:4]
            # A non-empty fifth column marks the skin for removal; its value
            # replaces new_name in the report below.
            if len(row) > 4 and len(row[4]) > 0:
                removed = 1
                new_name = row[4]
            else:
                removed = 0
            if meta_type == 'Meta Type': continue  # skip the CSV header row
            if name == new_name: continue  # no change requested
            # Check the existence of the skin and the meta type. Paranoid?
            #if self.portal_skins[folder][name].meta_type != meta_type:
            #  raise RuntimeError, '%s/%s has a different meta type' % (folder, name)
            info = NamingInformation()
            info.meta_type = meta_type
            info.folder = folder
            info.name = name
            info.new_name = new_name
            info.regexp = re.compile('\\b' + re.escape(name) +
                                     '\\b')  # This is used to search the name
            info.removed = removed
            info_list.append(info)

    # Now we have information enough. Check the skins.
    msg = ''
    path_list = getSkinPathList(self)
    for path in path_list:
        skin = self.portal_skins.restrictedTraverse(path)
        try:
            text = skin.manage_FTPget()
        except:
            type, value, traceback = sys.exc_info()
            line = 'WARNING: the skin %s could not be retrieved because of the exception %s: %s\n' % (
                path, str(type), str(value))
            LOG('fixSkinNames', 0, line)
            msg += '%s\n' % line
        else:
            name_list = []
            for info in info_list:
                if info.regexp.search(text) is not None:
                    text = info.regexp.sub(info.new_name, text)
                    name_list.append(info.name)
            if len(name_list) > 0:
                line = '%s is modified for %s' % ('portal_skins/' + path,
                                                  ', '.join(name_list))
                LOG('fixSkinNames', 0, line)
                msg += '%s\n' % line
                if not dry_run:
                    # Filesystem skins are rewritten in place; ZODB skins go
                    # through the FTP interface.
                    if skin.meta_type in fs_skin_spec:
                        with open(expandpath(skin.getObjectFSPath()),
                                  'w') as f:
                            f.write(text)
                    else:
                        REQUEST['BODY'] = text
                        skin.manage_FTPput(REQUEST, REQUEST.RESPONSE)

    # Check the portal types.
    for t in self.portal_types.objectValues():
        # Initial view name.
        text = t.immediate_view
        for info in info_list:
            if info.name == text:
                line = 'Initial view name of %s is modified for %s' % (
                    'portal_types/' + t.id, text)
                LOG('fixSkinNames', 0, line)
                msg += '%s\n' % line
                if not dry_run:
                    t.immediate_view = info.new_name
                break
        # Constructor path.
        text = getattr(t, 'constructor_path', None)
        if text is not None:
            for info in info_list:
                if info.name == text:
                    line = 'Constructor path of %s is modified for %s' % (
                        'portal_types/' + t.id, text)
                    LOG('fixSkinNames', 0, line)
                    msg += '%s\n' % line
                    if not dry_run:
                        t.constructor_path = info.new_name
                    break
        # Actions.
        for action in t.listActions():
            text = action.action.text
            for info in info_list:
                if info.regexp.search(text) is not None:
                    text = info.regexp.sub(info.new_name, text)
                    line = 'Action %s of %s is modified for %s' % (
                        action.getId(), 'portal_types/' + t.id, info.name)
                    LOG('fixSkinNames', 0, line)
                    msg += '%s\n' % line
                    if not dry_run:
                        action.action.text = text
                    break

    # Check the portal templates.
    template_tool = getattr(self, 'portal_templates', None)
    # Check the existence of template tool, because an older version of ERP5 does not have it.
    if template_tool is not None:
        for template in template_tool.contentValues(
                filter={'portal_type': 'Business Template'}):
            # Skins.
            skin_id_list = []
            name_list = []
            for skin_id in template.getTemplateSkinIdList():
                for info in info_list:
                    if info.name == skin_id:
                        name_list.append(skin_id)
                        skin_id = info.new_name
                        break
                skin_id_list.append(skin_id)
            if len(name_list) > 0:
                line = 'Skins of %s is modified for %s' % (
                    'portal_templates/' + template.getId(),
                    ', '.join(name_list))
                LOG('fixSkinNames', 0, line)
                msg += '%s\n' % line
                if not dry_run:
                    template.setTemplateSkinIdList(skin_id_list)
            # Paths.
            path_list = []
            name_list = []
            for path in template.getTemplatePathList():
                for info in info_list:
                    if info.regexp.search(path):
                        # BUGFIX: this used to append `skin_id`, a stale
                        # variable leaking from the skins loop above (NameError
                        # when the template has no skins, wrong report text
                        # otherwise).  Report the matched name instead.
                        name_list.append(info.name)
                        path = info.regexp.sub(info.new_name, path)
                        break
                path_list.append(path)
            if len(name_list) > 0:
                line = 'Paths of %s is modified for %s' % (
                    'portal_templates/' + template.getId(),
                    ', '.join(name_list))
                LOG('fixSkinNames', 0, line)
                msg += '%s\n' % line
                if not dry_run:
                    template.setTemplatePathList(path_list)

    # Workflows.
    for wf in self.portal_workflow.objectValues():
        # Transitions.
        for id in wf.transitions.objectIds():
            transition = wf.transitions._getOb(id)
            text = transition.actbox_url
            for info in info_list:
                if info.regexp.search(text) is not None:
                    text = info.regexp.sub(info.new_name, text)
                    line = 'Transition %s of %s is modified for %s' % (
                        id, 'portal_workflow/' + wf.id, info.name)
                    LOG('fixSkinNames', 0, line)
                    msg += '%s\n' % line
                    if not dry_run:
                        transition.actbox_url = text
                    break
        # Worklists.
        for id in wf.worklists.objectIds():
            worklist = wf.worklists._getOb(id)
            text = worklist.actbox_url
            for info in info_list:
                if info.regexp.search(text) is not None:
                    text = info.regexp.sub(info.new_name, text)
                    line = 'Worklist %s of %s is modified for %s' % (
                        id, 'portal_workflow/' + wf.id, info.name)
                    LOG('fixSkinNames', 0, line)
                    msg += '%s\n' % line
                    if not dry_run:
                        worklist.actbox_url = text
                    break
        # Scripts.
        for id in wf.scripts.objectIds():
            script = wf.scripts._getOb(id)
            text = script.manage_FTPget()
            name_list = []
            for info in info_list:
                if info.regexp.search(text) is not None:
                    text = info.regexp.sub(info.new_name, text)
                    name_list.append(info.name)
            if len(name_list) > 0:
                line = 'Script %s of %s is modified for %s' % (
                    id, 'portal_workflow/' + wf.id, ', '.join(name_list))
                LOG('fixSkinNames', 0, line)
                msg += '%s\n' % line
                if not dry_run:
                    REQUEST['BODY'] = text
                    script.manage_FTPput(REQUEST, REQUEST.RESPONSE)

    # Rename the skins.
    if not dry_run:
        for info in info_list:
            try:
                if info.meta_type in fs_skin_spec:
                    # Filesystem skin: remove or rename the file itself.
                    skin = self.portal_skins[info.folder][info.name]
                    old_path = expandpath(skin.getObjectFSPath())
                    new_path = info.regexp.sub(info.new_name, old_path)
                    if info.removed:
                        os.remove(old_path)
                    else:
                        os.rename(old_path, new_path)
                else:
                    # ZODB skin: remove or rename the folder entry.
                    folder = self.portal_skins[info.folder]
                    if info.removed:
                        folder.manage_delObjects([info.name])
                    else:
                        folder.manage_renameObjects([info.name],
                                                    [info.new_name])
            except:
                type, value, traceback = sys.exc_info()
                if info.removed:
                    line = 'WARNING: the skin %s could not be removed because of the exception %s: %s\n' % (
                        info.name, str(type), str(value))
                    LOG('fixSkinNames', 0, line)
                    msg += '%s\n' % line
                else:
                    line = 'WARNING: the skin %s could not be renamed to %s because of the exception %s: %s\n' % (
                        info.name, info.new_name, str(type), str(value))
                    LOG('fixSkinNames', 0, line)
                    msg += '%s\n' % line

    return msg
Exemple #12
0
    def _updateXupdateDel(self,
                          document=None,
                          xml=None,
                          previous_xml=None,
                          **kw):
        """Handle an XUpdate "remove" directive (called from updateNode).

        Removes the element selected by the directive's XPath from
        ``document``, or delegates to the conflict checkers for address and
        generic tags.  Returns a list of conflict objects (empty on
        success).
        """
        if DEBUG:
            LOG(
                "ERP5NodeConduit._updateXupdateDel", INFO,
                "doc = %s, xml = %s" % (
                    document.getPath(),
                    etree.tostring(xml, pretty_print=1),
                ))
        conflict_list = []
        # Last XPath step, e.g. "address[2]" or "phone".
        tag = xml.get('select').split('/')[-1]
        # NOTE(review): `keyword` appears unused in this method — possibly a
        # leftover; confirm before removing.
        keyword = {}

        # Retrieve the previously synchronized value through the same XPath
        # applied to the previous XML tree.
        xpath_expression = xml.get('select')
        selected_previous_xml = previous_xml.xpath(xpath_expression)
        try:
            previous_value = selected_previous_xml[0].text.encode('utf-8')
        except (IndexError, AttributeError):
            # No previous node, or a node without text.
            previous_value = None

        # Specific handling for address container and address sub-elements.
        address_tag = tag.split('[')[0]
        if address_tag == "address":
            try:
                # Case "/node/address[x]": extract the 1-based index.
                address_index = int(tag.split('[')[-1].split(']')[0])
            except ValueError:
                # Case "/node/address": no index means the first address.
                address_index = 1

            if address_index == 1:
                address_id = "default_address"
            else:
                # XUpdate indices start at one, index 1 being
                # default_address; the remaining addresses are 0-based in
                # the catalog result, hence x - 2.
                address_index -= 2
                # Address list of the person without default_address.
                address_list = document.searchFolder(
                    portal_type='Address',
                    sort_on=(['id', 'ASC'], ),
                    where_expression='id != "default_address"',
                )
                address_id = address_list[address_index].getId()
            try:
                document.manage_delObjects(address_id)
            except (IndexError, BadRequest):
                # The address no longer exists: report a conflict instead.
                conflict_list.append(
                    self._generateConflict(document.getPhysicalPath(),
                                           tag, xml, None, None,
                                           kw.get('signature')))
                return conflict_list

        elif address_tag in ADDRESS_TAG_LIST:
            # Sub-element of an address (street, zip, city, country...).
            return self.checkAddressConflict(document, address_tag, xml,
                                             previous_value, None,
                                             kw.get('signature'))
        else:
            # Generic tag: deletion is expressed as new_value=None.
            return self.checkConflict(tag, document, previous_value, None,
                                      kw.get('domain'), xml,
                                      kw.get('signature'))

        # Reached only after a successful address deletion: no conflict.
        return conflict_list
Exemple #13
0
def valueDefaultRenderer(value):
  """Fallback SQL renderer: log the unexpected type, then render the value
  as its escaped string representation."""
  message = 'Unhandled value class: %s (%r). Converted to string and escaped.' % (
    value.__class__.__name__, value)
  LOG('OperatorBase', 0, message)
  return escapeString(str(value))
Exemple #14
0
def WorkflowTool_refreshWorklistCache(self):
  """
    Refresh worklist cache table.

    - delete everything from that table
      - if it fails, create the table
    - insert new lines
      - if it fails, recreate the table and retry
  """
  # Contrary to WorkflowTool_listActions, related keys are NOT supported.
  Base_zInsertIntoWorklistTable = getattr(self, 'Base_zInsertIntoWorklistTable', None)
  # Silently do nothing when the insertion script is absent (business
  # template not installed).
  if Base_zInsertIntoWorklistTable is not None:
    # XXX: Code below is duplicated from WorkflowTool_listActions
    info = self._getOAI(None)
    worklist_dict = {}
    wf_ids = self.getWorkflowIds()
    for wf_id in wf_ids:
      wf = self.getWorkflowById(wf_id)
      if wf is not None:
        # check_guard=False: the cache stores counts for every role; guards
        # are applied later, at read time.
        a = wf.getWorklistVariableMatchDict(info, check_guard=False)
        if a is not None:
          worklist_dict[wf_id] = a
    # End of duplicated code
    if len(worklist_dict):
      Base_zClearWorklistTable = getattr(self, 'Base_zClearWorklistTable', None)
      if Base_zClearWorklistTable is None:
        LOG('WorkflowTool', 100, 'Base_zClearWorklistTable cannot be found. ' \
            'Falling back to former refresh method. Please update ' \
            'erp5_worklist_sql business template.')
        self.Base_zCreateWorklistTable()
      else:
        try:
          self.Base_zClearWorklistTable()
        except ProgrammingError, error_value:
          # 1146 = table does not exist
          if error_value[0] != 1146:
            raise
          self.Base_zCreateWorklistTable()
      portal_catalog = getToolByName(self, 'portal_catalog')
      search_result = portal_catalog.unrestrictedSearchResults
      sql_catalog = portal_catalog.getSQLCatalog()
      # Columns available in the cache table: the count column plus the
      # configured worklist criterion columns.
      table_column_id_set = ImmutableSet(
          [COUNT_COLUMN_TITLE] + self.Base_getWorklistTableColumnIDList())
      # All security-related catalog columns (uid groups, roles, local
      # roles); duplicates would break the set/list length assertion below.
      security_column_id_list = list(
        sql_catalog.getSQLCatalogSecurityUidGroupsColumnsDict().values()) + \
        [x[1] for x in sql_catalog.getSQLCatalogRoleKeysList()] + \
        [x[1] for x in sql_catalog.getSQLCatalogLocalRoleKeysList()]
      security_column_id_set = set(security_column_id_list)
      assert len(security_column_id_set) == len(security_column_id_list), (
        security_column_id_set, security_column_id_list)
      del security_column_id_list
      security_column_id_set.difference_update(
        self._getWorklistIgnoredSecurityColumnSet())
      for security_column_id in security_column_id_set:
        assert security_column_id in table_column_id_set
      # Worklists sharing the same criteria are grouped into one query.
      (worklist_list_grouped_by_condition, worklist_metadata) = \
        groupWorklistListByCondition(
          worklist_dict=worklist_dict,
          sql_catalog=sql_catalog)
      assert COUNT_COLUMN_TITLE in table_column_id_set
      for grouped_worklist_dict in worklist_list_grouped_by_condition:
        # Generate the query for this worklist_list
        (total_criterion_id_list, query) = \
          getWorklistListQuery(
            getQuery=SimpleQuery,
            grouped_worklist_dict=grouped_worklist_dict,
          )
        for criterion_id in total_criterion_id_list:
          assert criterion_id in table_column_id_set
        for security_column_id in security_column_id_set:
          assert security_column_id not in total_criterion_id_list
          total_criterion_id_list.append(security_column_id)
        group_by_expression = ', '.join(total_criterion_id_list)
        assert COUNT_COLUMN_TITLE not in total_criterion_id_list
        select_expression = 'count(*) as %s, %s' % (COUNT_COLUMN_TITLE,
                                                    group_by_expression)
        search_result_kw = {'select_expression': select_expression,
                            'group_by_expression': group_by_expression,
                            'query': query,
                            'limit': None}
        #LOG('refreshWorklistCache', WARNING, 'Using query: %s' % \
        #    (search_result(src__=1, **search_result_kw), ))
        catalog_brain_result = search_result(**search_result_kw)
        # Transpose catalog rows into per-column value lists, as expected by
        # the multi-row insertion ZSQL method.
        value_column_dict = dict([(x, []) for x in table_column_id_set])
        for catalog_brain_line in catalog_brain_result.dictionaries():
          for column_id, value in catalog_brain_line.iteritems():
            if column_id in value_column_dict:
              value_column_dict[column_id].append(value)
        if len(value_column_dict[COUNT_COLUMN_TITLE]):
          try:
            Base_zInsertIntoWorklistTable(**value_column_dict)
          except (ProgrammingError, OperationalError), error_value:
            # OperationalError 1054 = unknown column
            if isinstance(error_value, OperationalError) and error_value[0] != 1054:
              raise
            LOG('WorkflowTool', 100, 'Insertion in worklist cache table ' \
                'failed. Recreating table and retrying.',
                error=sys.exc_info())
            self.Base_zCreateWorklistTable()
            Base_zInsertIntoWorklistTable(**value_column_dict)
Exemple #15
0
    def render(self, field, key, value, REQUEST, render_prefix=None):
        """
    This is where most things happens
    """
        main_content = ""
        here = REQUEST['here']
        selection_name = field.get_value('selection_name')
        default_params = field.get_value('default_params')
        chart_title = field.get_value('chart_title')
        data_method = field.get_value('data_method')
        chart_style = field.get_value('chart_style')
        x_title = field.get_value('x_title')
        y_title = field.get_value('y_title')
        bg_transparent = field.get_value('bg_transparent')

        selection = here.portal_selections.getSelectionFor(selection_name,
                                                           REQUEST=REQUEST)
        LOG(
            'ZGDChart.render', 0, 'selection: %s, selection_name: %s' %
            (str(selection), str(selection_name)))

        # This is the default data, this is just in the case there is not method given
        data = {'chart_data': []}

        # Retrieve the data with the data_method
        if hasattr(here, data_method):
            LOG('ZGDChart.render', 0, 'found method')
            data_method = getattr(here, data_method)
            data['chart_data'] = data_method()

        data['chart_parameter'] = {
            'zgdchart_runtime_title': chart_title,
            'zgdchart_runtime_xtitle': x_title,
            'zgdchart_runtime_ytitle': y_title,
            'zgdchart_runtime_type': 'Line_3D',
            'zgdchart_runtime_bg_transparent': bg_transparent
        }

        # Creation selection if needed
        if selection is None:
            selection = Selection(selection_name, params=data)
        else:
            LOG('ZGDChart.render', 0, 'selection is not None')
            kw = {'params': data}
            selection.edit(**kw)

        here.portal_selections.setSelectionFor(selection_name,
                                               selection,
                                               REQUEST=REQUEST)

        if len(data['chart_data']) > 0:

            main_content = """\
<div class="ChartContent">
 <table border="0" cellpadding="0" cellspacing="0"">
  <tr>
   <td valign="middle" align="center" nowrap>
    <img src="%s" title="Chart" border="0" alt="img"/">
   </td>
  </tr>
 </table>
</div>""" % str(chart_style + '?selection_name=' + selection_name)

        return main_content
Exemple #16
0
    def prepareContents(self, registry, register_subdirs=0):
        """Create an object for each file of this filesystem directory.

        Returns (data, objects) where ``data`` maps object id to the created
        object and ``objects`` is a tuple of {'id', 'meta_type'} dicts.
        Subdirectories become nested DirectoryViews (new ones are registered
        only when ``register_subdirs`` is set); files are instantiated with
        the type resolved from the .objects metadata or the file extension.
        """
        fp = expandpath(self.filepath)
        data = {}
        objects = []
        # Mapping of entry/base name -> meta type, from the .objects file.
        types = self._readTypesFile()
        for entry in _filtered_listdir(fp):
            if not self._isAllowableFilename(entry):
                continue
            e_filepath = path.join(self.filepath, entry)
            e_fp = expandpath(e_filepath)
            if path.isdir(e_fp):
                # Add a subdirectory only if it was previously registered,
                # unless register_subdirs is set.
                info = registry.getDirectoryInfo(e_filepath)
                if info is None and register_subdirs:
                    # Register unknown subdirs
                    registry.registerDirectoryByPath(e_fp)
                    info = registry.getDirectoryInfo(e_filepath)
                if info is not None:
                    mt = types.get(entry)
                    t = None
                    if mt is not None:
                        t = registry.getTypeByMetaType(mt)
                    if t is None:
                        t = DirectoryView
                    ob = t(entry, e_filepath)
                    ob_id = ob.getId()
                    data[ob_id] = ob
                    objects.append({'id': ob_id, 'meta_type': ob.meta_type})
            else:
                # Split the entry into base name and (normalized) extension.
                pos = rfind(entry, '.')
                if pos >= 0:
                    name = entry[:pos]
                    ext = path.normcase(entry[pos + 1:])
                else:
                    name = entry
                    ext = ''
                if not name or name == 'REQUEST':
                    # Not an allowable id.
                    continue
                mo = bad_id(name)
                if mo is not None and mo != -1:  # Both re and regex formats
                    # Not an allowable id.
                    continue
                # Resolve the object type: full entry name first, then base
                # name, then the file extension.
                t = None
                mt = types.get(entry, None)
                if mt is None:
                    mt = types.get(name, None)
                if mt is not None:
                    t = registry.getTypeByMetaType(mt)
                if t is None:
                    t = registry.getTypeByExtension(ext)

                if t is not None:
                    properties = self._readProperties(e_fp + '.properties')
                    try:
                        ob = t(name,
                               e_filepath,
                               fullname=entry,
                               properties=properties)
                    except:
                        # Instantiation failed: expose a BadFile carrying the
                        # traceback instead of breaking the whole directory.
                        import traceback
                        typ, val, tb = exc_info()
                        try:
                            exc_lines = traceback.format_exception(
                                typ, val, tb)
                            LOG('DirectoryView', ERROR, join(exc_lines, '\n'))

                            ob = BadFile(name,
                                         e_filepath,
                                         exc_str=join(exc_lines, '\r\n'),
                                         fullname=entry)
                        finally:
                            tb = None  # Avoid leaking frame!

                    # FS-based security
                    try:
                        permissions = self._readSecurity(e_fp + '.security')
                        if permissions is not None:
                            for name in permissions.keys():
                                acquire, roles = permissions[name]
                                ob.manage_permission(name, roles, acquire)
                    except:
                        # Best-effort: a bad .security file must not prevent
                        # the object from being served.
                        LOG('DirectoryView',
                            ERROR,
                            'Error setting permission from .security file information',
                            error=exc_info())

                    ob_id = ob.getId()
                    data[ob_id] = ob
                    objects.append({'id': ob_id, 'meta_type': ob.meta_type})

        return data, tuple(objects)
Exemple #17
0
    def asSQLExpression(self, sql_catalog, only_group_columns):
        """Render this query as an SQLExpression.

        Lazily builds the ColumnMap, registers every column the query
        uses (extra columns, group-by, select and order-by entries),
        builds the map, then rewrites group_by_list, select_dict and
        order_by_list with their mapped SQL representations before
        assembling the final SQLExpression.

        Unmappable group-by/select columns are passed through verbatim
        with a warning; unmappable order-by entries are dropped.

        sql_catalog -- catalog providing column/table knowledge
        only_group_columns -- forwarded to sub-query rendering
        """
        # The original code wrapped everything below in a vestigial
        # "if 1:" block; it has been removed without behaviour change.
        column_map = self.column_map
        if column_map is None:
            # XXX: should we provide a way to register column map as a separate
            # method or do it here ?
            # Column Map was not built yet, do it.
            column_map = ColumnMap(
                catalog_table_name=self.catalog_table_name,
                left_join_list=self.left_join_list,
                inner_join_list=self.inner_join_list,
                implicit_join=self.implicit_join,
            )
            self.column_map = column_map
        # NOTE(review): registration and build run on every call, even when a
        # previously-built column_map is reused — confirm this is intended.
        column_map.registerTable(
            self.catalog_table_name,
            self.catalog_table_alias,
        )
        for extra_column in self.extra_column_list:
            column_map.registerColumn(extra_column)
        for column in self.group_by_list:
            column_map.registerColumn(column)
        for alias, column in self.select_dict.iteritems():
            if column is None:
                # A None value means the alias itself is the column.
                column = alias
            else:
                column_map.ignoreColumn(alias)
            column_map.registerColumn(column)
        for order_by in self.order_by_list:
            column_map.registerColumn(order_by[0])
        self.query.registerColumnMap(sql_catalog, column_map)
        column_map.build(sql_catalog)
        # Replace given group_by_list entries by their mapped representations.
        new_column_list = []
        append = new_column_list.append
        for column in self.group_by_list:
            try:
                append(column_map.asSQLColumn(column))
            except KeyError:
                LOG(
                    'EntireQuery', WARNING,
                    'Group-by column %r could not be mapped, but is passed through. This use is strongly discouraged.'
                    % (column, ))
                append(column)
        self.group_by_list = new_column_list
        # Build a dictionary from select_dict aliasing their mapped representations
        self.final_select_dict = select_dict = {}
        for alias, raw_column in self.select_dict.iteritems():
            if raw_column is None:
                column = alias
                if '.' in alias:
                    # If given column is pre-mapped, strip table name from its alias.
                    _, alias = alias.split('.')
                    alias = alias.strip('`()')
            else:
                column = raw_column
            try:
                rendered = column_map.asSQLColumn(column)
            except KeyError:
                LOG(
                    'EntireQuery', WARNING,
                    'Select column %r could not be mapped, but is passed through. This use is strongly discouraged.'
                    % (column, ))
                rendered = column
            select_dict[alias] = rendered
        # Replace given order_by_list entries by their mapped representations.
        new_order_by_list = []
        append = new_order_by_list.append
        for order_by in self.order_by_list:
            column = order_by[0]
            try:
                rendered = column_map.asSQLColumn(column)
            except KeyError:
                LOG(
                    'EntireQuery', WARNING,
                    'Order by %r ignored: it could not be mapped to a known column.'
                    % (order_by, ))
                rendered = None
            if rendered is not None:
                # Preserve direction/cast flags carried after the column name.
                append((rendered, ) + tuple(order_by[1:]))
        self.order_by_list = new_order_by_list
        # generate SQLExpression from query
        sql_expression_list = [
            self.query.asSQLExpression(sql_catalog, column_map,
                                       only_group_columns)
        ]
        append = sql_expression_list.append
        for join_query in column_map.iterJoinQueryList():
            append(
                join_query.asSQLExpression(sql_catalog, column_map,
                                           only_group_columns))
        # XXX: join expressions based on column_map.getJoinTableAliasList are
        # now generated by ColumnMap into its table_definition during build().

        table_alias_dict = column_map.getTableAliasDict()
        from_expression = column_map.getTableDefinition()
        # Exactly one of the two representations must be produced.
        assert ((from_expression is None) !=
                (table_alias_dict is None)), ("Got both a from_expression "
                                              "and a table_alias_dict")
        self.sql_expression_list = sql_expression_list
        # TODO: wrap the table_alias_dict above into a TableDefinition as well,
        # even without a legacy_table_definition.
        return SQLExpression(self,
                             table_alias_dict=table_alias_dict,
                             from_expression=from_expression,
                             order_by_list=self.order_by_list,
                             group_by_list=self.group_by_list,
                             select_dict=self.final_select_dict,
                             limit=self.limit,
                             where_expression_operator='and',
                             sql_expression_list=self.sql_expression_list)
 def log_error(self, ex='', inst='', msg=""):
     """Log an INFO entry describing this adapter's gallery state.

     ex, inst and msg are free-form context strings appended to the entry.
     """
     details = "%s adapter, gallery is %s\n%s\n%s\n%s" % (
         self.name, str(self.gallery), msg, ex, inst)
     LOG('collective.plonetruegallery', INFO, details)
Exemple #19
0
def generatePortalTypeClass(site, portal_type_name):
    """
  Given a portal type, look up in Types Tool the corresponding
  Base Type object holding the definition of this portal type,
  and computes __bases__ and __dict__ for the class that will
  be created to represent this portal type

  Returns tuple with 4 items:
    - base_tuple: a tuple of classes to be used as __bases__
    - base_category_list: categories defined on the portal type
        (and portal type only: this excludes property sheets)
    - interface_list: list of zope interfaces the portal type implements
    - attribute dictionary: any additional attributes to put on the class
  """
    # LOG("ERP5Type.dynamic", INFO, "Loading portal type " + portal_type_name)

    # Module-level registry; its 'generating' flags are mutated below to
    # detect re-entrant generation of the same portal type.
    global core_portal_type_class_dict

    portal_type_category_list = []
    attribute_dict = dict(portal_type=portal_type_name,
                          _categories=[],
                          constraints=[])

    if portal_type_name in core_portal_type_class_dict:
        if not core_portal_type_class_dict[portal_type_name]['generating']:
            # Loading the (full) outer portal type class
            core_portal_type_class_dict[portal_type_name]['generating'] = True
        else:
            # Loading the inner portal type class without any mixin,
            # interface or Property Sheet
            klass = _importFilesystemClass(
                document_class_registry.get(
                    core_portal_type_class_dict[portal_type_name]
                    ['type_class']))

            # LOG("ERP5Type.dynamic", INFO,
            #     "Loaded portal type %s (INNER)" % portal_type_name)

            # Don't do anything else, just allow to load fully the outer
            # portal type class
            return ((klass, ), [], [], attribute_dict)

    # Do not use __getitem__ (or _getOb) because portal_type may exist in a
    # type provider other than Types Tool.
    portal_type = getattr(site.portal_types, portal_type_name, None)

    type_class = None

    if portal_type is not None:
        # type_class has a compatibility getter that should return
        # something even if the field is not set (i.e. Base Type object
        # was not migrated yet). It only works if factory_method_id is set.
        type_class = portal_type.getTypeClass()

        # The Tools used to have 'Folder' or None as type_class instead of
        # 'NAME Tool', so make sure the type_class is correct
        #
        # NOTE: under discussion so might be removed later on
        if portal_type_name.endswith('Tool') and type_class in ('Folder',
                                                                None):
            type_class = portal_type_name.replace(' ', '')

        mixin_list = portal_type.getTypeMixinList()
        interface_list = portal_type.getTypeInterfaceList()
        portal_type_category_list = portal_type.getTypeBaseCategoryList()
        attribute_dict['_categories'] = portal_type_category_list[:]
        acquire_local_role = bool(portal_type.getTypeAcquireLocalRole())
    else:
        LOG(
            "ERP5Type.dynamic", WARNING,
            "Cannot find a portal type definition for '%s', trying to guess..."
            % portal_type_name)

    # But if neither factory_init_method_id nor type_class are set on
    # the portal type, we have to try to guess, for compatibility.
    # Moreover, some tools, such as 'Activity Tool', don't have any
    # portal type
    if type_class is None:
        if portal_type_name in core_portal_type_class_dict:
            # Only happen when portal_types is empty (e.g. when creating a
            # new ERP5Site)
            type_class = core_portal_type_class_dict[portal_type_name][
                'type_class']
        else:
            # Try to figure out a coresponding document class from the
            # document side.  This can happen when calling newTempAmount for
            # instance:
            #  Amount has no corresponding Base Type and will never have one
            #  But the semantic of newTempXXX requires us to create an
            #  object using the Amount Document, so we promptly do it:
            type_class = portal_type_name.replace(' ', '')

        mixin_list = []
        interface_list = []
        acquire_local_role = True

    if type_class is None:
        raise AttributeError('Document class is not defined on Portal Type ' + \
                               portal_type_name)

    # Resolve type_class into an actual class: a dotted name is treated as
    # a full import path; otherwise ZODB Components are tried first, then
    # the filesystem document_class_registry.
    klass = None
    if '.' in type_class:
        type_class_path = type_class
    else:
        type_class_path = None

        # Skip any document within ERP5Type Product as it is needed for
        # bootstrapping anyway
        type_class_namespace = document_class_registry.get(type_class, '')
        if not (type_class_namespace.startswith('Products.ERP5Type')
                or portal_type_name in core_portal_type_class_dict):
            if portal_type_name.endswith('Tool'):
                import erp5.component.tool
                klass = _importComponentClass(erp5.component.tool, type_class)

            # Tool Component was introduced recently and some Tool have already been
            # migrated as Document Component
            if klass is None:
                import erp5.component.document
                klass = _importComponentClass(erp5.component.document,
                                              type_class)

        if klass is None:
            type_class_path = document_class_registry.get(type_class)
            if type_class_path is None:
                raise AttributeError(
                    'Document class %s has not been registered:'
                    ' cannot import it as base of Portal Type %s' %
                    (type_class, portal_type_name))

    if klass is None:
        try:
            klass = _importFilesystemClass(type_class_path)
        except ImportError:
            error_msg = 'Could not import %s of Portal Type %s' % (
                type_class, portal_type_name)

            LOG("ERP5Type.Dynamic", WARNING, error_msg, error=True)
            raise AttributeError(error_msg)

    # Re-entrancy guard for property sheet generation (set/removed below).
    global property_sheet_generating_portal_type_set

    accessor_holder_list = []

    if portal_type_name not in property_sheet_generating_portal_type_set:
        # LOG("ERP5Type.dynamic", INFO,
        #     "Filling accessor holder list for portal_type " + portal_type_name)

        property_sheet_generating_portal_type_set.add(portal_type_name)
        try:
            # Initialize ZODB Property Sheets accessor holders
            accessor_holder_list = createAllAccessorHolderList(
                site, portal_type_name, portal_type, klass)

            # Merge categories and constraints contributed by each accessor
            # holder into the attributes of the generated class.
            base_category_set = set(attribute_dict['_categories'])
            for accessor_holder in accessor_holder_list:
                base_category_set.update(accessor_holder._categories)
                attribute_dict['constraints'].extend(
                    accessor_holder.constraints)

            attribute_dict['_categories'] = list(base_category_set)
        finally:
            property_sheet_generating_portal_type_set.remove(portal_type_name)

    # LOG("ERP5Type.dynamic", INFO,
    #     "Filled accessor holder list for portal_type %s (%s)" % \
    #     (portal_type_name, accessor_holder_list))

    mixin_class_list = []
    if mixin_list:
        # Only one Mixin class per ZODB Component (!= FS) where module_name ==
        # class_name, name ending with 'Mixin'.
        #
        # Rationale: same as Document/Interface; consistent naming; avoid a
        # registry like there used to be with FS.
        import erp5.component.mixin
        for mixin in mixin_list:
            mixin_class = _importComponentClass(erp5.component.mixin, mixin)
            if mixin_class is None:
                # Fall back on the filesystem mixin registry.
                mixin_class = _importFilesystemClass(
                    mixin_class_registry[mixin])

            mixin_class_list.append(mixin_class)

    base_class_list = [klass] + accessor_holder_list + mixin_class_list + [
        # _getAcquireLocalRoles is accessed by security machinery, so it needs to
        # be fast: make it a ConstantGetter while we have access to portal_type
        # configuration.
        ACQUIRE_LOCAL_ROLE_GETTER_DICT[acquire_local_role],
    ]

    interface_class_list = []
    if interface_list:
        # Filesystem Interfaces may have defined several Interfaces in one file
        # but only *one* Interface per ZODB Component where module_name ==
        # class_name, name starting with 'I'.
        #
        # Rationale: same as Document/Mixin; consistent naming; avoid a registry
        # like there used to be for Mixin or importing all class in
        # Products.ERP5Type.interfaces.__init__.py.
        import erp5.component.interface
        from Products.ERP5Type import interfaces as filesystem_interfaces
        for interface in interface_list:
            interface_class = _importComponentClass(erp5.component.interface,
                                                    interface)
            if interface_class is None:
                interface_class = getattr(filesystem_interfaces, interface)

            interface_class_list.append(interface_class)

    # Outer class fully loaded: clear the re-entrancy flag set at the top.
    if portal_type_name in core_portal_type_class_dict:
        core_portal_type_class_dict[portal_type_name]['generating'] = False

    # 'set*' methods, gathered across all base classes, whose declared
    # permission is neither of the two common content permissions.
    attribute_dict['_restricted_setter_set'] = {
        method
        for ancestor in base_class_list
        for permissions in getattr(ancestor, '__ac_permissions__', ())
        if permissions[0] not in ('Access contents information',
                                  'Modify portal content')
        for method in permissions[1] if method.startswith('set')
    }

    # 'get*' methods whose declared permission is not the common read
    # permission, gathered the same way.
    attribute_dict['_restricted_getter_set'] = {
        method
        for ancestor in base_class_list
        for permissions in getattr(ancestor, '__ac_permissions__', ())
        if permissions[0] not in ('Access contents information', )
        for method in permissions[1] if method.startswith('get')
    }

    #LOG("ERP5Type.dynamic", INFO,
    #    "Portal type %s loaded with bases %s" \
    #        % (portal_type_name, repr(base_class_list)))

    return (tuple(base_class_list), portal_type_category_list,
            interface_class_list, attribute_dict)
Exemple #20
0
def DA__call__(self, REQUEST=None, __ick__=None, src__=0, test__=0, **kw):
    """Call the database method

    The arguments to the method should be passed via keyword
    arguments, or in a single mapping object. If no arguments are
    given, and if the method was invoked through the Web, then the
    method will try to acquire and use the Web REQUEST object as
    the argument mapping.

    The returned value is a sequence of record objects.

    src__ -- when true, return the rendered query string instead of running it
    test__ -- when true, return (query, result) and refresh self._col
    """
    __traceback_supplement__ = (SQLMethodTracebackSupplement, self)

    # Explicit connection id, if the caller supplied one.
    c = kw.pop("connection_id", None)
    #if c is not None:
      #LOG("DA", 300, "connection %s provided to %s" %(c, self.id))
    # patch: dynamic brain configuration
    zsql_brain = kw.pop('zsql_brain', None)
    # patch end


    # No explicit mapping given: fall back to remaining keyword arguments,
    # then to the acquired REQUEST, then to an empty mapping.
    if REQUEST is None:
        if kw: REQUEST=kw
        else:
            if hasattr(self, 'REQUEST'): REQUEST=self.REQUEST
            else: REQUEST={}

    # Patch to implement dynamic connection id
    # Connection id is retrieve from user preference
    if c is None:
      # XXX cleaner solution will be needed
      if not (self.connection_id in ('cmf_activity_sql_connection',
                                     'erp5_sql_transactionless_connection')
              or 'portal_catalog' in self.getPhysicalPath()):
        portal = self.getPortalObject()
        if 'portal_archives' in portal.__dict__:
          archive_id = portal.portal_preferences.getPreferredArchive()
          if archive_id:
            archive_id = archive_id.split('/')[-1]
            #LOG("DA__call__, archive_id 2", 300, archive_id)
            archive = portal.portal_archives._getOb(archive_id, None)
            if archive is not None:
              c = archive.getConnectionId()
              #LOG("DA call", INFO, "retrieved connection %s from preference" %(c,))

    # Still no connection id: use the one configured on the method itself,
    # optionally overridden by the legacy connection_hook callable.
    if c is None:
      # connection hook
      c = self.connection_id
      # for backwards compatability
      hk = self.connection_hook
      # go get the connection hook and call it
      if hk: c = getattr(self, hk)()
    #LOG("DA__call__ connection", 300, c)
    # Resolve the connection object by id via attribute (acquisition) lookup.
    try: dbc=getattr(self, c)
    except AttributeError:
        raise AttributeError(
            "The database connection <em>%s</em> cannot be found." % (
            c))

    try: DB__=dbc()
    except: raise DatabaseError(
        '%s is not connected to a database' % self.id)

    p = aq_parent(self) # None if no aq_parent

    argdata=self._argdata(REQUEST)
    argdata['sql_delimiter']='\0'
    argdata['sql_quote__']=dbc.sql_quote__

    # Render the SQL template with the security context of this method
    # pushed, so template code runs with proper checks.
    security=getSecurityManager()
    security.addContext(self)
    try:
        query = str2bytes(self.template(p, **argdata))
    except TypeError as msg:
        msg = str(msg)
        if 'client' in msg:
            raise NameError("'client' may not be used as an "
                    "argument name in this context")
        raise
    finally:
        security.removeContext(self)

    if src__: return query

    # Use the result cache when both a cache duration and size are set.
    if self.cache_time_ > 0 and self.max_cache_ > 0:
        result=self._cached_result(DB__, query, self.max_rows_, c)
    else:
      try:
#         if 'portal_ids' in query:
#           LOG("DA query", INFO, "query = %s" %(query,))
        result=DB__.query(query, self.max_rows_)
      except:
        # Log the failing query with its connection before re-raising.
        LOG("DA call raise", ERROR, "DB = %s, c = %s, query = %s" %(DB__, c, query), error=True)
        raise

    # patch: dynamic brain configuration
    if zsql_brain is not None:
        try:
          # zsql_brain is a dotted path: module part, then class name.
          class_file_, class_name_ = zsql_brain.rsplit('.', 1)
        except:
          #import pdb; pdb.post_mortem()
          raise
        brain = getBrain(class_file_, class_name_)
        # XXX remove this logging for performance
        LOG(__name__, INFO, "Using special brain: %r\n" % (brain,))
    else:
        brain = getBrain(self.class_file_, self.class_name_)

    # A string result is wrapped as a file-backed RDB.File; anything else
    # becomes a Results sequence of brain records.
    if type(result) is type(''):
        f=BytesIO()
        f.write(result)
        f.seek(0)
        result=RDB.File(f,brain,p)
    else:
        result=Results(result, brain, p)
    columns=result._searchable_result_columns()
    if test__ and columns != self._col: self._col=columns

    # If run in test mode, return both the query and results so
    # that the template doesn't have to be rendered twice!
    if test__: return query, result

    return result
Exemple #21
0
def _write( response, tool, message, level=INFO ):
    """Log *message* against *tool* at *level*, echoing it to *response*.

    The echo is skipped when no response object is supplied.
    """
    LOG( tool, level, message )
    if response is None:
        return
    response.write( message )
Exemple #22
0
    def backupObject(self,
                     trashbin,
                     container_path,
                     object_id,
                     save,
                     keep_subobjects=False):
        """Backup the object at container_path/object_id into *trashbin*.

        When *save* is true, the object is exported from the ZODB and
        re-imported (unindexed, with events suppressed) under a folder
        hierarchy inside the trash bin mirroring *container_path*.
        Unless *keep_subobjects* is true, sub-objects are exported into
        the returned dict (subobject_id -> export file) and removed from
        the backup copy.  Returns {} when the object cannot be backed up
        (uncommitted, broken, or import failure).
        """
        #     LOG('Trash : backup object', 0, str((container_path, object_id)))
        if save:
            # recreate path of the backup object if necessary
            backup_object_container = trashbin
            for path in container_path:
                # Rename path elements containing 'portal' to avoid
                # clashing with tool ids inside the trash bin.
                if 'portal' in path:
                    path += '_items'
                if path not in backup_object_container.objectIds():
                    # Plain Zope folders (no newContent) get a subfolder;
                    # ERP5 containers get an unindexed, hidden Trash Folder.
                    if not hasattr(aq_base(backup_object_container),
                                   "newContent"):
                        backup_object_container.manage_addFolder(id=path, )
                        backup_object_container = backup_object_container._getOb(
                            path)
                    else:
                        backup_object_container = backup_object_container.newContent(
                            portal_type='Trash Folder',
                            id=path,
                            is_indexable=0)
                        backup_object_container.edit(isHidden=1)
                else:
                    backup_object_container = backup_object_container._getOb(
                        path)
            # backup the object
            # here we choose export/import to copy because cut/paste
            # do too many things and check for what we want to do
            object_path = container_path + [object_id]
            obj = self.unrestrictedTraverse(object_path, None)
            if obj is not None:
                # Walk up acquisition parents until a ZODB connection is found.
                connection = obj._p_jar
                o = obj
                while connection is None:
                    o = o.aq_parent
                    connection = o._p_jar
                if obj._p_oid is None:
                    LOG("Trash Tool backupObject", WARNING,
                        "Trying to backup uncommitted object %s" % object_path)
                    return {}
                if isinstance(obj, Broken):
                    LOG("Trash Tool backupObject", WARNING,
                        "Can't backup broken object %s" % object_path)
                    klass = obj.__class__
                    if klass.__module__[:27] in ('Products.ERP5Type.Document.',
                                                 'erp5.portal_type'):
                        # meta_type is required so that a broken object
                        # can be removed properly from a BTreeFolder2
                        # (unfortunately, we can only guess it)
                        klass.meta_type = 'ERP5' + re.subn(
                            '(?=[A-Z])', ' ', klass.__name__)[0]
                    return {}
                copy = connection.exportFile(obj._p_oid)
                # import object in trash
                connection = backup_object_container._p_jar
                o = backup_object_container
                while connection is None:
                    o = o.aq_parent
                    connection = o._p_jar
                copy.seek(0)
                try:
                    backup = connection.importFile(copy)
                    backup.isIndexable = ConstantGetter('isIndexable',
                                                        value=False)
                    # the isIndexable setting above avoids the recursion of
                    # manage_afterAdd on
                    # Products.ERP5Type.CopySupport.CopySupport.manage_afterAdd()
                    # but not on event subscribers, so we need to suppress_events,
                    # otherwise subobjects will be reindexed
                    backup_object_container._setObject(object_id,
                                                       backup,
                                                       suppress_events=True)
                except (AttributeError, ImportError):
                    # XXX we can go here due to formulator because attribute
                    # field_added doesn't not exists on parent if it is a Trash
                    # Folder and not a Form, or a module for the old object is
                    # already removed, and we cannot backup the object
                    LOG("Trash Tool backupObject", WARNING,
                        "Can't backup object %s" % object_path)
                    return {}

        subobjects_dict = {}

        if not keep_subobjects:
            # export subobjects
            if save:
                obj = backup_object_container._getOb(object_id, None)
            else:
                object_path = container_path + [object_id]
                obj = self.unrestrictedTraverse(object_path, None)
            if obj is not None:
                # Iterate over a list copy since _delObject below mutates
                # the container while we loop.
                for subobject_id in list(obj.objectIds()):
                    subobject = obj[subobject_id]
                    subobjects_dict[
                        subobject_id] = subobject._p_jar.exportFile(
                            subobject._p_oid, StringIO())

                    if save:  # remove subobjecs from backup object
                        obj._delObject(subobject_id)
                        if subobject_id in obj.objectIds():
                            LOG('Products.ERP5.Tool.TrashTool', WARNING,
                                'Cleaning corrupted BTreeFolder2 object at %r.' % \
                                                                     (subobject.getRelativeUrl(),))
                            obj._cleanup()
        return subobjects_dict
Exemple #23
0
    def build(self, sql_catalog):
        """Map every registered column to a table and assign table aliases.

        For each registration group, collects candidate tables per column
        from the catalog's column map, lets the catalog's table vote
        scripts weigh in, then delegates the final choice to
        self._mapColumns.  Related keys registered in the default group
        get join queries built for them.  Finally, every (group, table)
        pair is given a unique alias and resolved via self.resolveTable.
        """
        # Join queries built for default-group related keys.
        # NOTE(review): this list is not read again within the visible body —
        # confirm whether it is consumed further down this method.
        join_query_to_build_list = []
        catalog_table_name = self.catalog_table_name
        if catalog_table_name is None:
            return

        column_table_map = sql_catalog.getColumnMap()
        table_vote_method_list = [
            getattr(sql_catalog, x)
            for x in sql_catalog.sql_catalog_table_vote_scripts
        ]

        # Generate missing joins from default group (this is required to allow using related keys outside of queries: order_by, sort_on, ...)
        column_set = self.registry.get(DEFAULT_GROUP_ID, [])
        for column_name in column_set:
            if column_name not in column_table_map and column_name not in self.related_key_dict:
                related_key_definition = sql_catalog.getRelatedKeyDefinition(
                    column_name)
                if related_key_definition is not None:
                    join_query = sql_catalog.getSearchKey(
                        column_name, 'RelatedKey').buildQuery(
                            sql_catalog=sql_catalog,
                            related_key_definition=related_key_definition)
                    join_query.registerColumnMap(sql_catalog, self)
                    join_query_to_build_list.append(join_query)

        # List all possible tables, with all used column for each
        for group, column_set in self.registry.iteritems():
            # unique needed column name set
            column_name_set = set()
            # table -> column_set, including alternatives
            table_usage_dict = {}

            for column_name in column_set:
                if column_name == '*' or column_name in self.column_ignore_set:
                    continue
                table_name_list = column_table_map.get(column_name, [])
                if len(table_name_list) == 0:
                    # Unknown column: only worth warning about when it is not
                    # a default-group related key (handled above).
                    if not (group is DEFAULT_GROUP_ID
                            and column_name in self.related_key_dict):
                        LOG('ColumnMap', WARNING,
                            'Not a known column name: %r' % (column_name, ))
                    continue
                column_map_key = (group, column_name)
                if column_map_key in self.column_map:
                    # Column is already mapped, so we must count this column as being available only on that table. Its mapping will not change, and it will impact table schema choice.
                    table_name = self.column_map[column_map_key]
                    assert table_name in table_name_list, '%r not in %r' % (
                        table_name, table_name_list)
                    table_name_list = [table_name]
                else:
                    # Mark this column as requiring to be mapped.
                    column_name_set.add(column_name)
                for table_name in table_name_list:
                    table_usage_dict.setdefault(table_name,
                                                set()).add(column_name)
            # XXX: mutable datatypes are provided to vote method. if it modifies
            # them, it can introduce mapping bugs. Copying them might be costly,
            # especialy if done before each call, since they also contain mutable
            # types.
            # XXX: the API of vote methods is not stable yet. Parameters should
            # always be passed and expected by name, to make it less painful to
            # change API.
            # XXX: there is no check that the table voted for contains mapped
            # column. It is up to the user not to do stupid things.
            vote_result_dict = {}
            simple_query_dict = self.simple_query_dict[group]
            for table_vote_method in table_vote_method_list:
                vote_dict = table_vote_method(
                    column_name_set=column_name_set,
                    simple_query_dict=simple_query_dict,
                    table_usage_dict=table_usage_dict,
                    group=group)
                if isinstance(vote_dict, dict):
                    # Tally one vote per (column, table) pair; votes for
                    # non-candidate columns are discarded with a warning.
                    for column, table in vote_dict.iteritems():
                        if column in column_name_set:
                            column_vote_dict = vote_result_dict.setdefault(
                                column, {})
                            column_vote_dict[table] = column_vote_dict.get(
                                table, 0) + 1
                        else:
                            LOG('ColumnMap', WARNING, 'Vote script %r voted for a ' \
                                'non-candidate column: %r, candidates are: %r. Ignored.' %
                                (table_vote_method, column, column_name_set))
                else:
                    LOG('ColumnMap', WARNING, 'Vote script %r returned invalid data: %r. ' \
                        'Ignored.' % (table_vote_method, vote_dict))
            self._mapColumns(column_table_map, table_usage_dict,
                             column_name_set, group, vote_result_dict)

        # base alias name -> next numeric suffix to try
        table_alias_number_dict = {}

        for (group, table_name), alias in self.table_alias_dict.iteritems():
            if alias is None:
                # Related-group tables get a 'related_<key>_<table>' base name.
                if group in self.related_group_dict:
                    alias_table_name = 'related_%s_%s' % (
                        self.related_group_dict[group], table_name)
                else:
                    alias_table_name = table_name
                table_alias_number = table_alias_number_dict.get(
                    alias_table_name, 0)
                # Append increasing numeric suffixes until the alias is free.
                while True:
                    if table_alias_number == 0:
                        alias = alias_table_name
                    else:
                        alias = '%s_%s' % (alias_table_name,
                                           table_alias_number)
                    table_alias_number += 1
                    if alias not in self.table_map:
                        break
                table_alias_number_dict[alias_table_name] = table_alias_number
            self.resolveTable(table_name, alias, group=group)

        # now that we have all aliases, calculate missing joins comming from
        # non-RelatedKey relationships (like full_text).
        self.registerCatalog()
        self._calculateMissingJoins()
        # and all left joins that did not come from explicit queries
        # (i.e. joins comming from 'sort_on', 'select_dict', etc.)
        for join_query in join_query_to_build_list:
            # XXX ugly use of inner attribute of join_query. Please Refactor:
            # search_keys don't actually return SQLExpressions, but they add
            # join definitions in the column_map
            join_query.search_key.buildSQLExpression(
                sql_catalog=sql_catalog,
                column_map=self,
                only_group_columns=False,
                group=join_query.group,
            )
        if MAPPING_TRACE:
            # Key: group
            # Value: 2-tuple
            #  dict
            #   Key: column
            #   Value: table name
            #  dict
            #   Key: table name
            #   Value: table alias
            summary_dict = {}
            for (group, column), table_name in self.column_map.iteritems():
                column_dict = summary_dict.setdefault(group, ({}, {}))[0]
                assert column not in column_dict, '%r in %r' % (column,
                                                                column_dict)
                column_dict[column] = table_name
            for (group,
                 table_name), table_alias in self.table_alias_dict.iteritems():
                table_dict = summary_dict.setdefault(group, ({}, {}))[1]
                assert table_name not in table_dict, '%r in %r' % (table_name,
                                                                   table_dict)
                table_dict[table_name] = table_alias
            for group, (column_dict, table_dict) in summary_dict.iteritems():
                LOG('ColumnMap', INFO, 'Group %r:' % (group, ))
                LOG('ColumnMap', INFO, ' Columns:')
                for column, table_name in column_dict.iteritems():
                    LOG('ColumnMap', INFO,
                        '  %r from table %r' % (column, table_name))
                LOG('ColumnMap', INFO, ' Tables:')
                for table_name, table_alias in table_dict.iteritems():
                    LOG('ColumnMap', INFO,
                        '  %r as %r' % (table_name, table_alias))
Exemple #24
0
  def getTemplateField(self, cache=True):
    """Return the template field this proxy field points to.

    The target is looked up through the ``form_id`` / ``field_id`` values
    of the proxy.  When ``form_id`` contains a ``/``, the skin folder is
    explicit and acquisition is bypassed; otherwise the form is acquired
    from the form's container.  If the field is missing from the resolved
    form, lower-priority skin folders of the current skin selection are
    searched as fallback field libraries.

    cache -- when true, read from and fill the per-field cache.  Caching
      is disabled for fields not yet committed to the ZODB or whose
      form_id / field_id are TALES expressions (their target may vary).

    Returns the target field, or None if it cannot be found (a warning
    is logged in that case).
    """
    if cache is True:
      tales = self.tales
      # Not committed yet, or dynamic form_id/field_id: the result may
      # change between calls, so bypass the cache entirely.
      if self._p_oid is None or tales['field_id'] or tales['form_id']:
        cache = False
      else:
        try:
          return self._getTemplateFieldCache()
        except KeyError:
          # Not cached yet; fall through to the lookup below.
          pass

    portal = self.getPortalObject()
    portal_skins = portal.portal_skins
    form = self.aq_parent
    # Renamed from 'object': do not shadow the builtin.
    container = form.aq_parent

    form_id = self.get_value('form_id')
    proxy_field = None
    form_id_with_skin_folder_name_flag = False
    if '/' in form_id:
      # If a / is in the form_id, it means that skin_folder is explicitly
      # defined. If so, prevent acquisition to get the form.
      form_id_with_skin_folder_name_flag = True
      proxy_form = aq_base(portal_skins).unrestrictedTraverse(form_id, None)
      if proxy_form is not None:
        # Re-traverse with acquisition wrappers so the form is usable.
        proxy_form = portal_skins.unrestrictedTraverse(form_id)
    else:
      proxy_form = getattr(container, form_id, None)

    if proxy_form is not None:
      field_id = self.get_value('field_id')
      proxy_field = proxy_form._getOb(field_id, None)
      if proxy_field is None and form_id_with_skin_folder_name_flag is False:
        # Try to get the field from another field library with a lower
        # priority.
        # This should return no field if the skin folder name is defined
        # in form_id.
        skin_info = SKINDATA.get(get_ident())
        if skin_info is not None:
          skin_selection_name, ignore, resolve = skin_info
          selection_dict = portal_skins._getSelections()
          candidate_folder_id_list = \
            selection_dict[skin_selection_name].split(',')
          for candidate_folder_id in candidate_folder_id_list:
            candidate_folder = portal_skins._getOb(candidate_folder_id, None)
            if candidate_folder is not None:
              proxy_form = candidate_folder._getOb(form_id, None)
              if proxy_form is not None:
                proxy_field = proxy_form._getOb(field_id, None)
                if proxy_field is not None:
                  break

    if proxy_field is None:
      LOG('ProxyField', WARNING,
          'Could not get a field from a proxy field %s in %s' % \
              (self.id, container.id))
    if cache is True:
      self._setTemplateFieldCache(proxy_field)
    return proxy_field
Exemple #25
0
 def Type(self):
     """Deprecated alias kept for backward compatibility; use Title()."""
     deprecation_message = 'TypeInformation.Type() is deprecated, use Title().'
     LOG('CMFCore.TypesTool', WARNING, deprecation_message)
     return self.Title()
Exemple #26
0
    def _checkConsistency(self, fixit=False, **kw):
        """Check (and optionally fix) that role definitions are imported.

        Always reports the 'roles should be imported' constraint message.
        With fixit=True, reads the role spreadsheet, creates 'Role
        Information' objects on existing portal types, and records the
        configured types on the business template.
        """
        # The constraint message is returned unconditionally: this checker
        # reports the work to do, and performs it only when fixit is set.
        error_list = [
            self._createConstraintMessage('Roles should imported and created')
        ]
        if not fixit:
            return error_list

        business_configuration = self.getBusinessConfigurationValue()
        object_list = business_configuration.ConfigurationTemplate_readOOCalcFile(
            self.filename)
        portal = self.getPortalObject()

        # Dynamic values posted from outside override spreadsheet columns
        # of the same name (e.g. a posted 'group_id' replaces the column
        # value; otherwise the configuration file value is used).
        dynamic_values = {
            'group_id': getattr(aq_base(self), 'group_id', None),
            'function_id': getattr(aq_base(self), 'function_id', None),
            'site_id': getattr(aq_base(self), 'site_id', None),
        }

        # Map each portal type to the list of role configurations to create.
        portal_type_dict = {}
        for oo_module_dict in object_list:
            portal_type = oo_module_dict.pop('portal_type')
            role_conf_list = []
            for category, role_list_string in oo_module_dict.items():
                # Externally supplied value takes precedence over the file.
                category = dynamic_values.get(category, category)
                role_conf_list.append({
                    'title': category.replace('/', '_'),
                    'description': 'Configured by ERP5 Configurator',
                    'role_name_list':
                        [x.strip() for x in role_list_string.split(';')],
                    'role_category_list': [category],
                })
            portal_type_dict[portal_type] = role_conf_list

        ## Update fake site
        # XXX rafael: improve this, the ignore list is quite ugly.
        ignore_list = []
        portal_type_id_list = portal.portal_types.objectIds()
        for portal_type, role_list in portal_type_dict.items():
            for role_dict in role_list:
                if portal_type in portal_type_id_list:
                    portal.portal_types[portal_type].newContent(
                        portal_type='Role Information', **role_dict)
                else:
                    ignore_list.append(portal_type)
                    LOG("CONFIGURATOR", INFO,
                        "Fail to define Roles for %s" % portal_type)

        ## Update BT5
        bt5_obj = business_configuration.getSpecialiseValue()
        # Keep existing roles definition (from previous configuration saves);
        # only the keys of portal_type_dict matter from here on.
        for existing_type in bt5_obj.getTemplatePortalTypeRoleList():
            portal_type_dict[existing_type] = 1
        bt5_obj.edit(template_portal_type_role_list=[
            i for i in portal_type_dict.keys() if i not in ignore_list
        ])

        return error_list