def __call__(self, token=None, catch_errors=False, email=None, types=None,
             sf_object_id=None):
    """Perform the synchronization of picklists and Salesforce objects.

    Parameters:
      token -- sync key that lets non-Manager callers trigger the sync;
        checked against the registry record
        ``collective.salesforce.content.sync_key``.
      catch_errors -- if true, log (and optionally e-mail) tracebacks
        instead of propagating them.
      email -- address to mail the traceback to when catch_errors is set.
      types -- optional list of portal type names; when given, only those
        types are synced.
      sf_object_id -- optional Salesforce Id restricting the sync to a
        single record.

    Raises Unauthorized when the caller lacks 'Manage portal' and does
    not supply a valid sync key.
    """
    # Avoid a shared mutable default argument.
    if types is None:
        types = []

    # Protection: make sure the user has Manager access or has provided
    # the sync key.
    if not getSecurityManager().checkPermission('Manage portal',
                                                self.context):
        registry = getUtility(IRegistry)
        sync_key = registry.get('collective.salesforce.content.sync_key')
        if not sync_key or token != sync_key:
            raise Unauthorized

    # Become a superuser so the sync can create and update content.
    user = UnrestrictedUser('manager', '', ['Manager'], [])
    newSecurityManager(None, user)

    try:
        logger.info('Syncing picklists for %s' % self.context.Title())
        PicklistsFromSalesforce(self.context).queryObjects()

        logger.info('Syncing Salesforce objects for %s'
                    % self.context.Title())
        # Loop through the FTIs that include the ISalesforceObject
        # behavior.
        for fti in self.getDexterityTypes():
            if types and fti.__name__ not in types:
                continue
            if ISalesforceObject.__identifier__ in fti.behaviors:
                query = self.getQueryFromType(fti,
                                              sf_object_id=sf_object_id)
                logger.debug('SOQL: %s' % query)
                if query:
                    results = self.getResults(query)
                    if results:
                        self.syncPloneObjects(
                            fti, results, sf_object_id=sf_object_id)
    except Exception:
        # If the catch_errors flag is set, we try to handle the error
        # gracefully.  This is mostly useful when using sf_sync with
        # Cron4Plone, which will try again if it gets an error.
        catch_errors = self.request.get('catch_errors', catch_errors)
        email = self.request.get('email', email)
        if catch_errors:
            message = traceback.format_exc()
            logger.error(message)
            if email:
                # If we have an e-mail address, we try to send the
                # traceback to that address.
                MailHost = getToolByName(self.context, 'MailHost')
                subject = (u'Salesforce Sync Failure: %s'
                           % self.context.Title())
                sender = getUtility(ISiteRoot).email_from_address or email
                try:
                    MailHost.secureSend(message, email, sender,
                                        subject=subject, subtype='plain',
                                        charset='utf-8')
                except Exception:
                    # Mail delivery is best-effort; a mail failure must
                    # not mask the original error handling.
                    pass
        else:
            raise

    return 'Successfully ran sync.'
def getResults(self, query):
    """Get the results for this query from Salesforce.

    Generator: yields each record, transparently paging through
    additional batches via queryMore until Salesforce reports the
    result set is done.
    """
    sfbc = getToolByName(self.context, 'portal_salesforcebaseconnector')
    results = sfbc.query(query)
    size = results['size']
    logger.debug('%s records found.' % size)
    retrieved = 0
    for result in results:
        retrieved += 1
        yield result
    while not results['done']:
        results = sfbc.queryMore(results['queryLocator'])
        # Track cumulative progress; the previous code logged only the
        # current batch size here, which under-reported how many records
        # had actually been retrieved so far.
        retrieved += len(results)
        logger.debug('Retrieved %s of %s records from Salesforce.'
                     % (retrieved, size))
        for result in results:
            yield result
def __call__(self, catch_errors=False, email=None, types=None,
             sf_object_id=None):
    """Perform the synchronization.

    Parameters:
      catch_errors -- if true, log (and optionally e-mail) tracebacks
        instead of propagating them.
      email -- address to mail the traceback to when catch_errors is set.
      types -- optional list of portal type names; when given, only those
        types are synced.
      sf_object_id -- optional Salesforce Id restricting the sync to a
        single record.
    """
    # Avoid a shared mutable default argument.
    if types is None:
        types = []

    try:
        logger.info('Syncing Salesforce objects for %s'
                    % self.context.Title())
        # Loop through the FTIs that include the ISalesforceObject
        # behavior.
        for fti in self.getDexterityTypes():
            if types and fti.__name__ not in types:
                continue
            if ISalesforceObject.__identifier__ in fti.behaviors:
                query = self.getQueryFromType(fti,
                                              sf_object_id=sf_object_id)
                logger.debug('SOQL: %s' % query)
                if query:
                    results = self.getResults(query)
                    if results:
                        self.syncPloneObjects(
                            fti, results, sf_object_id=sf_object_id)
    except Exception:
        # If the catch_errors flag is set, we try to handle the error
        # gracefully.  This is mostly useful when using sf_sync with
        # Cron4Plone, which will try again if it gets an error.
        catch_errors = self.request.get('catch_errors', catch_errors)
        email = self.request.get('email', email)
        if catch_errors:
            message = traceback.format_exc()
            logger.error(message)
            if email:
                # If we have an e-mail address, we try to send the
                # traceback to that address.
                MailHost = getToolByName(self.context, 'MailHost')
                subject = (u'Salesforce Sync Failure: %s'
                           % self.context.Title())
                sender = getUtility(ISiteRoot).email_from_address or email
                try:
                    MailHost.secureSend(message, email, sender,
                                        subject=subject, subtype='plain',
                                        charset='utf-8')
                except Exception:
                    # Mail delivery is best-effort; a mail failure must
                    # not mask the original error handling.
                    pass
        else:
            raise
def queryFromSchema(schema, relationship_name=None, add_prefix=True,
                    sf_object_id=None):
    """Given a schema tagged with Salesforce values, generate a query to
    return all the records for objects of that type.

    Parameters:
      schema -- a zope schema carrying 'salesforce.*' tagged values.
      relationship_name -- when building a subquery, the relationship to
        select FROM instead of the object name.
      add_prefix -- prefix field names with the object name (disabled for
        subqueries).
      sf_object_id -- optional Salesforce Id used to restrict the query
        to a single record.

    Returns the SOQL query string, or None if the schema does not carry
    enough Salesforce tagged values to build one.

    Raises ValueError when sf:relationship is specified without sf:field
    on a field that is not a zope.schema.Object collection.
    """
    sf_object = schema.queryTaggedValue('salesforce.object', None)
    sf_criteria = schema.queryTaggedValue('salesforce.criteria', None)
    sf_fields = schema.queryTaggedValue('salesforce.fields', {})
    sf_relationships = schema.queryTaggedValue('salesforce.relationships', {})
    sf_subqueries = schema.queryTaggedValue('salesforce.subqueries', {})

    # Nothing to select without an object and at least one mapping.
    if not (sf_object and (sf_fields or sf_relationships or sf_subqueries)):
        return None

    prefix = '%s.' % sf_object if add_prefix else ''
    selects = ['%sId' % prefix]
    for schema_field_name in schema:
        if schema_field_name in sf_subqueries:
            # Has a custom subquery, which takes precedence.
            prevent_dupe(selects, sf_subqueries[schema_field_name])
        elif schema_field_name in sf_fields:
            if schema_field_name in sf_relationships:
                # Has both sf:field and sf:relationship.
                prevent_dupe(selects, '(SELECT %s FROM %s%s)' % (
                    sf_fields[schema_field_name],
                    prefix,
                    sf_relationships[schema_field_name],
                ))
            else:
                # Has sf:field but not sf:relationship.
                prevent_dupe(selects, '%s%s' % (
                    prefix,
                    sf_fields[schema_field_name],
                ))
        elif schema_field_name in sf_relationships:
            # Has sf:relationship but not sf:field.
            field = schema[schema_field_name]
            # If the zope field is a collection whose value_type is
            # IObject, build a subquery based on the object schema.
            if (ICollection.providedBy(field)
                    and IObject.providedBy(field.value_type)):
                subquery = queryFromSchema(
                    field.value_type.schema,
                    relationship_name=sf_relationships[schema_field_name],
                    add_prefix=False)
                prevent_dupe(selects, '(%s)' % subquery)
            else:
                # Otherwise not supported.
                raise ValueError(
                    'sf:relationship may only be specified without '
                    'sf:field if the field is a zope.schema.Object.')

    # Construct the main query.
    query = "SELECT %s FROM %s" % (
        ', '.join(selects),
        relationship_name or sf_object
    )
    if sf_object_id is not None:
        # Escape single quotes so an attacker-supplied Id cannot break
        # out of the SOQL string literal (legitimate Salesforce Ids are
        # alphanumeric, so this is a no-op for valid input).
        safe_id = sf_object_id.replace("'", "\\'")
    if sf_criteria:
        query += " WHERE %s" % sf_criteria
        if sf_object_id is not None:
            query += " AND Id='%s'" % safe_id
    elif sf_object_id is not None:
        query += " WHERE Id='%s'" % safe_id
    logger.debug(query)
    return query
def syncPloneObjects(self, fti, records, sf_object_id=None):
    """Given the results from Salesforce, update or create the
    appropriate Plone objects.

    Parameters:
      fti -- the Dexterity FTI whose schema maps to the Salesforce object.
      records -- iterable of Salesforce records (as yielded by getResults).
      sf_object_id -- when set, the sync was restricted to one record, so
        missing-object notifications are skipped.
    """
    time_start = time.time()
    schema = fti.lookupSchema()
    catalog = getToolByName(self.context, 'portal_catalog')
    query = {
        'object_provides': schema.__identifier__,
    }
    # Map Salesforce Id -> catalog brain for existing local objects.
    sfid_map = dict((b.sf_object_id, b)
                    for b in catalog.searchResults(query)
                    if b.sf_object_id)

    objects_updated_count = 0
    for i, record in enumerate(records):
        # Digest of the raw record, used to detect unchanged items.
        digest = sha1(str(record)).digest()
        if record.Id in sfid_map:
            sfobj = ISalesforceObject(sfid_map[record.Id].getObject())
            del sfid_map[record.Id]
            # Skip updating items that haven't changed, based on the
            # digest.
            if digest == sfobj.sf_data_digest:
                continue
            sfobj.updatePloneObject(record)
        else:
            obj = createObject(fti.factory)
            notify(ObjectCreatedEvent(obj))
            sfobj = ISalesforceObject(obj)
            sfobj.updatePloneObject(record)
            sfobj.addToContainer()
        objects_updated_count += 1
        sfobj.sf_data_digest = digest

        # Trigger ObjectModifiedEvent to reindex the object.  We mark it
        # so that handlers can avoid taking action when objects are
        # updated in this way (such as a handler that writes back to
        # Salesforce).
        event = ObjectModifiedEvent(sfobj.context)
        alsoProvides(event, IModifiedViaSalesforceSync)
        notify(event)

        # Send an UpdatedFromSalesforce event.
        notify(UpdatedFromSalesforceEvent(sfobj.context))

        # Commit periodically.
        if not objects_updated_count % 10:
            try:
                transaction.commit()
                logger.debug('Committed updates (%s)' % i)
            except ConflictError:
                # If there was a conflict, subsequent commits will fail;
                # so explicitly start a new transaction.
                logger.exception('Conflict on updates (%s)' % i)
                transaction.begin()

    # Send NotFoundInSalesforce events for objects that weren't returned
    # by the Salesforce query.  We skip this if an sf_object_id was
    # supplied, because that means we intentionally didn't find all of
    # the objects.
    if sf_object_id is None:
        for i, brain in enumerate(sfid_map.values()):
            notify(NotFoundInSalesforceEvent(brain.getObject()))
            # Commit periodically.
            if not i % 10:
                transaction.commit()

    time_elapsed = time.time() - time_start
    logger.debug('Sync completed in %s seconds. Have a nice day.'
                 % time_elapsed)
def syncPloneObjects(self, fti, records, sf_object_id=None, ignore_no_container=None): """ Given the results from Salesforce, update or create the appropriate Plone objects. """ time_start = time.time() schema = fti.lookupSchema() catalog = getToolByName(self.context, 'portal_catalog') query = { 'object_provides': schema.__identifier__, } sfid_map = dict([(b.sf_object_id, b) for b in catalog.searchResults(query) \ if b.sf_object_id]) objects_updated_count = 0 for i, record in enumerate(records): digest = sha1(str(record)).digest() if record.Id in sfid_map.keys(): sfobj = ISalesforceObject(sfid_map[record.Id].getObject()) del sfid_map[record.Id] # skip updating items that haven't changed, based on the digest if digest == sfobj.sf_data_digest: continue sfobj.updatePloneObject(record) else: obj = createObject(fti.factory) notify(ObjectCreatedEvent(obj)) sfobj = ISalesforceObject(obj) sfobj.updatePloneObject(record) try: sfobj.addToContainer() except ValueError, e: if ignore_no_container: pass else: raise e objects_updated_count += 1 sfobj.sf_data_digest = digest # Trigger ObjectModifiedEvent to reindex the object. # We mark it so that handlers can avoid taking action when # objects are updated in this way (such as a handler that # writes back to Salesforce). event = ObjectModifiedEvent(sfobj.context) alsoProvides(event, IModifiedViaSalesforceSync) notify(event) # Send an UpdatedFromSalesforce event. notify(UpdatedFromSalesforceEvent(sfobj.context)) # Commit periodically. if not objects_updated_count % 10: try: transaction.commit() logger.debug('Committed updates (%s)' % i) except ConflictError: # if there was a conflict subsequent commits will fail; # so explicitly start a new transaction logger.exception('Conflict on updates (%s)' % i) transaction.begin()