class test_integration_high_volume(unittest.TestCase):
    """Round-trips 100,000 Lead records through amaxa against a live org.

    Requires INSTANCE_URL and ACCESS_TOKEN in the environment; pins API
    version 46.0 and wraps the simple_salesforce session in amaxa's
    ``Connection`` adapter.
    """

    def setUp(self):
        # Authenticate against the live org named by the environment.
        self.connection = Salesforce(
            instance_url=os.environ["INSTANCE_URL"],
            session_id=os.environ["ACCESS_TOKEN"],
            version="46.0",
        )

    def tearDown(self):
        # We have to run this repeatedly to stick within the DML Rows limit:
        # each anonymous-Apex call may delete at most 10,000 rows.
        for _ in range(10):
            self.connection.restful(
                "tooling/executeAnonymous",
                {"anonymousBody": "delete [SELECT Id FROM Lead LIMIT 10000];"},
            )

    def test_loads_and_extracts_high_data_volume(self):
        # This is a single unit test rather than multiple to save on execution time.
        # Build the fixture with a comprehension rather than a 100k-iteration
        # append loop (clearer and faster).
        records = [
            {
                "Id": "00Q000000{:06d}".format(i),
                "Company": "[not provided]",
                "LastName": "Lead {:06d}".format(i),
            }
            for i in range(100000)
        ]

        # Load all records into the org.
        op = amaxa.LoadOperation(Connection(self.connection))
        op.file_store = MockFileStore()
        op.file_store.records["Lead"] = records
        op.add_step(amaxa.LoadStep("Lead", {"LastName", "Company"}))
        op.initialize()
        op.execute()

        self.assertEqual(
            100000,
            self.connection.query("SELECT count() FROM Lead").get("totalSize"),
        )

        # Extract them back and confirm every Id round-tripped.
        oc = amaxa.ExtractOperation(Connection(self.connection))
        oc.file_store = MockFileStore()
        extraction = amaxa.ExtractionStep(
            "Lead", amaxa.ExtractionScope.ALL_RECORDS, ["Id", "LastName"]
        )
        oc.add_step(extraction)
        extraction.initialize()
        extraction.execute()

        self.assertEqual(100000, len(oc.get_extracted_ids("Lead")))
class test_integration_high_volume(unittest.TestCase):
    """Loads 100,000 Leads into a live org and extracts them back,
    verifying the round-trip count both server- and client-side."""

    def setUp(self):
        self.connection = Salesforce(instance_url=os.environ['INSTANCE_URL'],
                                     session_id=os.environ['ACCESS_TOKEN'])

    def tearDown(self):
        # We have to run this repeatedly to stick within the DML Rows limit.
        for _ in range(10):
            self.connection.restful(
                'tooling/executeAnonymous',
                {'anonymousBody': 'delete [SELECT Id FROM Lead LIMIT 10000];'})

    def test_loads_and_extracts_high_data_volume(self):
        # Single combined test to keep total execution time down.
        records = [{
            'Id': '00Q000000{:06d}'.format(n),
            'Company': '[not provided]',
            'LastName': 'Lead {:06d}'.format(n)
        } for n in range(100000)]

        # Load phase: feed the in-memory records through a LoadOperation.
        load_op = amaxa.LoadOperation(self.connection)
        load_op.file_store = MockFileStore()
        load_op.file_store.records['Lead'] = records
        load_op.add_step(amaxa.LoadStep('Lead', {'LastName', 'Company'}))
        load_op.initialize()
        load_op.execute()

        lead_count = self.connection.query(
            'SELECT count() FROM Lead').get('totalSize')
        self.assertEqual(100000, lead_count)

        # Extract phase: pull every Lead back out and count the ids.
        extract_op = amaxa.ExtractOperation(self.connection)
        extract_op.file_store = MockFileStore()
        step = amaxa.ExtractionStep('Lead', amaxa.ExtractionScope.ALL_RECORDS,
                                    ['Id', 'LastName'])
        extract_op.add_step(step)
        step.initialize()
        step.execute()

        self.assertEqual(100000, len(extract_op.get_extracted_ids('Lead')))
class test_end_to_end(unittest.TestCase):
    """End-to-end CLI tests: drive amaxa's main() with a mocked
    ``builtins.open`` against a live org named by INSTANCE_URL/ACCESS_TOKEN.

    NOTE(review): the embedded YAML/CSV literal indentation below was
    reconstructed from whitespace-collapsed source -- confirm against the
    original file.
    """

    def setUp(self):
        # Live connection used for verification queries and cleanup only.
        self.connection = Salesforce(instance_url=os.environ['INSTANCE_URL'],
                                     session_id=os.environ['ACCESS_TOKEN'])

    def tearDown(self):
        # Remove every sObject type either test may have created.
        self.connection.restful(
            'tooling/executeAnonymous', {
                'anonymousBody':
                'delete [SELECT Id FROM Lead]; delete [SELECT Id FROM Product2]; delete [SELECT Id FROM Campaign];'
            })

    def test_extracts_from_command_line(self):
        # In-memory CSV sinks; close() is mocked out so the buffers can be
        # read back after main() returns.
        contact_mock = io.StringIO()
        account_mock = io.StringIO()
        account_mock.close = Mock()
        contact_mock.close = Mock()

        expected_account_names = {'Picon Fleet Headquarters'}
        expected_contact_names = {'Admiral'}

        def select_file(f, *args, **kwargs):
            # Stand-in for builtins.open: routes each expected filename to a
            # canned YAML definition or one of the StringIO sinks above.
            credentials = '''
version: 1
credentials:
    access-token: '{}'
    instance-url: '{}'
'''.format(os.environ['ACCESS_TOKEN'], os.environ['INSTANCE_URL'])
            extraction = '''
version: 1
operation:
    - sobject: Account
      fields:
        - Name
        - Id
        - ParentId
      extract:
        query: "Name = 'Picon Fleet Headquarters'"
    - sobject: Contact
      fields:
        - FirstName
        - LastName
        - AccountId
      extract:
        descendents: True
'''
            m = None
            if f == 'credentials.yaml':
                m = unittest.mock.mock_open(read_data=credentials)(f, *args, **kwargs)
                m.name = f
            elif f == 'extraction.yaml':
                m = unittest.mock.mock_open(read_data=extraction)(f, *args, **kwargs)
                m.name = f
            elif f == 'Account.csv':
                m = account_mock
            elif f == 'Contact.csv':
                m = contact_mock

            return m

        m = Mock(side_effect=select_file)
        # Run the real CLI entry point with mocked argv and open().
        with unittest.mock.patch('builtins.open', m):
            with unittest.mock.patch(
                    'sys.argv',
                    ['amaxa', '-c', 'credentials.yaml', 'extraction.yaml']):
                return_value = main()

        self.assertEqual(0, return_value)

        # Each expected name must appear exactly once (remove() raises on a
        # duplicate; the final length check catches misses).
        account_mock.seek(0)
        account_reader = csv.DictReader(account_mock)
        for row in account_reader:
            self.assertIn(row['Name'], expected_account_names)
            expected_account_names.remove(row['Name'])
        self.assertEqual(0, len(expected_account_names))
        self.assertEqual(set(['Id', 'Name', 'ParentId']),
                         set(account_reader.fieldnames))

        contact_mock.seek(0)
        contact_reader = csv.DictReader(contact_mock)
        for row in contact_reader:
            self.assertIn(row['FirstName'], expected_contact_names)
            expected_contact_names.remove(row['FirstName'])
        self.assertEqual(0, len(expected_contact_names))
        self.assertEqual(set(['FirstName', 'LastName', 'AccountId', 'Id']),
                         set(contact_reader.fieldnames))

    def test_loads_from_command_line(self):
        # To avoid conflict with extract tests, we load Campaigns, Campaign Members, and Leads.
        campaigns = io.StringIO('''
Id,Name,IsActive,ParentId
701000000000001,Tauron Tourist Outreach,true,
701000000000002,Aerilon Outreach,true,701000000000001
701000000000003AAA,Caprica City Direct Mailer,false,701000000000001
'''.strip())
        leads = io.StringIO('''
Id,Company,LastName
00Q000000000001,Picon Fleet Headquarters,Nagata
00Q000000000002,Picon Fleet Headquarters,Adama
00Q000000000003,Ha-La-Tha,Guatrau
00Q000000000004,[not provided],Thrace
'''.strip())
        campaign_members = io.StringIO('''
Id,CampaignId,LeadId,Status
00v000000000001,701000000000001,00Q000000000001,Sent
00v000000000002,701000000000002,00Q000000000002,Sent
00v000000000003,701000000000003,00Q000000000004,Sent
00v000000000004,701000000000001,00Q000000000004,Sent
'''.strip())

        def select_file(f, *args, **kwargs):
            # open() stand-in: YAML configs and CSV inputs by filename; any
            # other path falls through to a generic mock_open handle.
            credentials = '''
version: 1
credentials:
    access-token: '{}'
    instance-url: '{}'
'''.format(os.environ['ACCESS_TOKEN'], os.environ['INSTANCE_URL'])
            load = '''
version: 1
operation:
    - sobject: Campaign
      fields:
        - Name
        - ParentId
        - IsActive
    - sobject: Lead
      fields:
        - LastName
        - Company
    - sobject: CampaignMember
      fields:
        - LeadId
        - CampaignId
        - Status
'''
            m = None
            if f == 'credentials.yaml':
                m = unittest.mock.mock_open(read_data=credentials)(f, *args, **kwargs)
                m.name = f
            elif f == 'load.yaml':
                m = unittest.mock.mock_open(read_data=load)(f, *args, **kwargs)
                m.name = f
            elif f == 'Campaign.csv':
                m = campaigns
            elif f == 'Lead.csv':
                m = leads
            elif f == 'CampaignMember.csv':
                m = campaign_members
            else:
                m = unittest.mock.mock_open()(f, *args, **kwargs)

            return m

        m = Mock(side_effect=select_file)
        with unittest.mock.patch('builtins.open', m):
            with unittest.mock.patch(
                    'sys.argv',
                    ['amaxa', '-c', 'credentials.yaml', '--load', 'load.yaml']):
                return_value = main()

        self.assertEqual(0, return_value)

        # Verify the Campaign hierarchy loaded with ParentId references intact.
        loaded_campaigns = self.connection.query_all(
            'SELECT Name, IsActive, (SELECT Name FROM ChildCampaigns) FROM Campaign'
        ).get('records')
        self.assertEqual(3, len(loaded_campaigns))
        required_names = {
            'Tauron Tourist Outreach', 'Aerilon Outreach',
            'Caprica City Direct Mailer'
        }
        for r in loaded_campaigns:
            self.assertIn(r['Name'], required_names)
            required_names.remove(r['Name'])
            if r['Name'] == 'Tauron Tourist Outreach':
                self.assertEqual(2, len(r['ChildCampaigns']['records']))

        self.assertEqual(0, len(required_names))

        # Verify Leads and their CampaignMember junction rows.
        loaded_leads = self.connection.query_all(
            'SELECT LastName, Company, (SELECT Name FROM CampaignMembers) FROM Lead'
        ).get('records')
        self.assertEqual(4, len(loaded_leads))
        required_names = {'Nagata', 'Adama', 'Guatrau', 'Thrace'}
        for r in loaded_leads:
            self.assertIn(r['LastName'], required_names)
            required_names.remove(r['LastName'])
            if r['LastName'] == 'Nagata':
                self.assertEqual(1, len(r['CampaignMembers']['records']))
            elif r['LastName'] == 'Thrace':
                self.assertEqual(2, len(r['CampaignMembers']['records']))
            # NOTE(review): plain `if` (not elif) in the original -- Adama is
            # checked independently of the chain above.
            if r['LastName'] == 'Adama':
                self.assertEqual(1, len(r['CampaignMembers']['records']))

        self.assertEqual(0, len(required_names))

        loaded_campaign_members = self.connection.query_all(
            'SELECT Id FROM CampaignMember').get('records')
        self.assertEqual(4, len(loaded_campaign_members))
def run_tests():
    """Queue all matching Apex test classes via the Salesforce Tooling API,
    poll until completion, print a report, and optionally write JSON/JUnit
    output and per-class debug logs.

    Python 2 code (print statements, list-returning dict views).
    Configuration comes entirely from environment variables (SF_USERNAME,
    SF_PASSWORD, SF_SERVERURL, APEX_TEST_NAME_MATCH, APEX_TEST_NAME_EXCLUDE,
    NAMESPACE, POLL_INTERVAL, DEBUG_TESTS, DEBUG_LOGDIR, TEST_JSON_OUTPUT,
    TEST_JUNIT_OUTPUT).

    Returns a dict of outcome counts: {'Pass', 'Fail', 'CompileFail', 'Skip'}.
    """
    # --- Configuration from the environment ---
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    # Only the literal strings 'true'/'True' enable debug; the False default
    # never matches the list, so unset means disabled.
    debug = os.environ.get('DEBUG_TESTS', False) in ['true', 'True']
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    junit_output = os.environ.get('TEST_JUNIT_OUTPUT', None)

    if namespace:
        # Quote for interpolation into the SOQL WHERE clause below.
        namespace = "'{0}'".format(namespace,)
    else:
        namespace = 'null'

    # Sandbox orgs log in through test.salesforce.com.
    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True

    sf = Salesforce(username=username, password=password, security_token='',
                    sandbox=sandbox, sf_version='32.0')

    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'

    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '{0}'".format(pattern))

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '{0}')".format(pattern,))

    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = {0}".format(namespace,)
    if where_name:
        query += " AND ({0})".format(' OR '.join(where_name),)
    if where_exclude:
        query += " AND {0}".format(' AND '.join(where_exclude),)

    print "Running Query: {0}".format(query,)
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found {0} classes".format(res['totalSize'],)
    sys.stdout.flush()

    if not res['totalSize']:
        # Nothing matched: return an all-zero summary.
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}

    # Lookup tables keyed by class id / name; populated from the query and
    # filled in as results and logs arrive.
    classes_by_id = {}
    classes_by_name = {}
    trace_id = None
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        print 'Setting up trace flag to capture debug logs'

        # Get the User's id to set a TraceFlag
        # NOTE(review): this format string has no '{0}' placeholder, so the
        # literal '******' is sent and .format(username) is a no-op -- looks
        # like a redacted placeholder; confirm against the original source.
        res_user = sf.query("Select Id from User where Username = '******'".format(username,))
        user_id = res_user['records'][0]['Id']

        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                              .format(instance=sf.sf_instance, object_name='TraceFlag', sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])

        # Flag expires one day from now (timedelta(1) == 1 day).
        expiration = datetime.datetime.now() + datetime.timedelta(1)
        res = TraceFlag.create({
            'ApexCode': 'Info',
            'ApexProfiling': 'Debug',
            'Callout': 'Info',
            'Database': 'Info',
            'ExpirationDate': expiration.isoformat(),
            #'ScopeId': user_id,
            'System': 'Info',
            'TracedEntityId': user_id,
            'Validation': 'Info',
            'Visualforce': 'Info',
            'Workflow': 'Info',
        })
        trace_id = res['id']

        print 'Created TraceFlag for user'

    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous',
                        params={'classids': ','.join(classes_by_id.keys())})

    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '{0}'".format(job_id,))
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1

        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break

        print 'Completed: %(Completed)s Processing: %(Processing)s Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)

    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '{0}'".format(job_id,))

    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        # Remember which log belongs to which class for the debug fetch below.
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']

    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('{0}')".format("','".join([str(id) for id in classes_by_log_id.keys()]),)
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in {0}".format(log_ids,))
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log

            # Fetch the debug log file
            body_url = '{0}sobjects/ApexLog/{1}/Body'.format(sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)

            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file

            # NOTE(review): files opened/closed manually (no `with`); if
            # parse_log raises, the read handle leaks.
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)

            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flag
        TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []
    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            # Convert milliseconds to seconds for display.
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: {0} ({1}s)'.format(class_name, duration)
        else:
            print 'Class: {0}'.format(class_name,)
        sys.stdout.flush()

        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': decode_to_unicode(class_name),
                'Method': decode_to_unicode(result['MethodName']),
                'Message': decode_to_unicode(result['Message']),
                'Outcome': decode_to_unicode(result['Outcome']),
                'StackTrace': decode_to_unicode(result['StackTrace']),
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })

            # Output result for method
            if debug and json_output and result.get('stats') and 'duration' in result['stats']:
                # If debug is enabled and we're generating the json output, include duration with the test
                print u' {0}: {1} ({2}s)'.format(
                    result['Outcome'],
                    result['MethodName'],
                    result['stats']['duration']
                )
            else:
                print u' {Outcome}: {MethodName}'.format(**result)

            if debug and not json_output:
                print u' DEBUG LOG INFO:'
                stats = result.get('stats', None)
                if not stats:
                    print u' No stats found, likely because of debug log size limit'
                else:
                    stat_keys = stats.keys()
                    stat_keys.sort()
                    for stat in stat_keys:
                        # NOTE(review): bare except intentionally falls back to
                        # printing the raw value when a stat is not a
                        # {'used','allowed'} dict; it also hides real errors.
                        try:
                            value = stats[stat]
                            output = u' {0} / {1}'.format(value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except:
                            output = u' {0}'.format(stats[stat],)
                            print output.ljust(26) + stat

            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail', 'CompileFail']:
                print u' Message: {Message}'.format(**result)
                print u' StackTrace: {StackTrace}'.format(**result)
            sys.stdout.flush()

    # --- Summary banner ---
    print u'-------------------------------------------------------------------------------'
    print u'Passed: %(Pass)s Fail: %(Fail)s Compile Fail: %(CompileFail)s Skipped: %(Skip)s' % counts
    print u'-------------------------------------------------------------------------------'
    sys.stdout.flush()

    if counts['Fail'] or counts['CompileFail']:
        print u''
        print u'Failing Tests'
        print u'-------------'
        print u''
        sys.stdout.flush()

        counter = 0
        for result in test_results:
            if result['Outcome'] not in ['Fail', 'CompileFail']:
                continue
            counter += 1
            print u'{0}: {1}.{2} - {3}'.format(counter, result['ClassName'], result['Method'], result['Outcome'])
            print u' Message: {0}'.format(result['Message'],)
            print u' StackTrace: {0}'.format(result['StackTrace'],)
            sys.stdout.flush()

    # Optional machine-readable outputs.
    if json_output:
        f = codecs.open(json_output, encoding='utf-8', mode='w')
        f.write(json.dumps(test_results))
        f.close()

    if junit_output:
        f = codecs.open(junit_output, encoding='utf-8', mode='w')
        f.write('<testsuite tests="{0}">\n'.format(len(test_results)),)
        for result in test_results:
            testcase = ' <testcase classname="{0}" name="{1}"'.format(result['ClassName'], result['Method'])
            if 'Stats' in result and result['Stats'] and 'duration' in result['Stats']:
                testcase = '{0} time="{1}"'.format(testcase, result['Stats']['duration'])
            if result['Outcome'] in ['Fail', 'CompileFail']:
                testcase = '{0}>\n'.format(testcase,)
                testcase = '{0} <failure type="{1}">{2}</failure>\n'.format(
                    testcase,
                    cgi.escape(result['StackTrace']),
                    cgi.escape(result['Message']),
                )
                testcase = '{0} </testcase>\n'.format(testcase,)
            else:
                testcase = '{0} />\n'.format(testcase,)
            f.write(testcase)
        f.write('</testsuite>')
        f.close()

    return counts
def run_tests():
    """Queue and poll Apex tests over the Salesforce Tooling API.

    Python 2. Near-duplicate of the other run_tests() in this file; this copy
    differs by passing ``version='32.0'`` (not ``sf_version``) to Salesforce
    and using a 12-hour trace-flag expiration instead of 1 day.

    Returns a dict of outcome counts: {'Pass', 'Fail', 'CompileFail', 'Skip'}.
    """
    # Configuration is read entirely from environment variables.
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    # Only 'true'/'True' enable debug; the False default never matches.
    debug = os.environ.get('DEBUG_TESTS', False) in ['true', 'True']
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    junit_output = os.environ.get('TEST_JUNIT_OUTPUT', None)

    if namespace:
        # Quote for the SOQL WHERE clause.
        namespace = "'{0}'".format(namespace, )
    else:
        namespace = 'null'

    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True

    sf = Salesforce(username=username, password=password, security_token='',
                    sandbox=sandbox, version='32.0')

    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'

    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '{0}'".format(pattern))

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '{0}')".format(pattern, ))

    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = {0}".format(
        namespace, )
    if where_name:
        query += " AND ({0})".format(' OR '.join(where_name), )
    if where_exclude:
        query += " AND {0}".format(' AND '.join(where_exclude), )

    print "Running Query: {0}".format(query, )
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found {0} classes".format(res['totalSize'], )
    sys.stdout.flush()

    if not res['totalSize']:
        # Nothing matched: all-zero summary.
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}

    # Lookup tables keyed by class id / name.
    classes_by_id = {}
    classes_by_name = {}
    trace_id = None
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        print 'Setting up trace flag to capture debug logs'

        # Get the User's id to set a TraceFlag
        # NOTE(review): no '{0}' placeholder -- the literal '******' is sent
        # and .format(username) is a no-op; looks like a redaction, confirm.
        res_user = sf.query(
            "Select Id from User where Username = '******'".format(username, ))
        user_id = res_user['records'][0]['Id']

        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (
            u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
            .format(instance=sf.sf_instance, object_name='TraceFlag',
                    sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])

        # Flag expires 12 hours from now.
        expiration = datetime.datetime.now() + datetime.timedelta(seconds=60 * 60 * 12)
        res = TraceFlag.create({
            'ApexCode': 'Info',
            'ApexProfiling': 'Debug',
            'Callout': 'Info',
            'Database': 'Info',
            'ExpirationDate': expiration.isoformat(),
            #'ScopeId': user_id,
            'System': 'Info',
            'TracedEntityId': user_id,
            'Validation': 'Info',
            'Visualforce': 'Info',
            'Workflow': 'Info',
        })
        trace_id = res['id']

        print 'Created TraceFlag for user'

    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous',
                        params={'classids': ','.join(classes_by_id.keys())})

    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all(
            "SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '{0}'"
            .format(job_id, ))
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1

        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break

        print 'Completed: %(Completed)s Processing: %(Processing)s Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)

    # Get the test results by method
    res = sf.query_all(
        "SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '{0}'"
        .format(job_id, ))

    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        # Remember which log belongs to which class for the debug fetch below.
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']

    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('{0}')".format(
            "','".join([str(id) for id in classes_by_log_id.keys()]), )
        res = sf.query_all(
            "SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in {0}"
            .format(log_ids, ))
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log

            # Fetch the debug log file
            body_url = '{0}sobjects/ApexLog/{1}/Body'.format(
                sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)

            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file

            # NOTE(review): manual open/close (no `with`); a parse_log error
            # would leak the read handle.
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)

            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flag
        TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []
    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            # Milliseconds -> seconds for display.
            duration = int(
                logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: {0} ({1}s)'.format(class_name, duration)
        else:
            print 'Class: {0}'.format(class_name, )
        sys.stdout.flush()

        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': decode_to_unicode(class_name),
                'Method': decode_to_unicode(result['MethodName']),
                'Message': decode_to_unicode(result['Message']),
                'Outcome': decode_to_unicode(result['Outcome']),
                'StackTrace': decode_to_unicode(result['StackTrace']),
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })

            # Output result for method
            if debug and json_output and result.get(
                    'stats') and 'duration' in result['stats']:
                # If debug is enabled and we're generating the json output, include duration with the test
                print u' {0}: {1} ({2}s)'.format(result['Outcome'],
                                                 result['MethodName'],
                                                 result['stats']['duration'])
            else:
                print u' {Outcome}: {MethodName}'.format(**result)

            if debug and not json_output:
                print u' DEBUG LOG INFO:'
                stats = result.get('stats', None)
                if not stats:
                    print u' No stats found, likely because of debug log size limit'
                else:
                    stat_keys = stats.keys()
                    stat_keys.sort()
                    for stat in stat_keys:
                        # NOTE(review): bare except falls back to printing the
                        # raw value when a stat is not a {'used','allowed'}
                        # dict; it also hides real errors.
                        try:
                            value = stats[stat]
                            output = u' {0} / {1}'.format(
                                value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except:
                            output = u' {0}'.format(stats[stat], )
                            print output.ljust(26) + stat

            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail', 'CompileFail']:
                print u' Message: {Message}'.format(**result)
                print u' StackTrace: {StackTrace}'.format(**result)
            sys.stdout.flush()

    # Summary banner.
    print u'-------------------------------------------------------------------------------'
    print u'Passed: %(Pass)s Fail: %(Fail)s Compile Fail: %(CompileFail)s Skipped: %(Skip)s' % counts
    print u'-------------------------------------------------------------------------------'
    sys.stdout.flush()

    if counts['Fail'] or counts['CompileFail']:
        print u''
        print u'Failing Tests'
        print u'-------------'
        print u''
        sys.stdout.flush()

        counter = 0
        for result in test_results:
            if result['Outcome'] not in ['Fail', 'CompileFail']:
                continue
            counter += 1
            print u'{0}: {1}.{2} - {3}'.format(counter, result['ClassName'],
                                               result['Method'],
                                               result['Outcome'])
            print u' Message: {0}'.format(result['Message'], )
            print u' StackTrace: {0}'.format(result['StackTrace'], )
            sys.stdout.flush()

    # Optional machine-readable outputs.
    if json_output:
        f = codecs.open(json_output, encoding='utf-8', mode='w')
        f.write(json.dumps(test_results))
        f.close()

    if junit_output:
        f = codecs.open(junit_output, encoding='utf-8', mode='w')
        f.write('<testsuite tests="{0}">\n'.format(len(test_results)), )
        for result in test_results:
            testcase = ' <testcase classname="{0}" name="{1}"'.format(
                result['ClassName'], result['Method'])
            if 'Stats' in result and result['Stats'] and 'duration' in result[
                    'Stats']:
                testcase = '{0} time="{1}"'.format(testcase,
                                                   result['Stats']['duration'])
            if result['Outcome'] in ['Fail', 'CompileFail']:
                testcase = '{0}>\n'.format(testcase, )
                testcase = '{0} <failure type="{1}">{2}</failure>\n'.format(
                    testcase,
                    cgi.escape(result['StackTrace']),
                    cgi.escape(result['Message']),
                )
                testcase = '{0} </testcase>\n'.format(testcase, )
            else:
                testcase = '{0} />\n'.format(testcase, )
            f.write(testcase)
        f.write('</testsuite>')
        f.close()

    return counts
class SalesForceModel:
    """Holds a simple_salesforce session plus credential/config state and
    query helpers for cases, attachments and case files.

    The session (``self.sf``) is None until sf_log_in()/re_login() succeeds.
    """

    def __init__(self):
        # Path of the JSON config holding the stored secret key.
        self.config_file = os.path.join(os.path.dirname(__file__), "config.json")
        # BUG FIX: the original line was `self.sf: Salesforce()` -- an
        # annotated declaration whose annotation expression *calls*
        # Salesforce() (failing without credentials) and never assigns the
        # attribute. Initialize to None; sf_log_in()/re_login() set it.
        self.sf = None
        self.user_id = ""
        self.username = ""
        self.password = ""
        self.api_key = ""

    def get_sf_username(self):
        return self.username

    def set_sf_username(self, username):
        self.username = username

    def get_sf_password(self):
        return self.password

    def set_sf_password(self, password):
        self.password = password

    def get_sf_api_key(self):
        return self.api_key

    def set_sf_api_key(self, api_key):
        self.api_key = api_key

    def get_api_secret(self):
        """Read 'secret_key' from config.json; return '' if the file or key
        is missing."""
        api_secret = ''
        config_file = os.path.join(os.path.dirname(__file__), "config.json")
        try:
            with open(config_file, "r") as f:
                data = json.load(f)
                api_secret = data['secret_key']
        except FileNotFoundError as e:
            # BUG FIX: original printed str(FileNotFoundError) -- the class,
            # not the raised instance.
            print("no config file found! " + str(e))
            api_secret = ''
        except KeyError as e:
            print("no such key - secret_key! " + str(e))
            api_secret = ''
        return api_secret

    def update_conf(self, key, value):
        """Read-modify-write a single key in the JSON config file."""
        config_file = self.config_file
        with open(config_file, "r") as cf:
            config_data = json.load(cf)
        config_data.update({key: value})
        with open(config_file, 'w') as cf:
            json.dump(config_data, cf)

    def set_conf_api_secret(self, new_api_key):
        """Persist a new API secret key (delegates to update_conf for
        consistency -- the original duplicated its body)."""
        self.update_conf('secret_key', new_api_key)

    def sf_log_in(self, username=None, password=None, api_key=None):
        """Open a Salesforce session and cache the credentials and user id."""
        self.username = username
        self.password = password
        self.api_key = api_key
        self.sf = Salesforce(username=username, password=password,
                             security_token=api_key, version='50.0')
        self.user_id = self.get_user_id()
        # return self.sf

    def re_login(self):
        """Re-open the session with the previously cached credentials."""
        username = self.get_sf_username()
        password = self.get_sf_password()
        api_key = self.get_sf_api_key()
        self.sf = Salesforce(username=username, password=password,
                             security_token=api_key, version='50.0')
        self.user_id = self.get_user_id()

    def logout(self):
        return self.sf.session.close()

    def get_user(self, info=None):
        """Return the current User record, or a single field of it.

        `info` may be 'Id', 'FirstName', 'LastName', 'Title', etc.
        The user id is the last 18 characters of the identity URL.
        """
        identity_url = self.sf.restful('')['identity']
        user = self.sf.User.get(identity_url[-18:])
        if info != None:
            info = user[info]
            return info
        else:
            return user

    def set_user_id(self, user_id):
        self.user_id = user_id

    def get_user_id(self):
        identity_url = self.sf.restful('')['identity']
        user = self.sf.User.get(identity_url[-18:])
        user_id = user['Id']
        return user_id

    def get_user_fname(self):
        identity_url = self.sf.restful('')['identity']
        user = self.sf.User.get(identity_url[-18:])
        user_fname = user['FirstName']
        return user_fname

    def get_user_lname(self):
        identity_url = self.sf.restful('')['identity']
        user = self.sf.User.get(identity_url[-18:])
        user_lname = user['LastName']
        return user_lname

    def get_user_title(self):
        identity_url = self.sf.restful('')['identity']
        user = self.sf.User.get(identity_url[-18:])
        user_title = user['Title']
        return user_title

    def get_current_case_list(self, sf=None):
        """Open cases owned by the current user.

        NOTE(review): queries are built by string concatenation; safe only
        while the ids come from Salesforce itself.
        """
        userid = self.get_user_id()
        query = "SELECT Id, CaseNumber, OwnerId, Status, Subject FROM Case WHERE OwnerId = '" + \
            userid + "' AND (Status = 'Active' OR Status='New' OR Status='Re-opened' OR Status='On Hold')"
        # Renamed from `list` to avoid shadowing the builtin.
        result = sf.query(query) if sf != None else self.sf.query(query)
        return result['records']

    def get_attachment_list(self, case_id, sf=None):
        """Attachments attached directly to a case."""
        query = """SELECT Id, Name, Body, BodyLength, CreatedDate, LastModifiedDate, Description FROM Attachment WHERE ParentId = '""" + case_id + "'"
        result = sf.query(query) if sf != None else self.sf.query(query)
        return result['records']

    def get_sfile_list(self, case_id, sf=None):
        """CaseFile custom-object records (cg__CaseFile__c) for a case."""
        query = """SELECT Id, Name, cg__Case__c, cg__File_Name__c, cg__File_Size_in_Bytes__c, cg__File_Size__c, CreatedDate, LastModifiedDate, cg__Description__c, cg__Parent_Folder_Id__c, cg__Key__c FROM cg__CaseFile__c WHERE cg__Case__c = '""" + case_id + "'"
        result = sf.query(query) if sf != None else self.sf.query(query)
        return result['records']

    def get_case_by_id(self, case_id, sf=None):
        if sf != None:
            case_obj = sf.Case.get(case_id)
        else:
            case_obj = self.sf.Case.get(case_id)
        return case_obj

    def get_case_by_number(self, case_number, sf=None):
        """First open case owned by the current user with this case number."""
        query = "SELECT Id, CaseNumber, OwnerId, Status, Subject FROM Case WHERE OwnerId = '" + \
            self.user_id + "' AND (Status = 'Active' OR Status='New' OR Status='Re-opened') AND (CaseNumber = '" + case_number + "')"
        case_dic = sf.query(query) if sf != None else self.sf.query(query)
        return case_dic['records'][0]

    def get_case_id(self, case_number, sf=None):
        query = "SELECT Id, CaseNumber, OwnerId, Status, Subject FROM Case WHERE OwnerId = '" + \
            self.user_id + "' AND (Status = 'Active' OR Status='New' OR Status='Re-opened') AND (CaseNumber = '" + case_number + "')"
        case_dic = sf.query(query) if sf != None else self.sf.query(query)
        return case_dic['records'][0]['Id']

    def check_session(self, sf=None):
        # NOTE(review): `sf` parameter is accepted but unused; kept for
        # interface compatibility.
        return self.sf.restful('')['identity']

    def download_file(self, file_path, attach_name, attach_id, attach_body,
                      attach_length, current_row_number):
        """Stream an attachment body to `file_path`, then reveal it in
        Windows Explorer.

        NOTE(review): intended to run on a worker thread with a progress bar
        (see commented-out QProgressBar scaffolding in the original); the
        base URL is hard-coded to the tableau org.
        """
        completed = 0
        total = attach_length
        base = 'https://tableau.my.salesforce.com/'
        url = base + attach_body
        result = self.sf._call_salesforce(method='GET', url=url)
        try:
            with open(file_path, 'wb+') as file:
                total = int(total)
                # Retrieve the bytes from the response incrementally, in
                # chunks of ~0.1% of the total (at least 1 MiB).
                for chunk in result.iter_content(
                        chunk_size=max(int(total / 1000), 1024 * 1024)):
                    completed += len(chunk)
                    file.write(chunk)
                    done = int(50 * completed / total)
                    print(str(completed))
                    print(str(done))
            print("Download Completed!")
            file_path = file_path.replace("/", "\\")
            # Windows specific function
            open_ex_file = r'explorer /select, "' + file_path + '"'
            subprocess.Popen(open_ex_file)
        except Exception as e:
            print(str(e))

    def create_folder(self, path):
        """Create `path` (and parents); return a status string.

        NOTE(review): relies on OSError.winerror (183 == ERROR_ALREADY_EXISTS),
        so the already-exists branch only works on Windows -- confirm target
        platform.
        """
        print(path)
        try:
            os.makedirs(path)
        except OSError as e:
            if e.winerror == 183:
                return "folder_alreay_exist"
            else:
                return str(e)
        else:
            return "folder_created"
def run_tests(): username = os.environ.get('SF_USERNAME') password = os.environ.get('SF_PASSWORD') serverurl = os.environ.get('SF_SERVERURL') test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST') namespace = os.environ.get('NAMESPACE', None) poll_interval = int(os.environ.get('POLL_INTERVAL', 10)) debug = os.environ.get('DEBUG_TESTS',False) == 'true' debug_logdir = os.environ.get('DEBUG_LOGDIR') if namespace: namespace = "'%s'" % namespace else: namespace = 'null' sandbox = False if serverurl.find('test.salesforce.com') != -1: sandbox = True sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0') # Change base_url to use the tooling api sf.base_url = sf.base_url + 'tooling/' # Split test_name_match by commas to allow multiple class name matching options where_name = [] for pattern in test_name_match.split(','): where_name.append("Name LIKE '%s'" % pattern) # Get all test classes for namespace query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s and (%s)" % (namespace, ' OR '.join(where_name)) print "Running Query: %s" % query sys.stdout.flush() res = sf.query_all("SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s and (%s)" % (namespace, ' OR '.join(where_name))) print "Found %s classes" % res['totalSize'] sys.stdout.flush() if not res['totalSize']: return {'Pass': 0, 'Failed': 0, 'CompileFail': 0, 'Skip': 0} classes_by_id = {} classes_by_name = {} traces_by_class_id = {} results_by_class_name = {} classes_by_log_id = {} logs_by_class_id = {} for cls in res['records']: classes_by_id[cls['Id']] = cls['Name'] classes_by_name[cls['Name']] = cls['Id'] results_by_class_name[cls['Name']] = {} # If debug is turned on, setup debug traces for all test classes if debug: expiration = datetime.datetime.now() + datetime.timedelta(0,3600) for class_id in classes_by_id.keys(): TraceFlag = sf.TraceFlag TraceFlag.base_url = 
(u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/' .format(instance=sf.sf_instance, object_name='TraceFlag', sf_version=sf.sf_version)) res = TraceFlag.create({ 'ApexCode': 'DEBUG', 'ApexProfiling': 'DEBUG', 'Callout': 'DEBUG', 'Database': 'DEBUG', 'ExpirationDate': expiration.isoformat(), #'ScopeId': class_id, 'System': 'DEBUG', 'TracedEntityId': class_id, 'Validation': 'DEBUG', 'Visualforce': 'DEBUG', 'Workflow': 'DEBUG', }) traces_by_class_id[class_id] = res['id'] # Run all the tests print "Queuing tests for execution..." sys.stdout.flush() job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())}) # Loop waiting for the tests to complete while True: res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '%s'" % job_id) counts = { 'Queued': 0, 'Processing': 0, 'Aborted': 0, 'Completed': 0, 'Failed': 0, 'Preparing': 0, 'Holding': 0, } for item in res['records']: counts[item['Status']] += 1 # If all tests have run, break from the loop if not counts['Queued'] and not counts['Processing']: print '' print '-------------------------------------------------------------------------------' print 'Test Results' print '-------------------------------------------------------------------------------' sys.stdout.flush() break print 'Completed: %(Completed)s Processing: %(Processing)s Queued: %(Queued)s' % counts sys.stdout.flush() sleep(poll_interval) # Get the test results by method res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId FROM ApexTestResult WHERE AsyncApexJobId = '%s'" % job_id) counts = { 'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0, } for result in res['records']: class_name = classes_by_id[result['ApexClassId']] results_by_class_name[class_name][result['MethodName']] = result counts[result['Outcome']] += 1 if debug: classes_by_log_id[result['ApexLogId']] = result['ApexClassId'] # Fetch debug 
logs if debug is enabled if debug: log_ids = "('%s')" % "','".join([str(id) for id in classes_by_log_id.keys()]) res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in %s" % log_ids) for log in res['records']: class_id = classes_by_log_id[log['Id']] class_name = classes_by_id[class_id] logs_by_class_id[class_id] = log # Fetch the debug log file body_url = '%ssobjects/ApexLog/%s/Body' % (sf.base_url, log['Id']) resp = sf.request.get(body_url, headers=sf.headers) log_file = class_name + '.log' if debug_logdir: log_file = debug_logdir + os.sep + log_file f = open(log_file, 'w') f.write(resp.content) f.close() # Parse stats from the log file f = open(log_file, 'r') method_stats = parse_log(f) # Add method stats to results_by_class_name for method, stats in method_stats.items(): results_by_class_name[class_name][method]['stats'] = stats # Expire the trace flags for trace_id in traces_by_class_id.values(): TraceFlag.update(trace_id, {'ExpirationDate': datetime.datetime.now().isoformat()}) class_names = results_by_class_name.keys() class_names.sort() for class_name in class_names: class_id = classes_by_name[class_name] if debug: duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001 print 'Class: %s (%ss)' % (class_name, duration) else: print 'Class: %s' % class_name sys.stdout.flush() method_names = results_by_class_name[class_name].keys() method_names.sort() for method_name in method_names: result = results_by_class_name[class_name][method_name] # Output result for method print ' %(Outcome)s: %(MethodName)s' % result if debug: print ' DEBUG LOG INFO:' stat_keys = result['stats'].keys() stat_keys.sort() for stat in stat_keys: try: value = result['stats'][stat] output = ' %s / %s' % (value['used'], value['allowed']) print output.ljust(26) + stat except: output = ' %s' % result['stats'][stat] print output.ljust(26) + stat # Print message and stack 
trace if failed if result['Outcome'] in ['Fail','CompileFail']: print ' Message: %(Message)s' % result print ' StackTrace: %(StackTrace)s' % result sys.stdout.flush() print '-------------------------------------------------------------------------------' print 'Passed: %(Pass)s Fail: %(Fail)s Compile Fail: %(CompileFail)s Skipped: %(Skip)s' % counts print '-------------------------------------------------------------------------------' sys.stdout.flush() return counts
class test_Integration_Load(unittest.TestCase):
    """Integration tests that load records into a live Salesforce org."""

    def setUp(self):
        # Credentials are supplied via the environment by the CI harness.
        self.connection = Salesforce(
            instance_url=os.environ['INSTANCE_URL'],
            session_id=os.environ['ACCESS_TOKEN'])

    def tearDown(self):
        # Wipe everything the tests inserted so runs stay independent.
        self.connection.restful(
            'tooling/executeAnonymous',
            {
                'anonymousBody': 'delete [SELECT Id FROM Lead]; delete [SELECT Id FROM Product2]; delete [SELECT Id FROM Campaign];'
            })

    def test_loads_single_object(self):
        # To avoid conflict, we load an object (Product2) not used in other
        # load or extract tests.
        product_records = [
            {'Id': '01t000000000001', 'Name': 'Tauron Taffy',
             'IsActive': 'True', 'ProductCode': 'TAFFY_TAUR'},
            {'Id': '01t000000000002', 'Name': 'Gemenese Goulash',
             'IsActive': 'True', 'ProductCode': 'GLSH'},
            {'Id': '01t000000000003AAA', 'Name': 'CapricaCorn',
             'IsActive': 'False', 'ProductCode': 'CPRCC'},
        ]

        load_op = amaxa.LoadOperation(self.connection)
        load_op.file_store = MockFileStore()
        load_op.file_store.records['Product2'] = product_records
        load_op.add_step(
            amaxa.LoadStep('Product2',
                           {'Name', 'IsActive', 'ProductCode', 'Description'}))

        load_op.initialize()
        load_op.execute()

        fetched = self.connection.query_all(
            'SELECT Name, IsActive, ProductCode FROM Product2').get('records')
        self.assertEqual(3, len(fetched))

        # Every record we loaded must come back exactly once.
        pending_names = {rec['Name'] for rec in product_records}
        for row in fetched:
            self.assertIn(row['Name'], pending_names)
            pending_names.remove(row['Name'])
        self.assertEqual(0, len(pending_names))

    def test_loads_complex_hierarchy(self):
        # To avoid conflict with other load tests and with extract tests,
        # we load Campaigns, Campaign Members, and Leads.
        # Campaign has a self-lookup, ParentId
        campaign_records = [
            {'Id': '701000000000001', 'Name': 'Tauron Tourist Outreach',
             'IsActive': 'True', 'ParentId': ''},
            {'Id': '701000000000002', 'Name': 'Aerilon Outreach',
             'IsActive': 'True', 'ParentId': '701000000000001'},
            {'Id': '701000000000003AAA', 'Name': 'Caprica City Direct Mailer',
             'IsActive': 'False', 'ParentId': '701000000000001'},
        ]
        lead_records = [
            {'Id': '00Q000000000001', 'Company': 'Picon Fleet Headquarters',
             'LastName': 'Nagata'},
            {'Id': '00Q000000000002', 'Company': 'Picon Fleet Headquarters',
             'LastName': 'Adama'},
            {'Id': '00Q000000000003', 'Company': 'Ha-La-Tha',
             'LastName': 'Guatrau'},
            {'Id': '00Q000000000004', 'Company': '[not provided]',
             'LastName': 'Thrace'},
        ]
        member_records = [
            {'Id': '00v000000000001', 'CampaignId': '701000000000001',
             'LeadId': '00Q000000000001', 'Status': 'Sent'},
            {'Id': '00v000000000002', 'CampaignId': '701000000000002',
             'LeadId': '00Q000000000002', 'Status': 'Sent'},
            {'Id': '00v000000000003', 'CampaignId': '701000000000003',
             'LeadId': '00Q000000000004', 'Status': 'Sent'},
            {'Id': '00v000000000004', 'CampaignId': '701000000000001',
             'LeadId': '00Q000000000004', 'Status': 'Sent'},
        ]

        load_op = amaxa.LoadOperation(self.connection)
        load_op.file_store = MockFileStore()
        load_op.file_store.records['Campaign'] = campaign_records
        load_op.file_store.records['Lead'] = lead_records
        load_op.file_store.records['CampaignMember'] = member_records

        # Parents must be loaded before the junction object that references them.
        load_op.add_step(
            amaxa.LoadStep('Campaign', {'Name', 'IsActive', 'ParentId'}))
        load_op.add_step(amaxa.LoadStep('Lead', {'Company', 'LastName'}))
        load_op.add_step(
            amaxa.LoadStep('CampaignMember', {'CampaignId', 'LeadId', 'Status'}))

        load_op.initialize()
        load_op.execute()

        loaded_campaigns = self.connection.query_all(
            'SELECT Name, IsActive, (SELECT Name FROM ChildCampaigns) FROM Campaign'
        ).get('records')
        self.assertEqual(3, len(loaded_campaigns))

        pending_names = {rec['Name'] for rec in campaign_records}
        for row in loaded_campaigns:
            self.assertIn(row['Name'], pending_names)
            pending_names.remove(row['Name'])
            # The root campaign should have picked up both children.
            if row['Name'] == 'Tauron Tourist Outreach':
                self.assertEqual(2, len(row['ChildCampaigns']['records']))
        self.assertEqual(0, len(pending_names))

        loaded_leads = self.connection.query_all(
            'SELECT LastName, Company, (SELECT Name FROM CampaignMembers) FROM Lead'
        ).get('records')
        self.assertEqual(4, len(loaded_leads))

        # Expected campaign-member count per lead (Guatrau has none to check).
        expected_member_counts = {'Nagata': 1, 'Thrace': 2, 'Adama': 1}
        pending_names = {rec['LastName'] for rec in lead_records}
        for row in loaded_leads:
            self.assertIn(row['LastName'], pending_names)
            pending_names.remove(row['LastName'])
            if row['LastName'] in expected_member_counts:
                self.assertEqual(expected_member_counts[row['LastName']],
                                 len(row['CampaignMembers']['records']))
        self.assertEqual(0, len(pending_names))

        loaded_members = self.connection.query_all(
            'SELECT Id FROM CampaignMember').get('records')
        self.assertEqual(4, len(loaded_members))
class SalesforceQ:
    """Thin convenience wrapper around simple_salesforce.Salesforce.

    Authenticates with username/password plus either a security token or an
    organizationId, exposes typed sub-APIs (Account, Contact, Case,
    Opportunity, Contract), SOQL entry points (query/query_more/query_all/
    search), and direct REST helpers (get/post/patch/delete) that print API
    errors and return False instead of raising.
    """

    def __init__(self,
                 instance=None,
                 instance_url=None,
                 username=None,
                 password=None,
                 security_token=None,
                 organizationId=None,
                 domain='login'):
        self.username = escape(username) if username else None
        self.password = escape(password) if password else None
        self.instance = escape(instance) if instance else None
        self.instance_url = escape(instance_url) if instance_url else None
        self.sf = None

        # BUGFIX: the original condition mixed `and`/`or` without parentheses
        # (and tested `self.instance is not None` twice), so e.g. an
        # instance_url alone — with no credentials — passed the check.
        # Credentials are always required, plus some instance identifier.
        # NOTE(review): instance_url is stored but never passed to
        # Salesforce(); only `instance` is — confirm whether that is intended.
        if (self.username is not None and self.password is not None
                and (self.instance is not None
                     or self.instance_url is not None)):
            if security_token is not None:
                self.security_token = security_token
                self.sf = Salesforce(instance=self.instance,
                                     username=self.username,
                                     password=self.password,
                                     security_token=self.security_token,
                                     domain=domain)
            elif organizationId is not None:
                self.organizationId = organizationId
                self.sf = Salesforce(instance=self.instance,
                                     username=self.username,
                                     password=self.password,
                                     organizationId=self.organizationId,
                                     domain=domain)

            if self.sf is None:
                # Neither a security token nor an organizationId was given.
                raise SalesforceAuthenticationFailed(
                    'INVALID AUTH',
                    'You must submit username and password either a security token or '
                    'organizationId for authentication')

            # Typed sub-APIs, available once a session exists.
            self.Account = Account(self.sf)
            self.Contact = Contact(self.sf)
            self.Case = Case(self.sf)
            self.Opportunity = Opportunity(self.sf)
            self.Contract = Contract(self.sf)

            # SOQL entry points, e.g.:
            #   .query("SELECT Id, Name FROM Contact WHERE LastName = 'Adam'")
            #   .query_more("01gD0000002HU6KIAW-2000")
            #   .query_more("/services/data/v26.0/query/01gD0000002HU6KIAW-2000", True)
            #   .query_all("SELECT Id, Email FROM Contact WHERE LastName = 'Jones'")
            self.query = self.sf.query
            self.query_more = self.sf.query_more
            self.query_all = self.sf.query_all
            self.search = self.sf.search
        else:
            raise SalesforceAuthenticationFailed(
                'INVALID AUTH',
                'You must submit username and password either a security token or '
                'organizationId for authentication')

    def _restful(self, method, path, params=None, **kwargs):
        """Shared implementation for get/post/patch/delete.

        Arguments:
        * method: HTTP verb ('GET', 'POST', 'PATCH', 'DELETE')
        * path: The path of the request
            Example: sobjects/User/ABC123/password'
        * params: dict of parameters to pass to the path
        * other arguments supported by requests.request (e.g. json, timeout)

        :return JSON / False if an issue has occurred (error is printed)
        """
        try:
            return self.sf.restful(path=path,
                                   params=params,
                                   method=method,
                                   **kwargs)
        except SalesforceResourceNotFound as e:
            print("[{method}]{errorCode}: Resource {name} not found. {message}".
                  format(method=method,
                         message=e.content[0]['message'],
                         name=e.resource_name,
                         errorCode=e.content[0]['errorCode']))
            return False
        except SalesforceMalformedRequest as e:
            # e.g. a deletion failed because the record is still referenced.
            print("[{method}]{errorCode}: Malformed request {url}. {message}".
                  format(method=method,
                         message=e.content[0]['message'],
                         url=e.url,
                         errorCode=e.content[0]['errorCode']))
            return False
        except Exception as e:
            print("Something went wrong!")
            print(e)
            return False

    def get(self, path, params=None, **kwargs):
        """Direct GET REST call for a known path.

        EXAMPLE:
        .get(path='sobjects/Account/0017j00000VLkZtAAL', params={"fields": "Name"})

        :return JSON / False if issue has occurred
        """
        return self._restful('GET', path, params, **kwargs)

    def post(self, path, params=None, **kwargs):
        """Direct POST REST call for a known path.

        EXAMPLE:
        .post(path='sobjects/Account', params=None, json={"Name": "MyREST Test account"})

        :return JSON / False if issue has occurred
        """
        return self._restful('POST', path, params, **kwargs)

    def patch(self, path, params=None, **kwargs):
        """Direct PATCH REST call for a known path.

        EXAMPLE:
        .patch(path='sobjects/Account', params=None, json={"Name": "MyREST Test account"})

        :return JSON / False if issue has occurred
        """
        return self._restful('PATCH', path, params, **kwargs)

    def delete(self, path, params=None, **kwargs):
        """Direct DELETE REST call for a known path.

        EXAMPLE:
        .delete(path='sobjects/Account/recordId')

        :return JSON / False if issue has occurred
        """
        return self._restful('DELETE', path, params, **kwargs)

    def get_sobject(self, sobject=None, sobject_id=None):
        """Fetch one or many records of an sobject type by Id.

        Arguments:
        * sobject: sobject type name, e.g. 'Account'
        * sobject_id: a single record Id (str) or a list of record Ids

        :return list of parsed sobjects / False if an API issue occurred /
                None when sobject_id is neither a str nor a list
        """
        try:
            if isinstance(sobject_id, str):
                sobject_data = self.__getattr__(sobject).get(sobject_id)
                return [Parser.parse(sobject_data)]
            elif isinstance(sobject_id, list):
                sobjects_data = []
                for sobject_sid in sobject_id:
                    sobject_data = self.__getattr__(sobject).get(sobject_sid)
                    sobjects_data.append(Parser.parse(sobject_data))
                return sobjects_data
        except SalesforceResourceNotFound as e:
            print("[GET]{errorCode}: Resource {name} not found. {message}".
                  format(message=e.content[0]['message'],
                         name=e.resource_name,
                         errorCode=e.content[0]['errorCode']))
            return False
        except SalesforceMalformedRequest as e:
            print(
                "[GET]{errorCode}: Malformed request {url}. {message}".format(
                    message=e.content[0]['message'],
                    url=e.url,
                    errorCode=e.content[0]['errorCode']))
            return False
        except Exception as e:
            print("Something went wrong!")
            print(e)
            return False

    def get_sobject_type(self, sobject_id):
        """Get sobject type by ID via the record-ui layouts metadata."""
        try:
            res = self.get(path=f'ui-api/record-ui/{sobject_id}')
            od = collections.OrderedDict(
                sorted(res['layouts'].items(), key=lambda x: x[1]))
            return list(od.keys())[0]
        except SalesforceResourceNotFound as e:
            print("[GET]{errorCode}: Resource {name} not found. {message}".
                  format(message=e.content[0]['message'],
                         name=e.resource_name,
                         errorCode=e.content[0]['errorCode']))
            return False
        except SalesforceMalformedRequest as e:
            print(
                "[GET]{errorCode}: Malformed request {url}. {message}".format(
                    message=e.content[0]['message'],
                    url=e.url,
                    errorCode=e.content[0]['errorCode']))
            return False
        except Exception as e:
            # NOTE: self.get already swallows API errors and returns False,
            # so a failed fetch surfaces here as a TypeError on res['layouts'].
            print("Something went wrong!")
            print(e)
            return False

    def __getattr__(self, name):
        """
        Every salesforce object without a written class can still access the
        following functions:
        metadata / describe / describe_layout / get / get_by_custom_id /
        create / upsert / update / delete / deleted / updated
        More information at SFType Class:
        https://github.com/simple-salesforce/simple-salesforce/blob/5d921f3dd32a69472b31d435544ce9c5a1d5eba3/simple_salesforce/api.py#L638
        :param name: sobject name
        :return: SFType object
        """
        return self.sf.__getattr__(name)
class SFClient:
    """Salesforce helper built on simple_salesforce for account/contact
    reporting and Process Builder (FlowDefinition) management."""

    def __init__(self, username: str, password: str, sec_token: str):
        self.client = Salesforce(username=username,
                                 password=password,
                                 security_token=sec_token)
        # Maximum number of records Salesforce returns per query batch.
        self.batch_size_limit = 2000
        # (Removed a no-op `self.client.session_id` expression statement.)

    @staticmethod
    def from_config():
        """Build a client from the credentials in the config module."""
        return SFClient(config.Salesforce.username, config.Salesforce.password,
                        config.Salesforce.security_token)

    def execute_query(self, soql: str):
        """Run a SOQL query, following pagination, and return the raw response."""
        return self.client.query_all(soql)

    def get_accounts(self, limit: int = 0):
        """Return active client accounts with their distinct transcript types.

        Each element is {'id', 'name', 'ts_prov', 'ts_types'} where ts_types
        is the de-duplicated, order-preserving list of Transcripts_Type__c
        values of the account's active/trial contacts.

        :param limit: optional cap on the number of accounts queried (0 = all)
        """
        soql = "SELECT AccountId, Transcripts_Type__c, count(id) FROM Contact " \
               "WHERE Status_of_User__c IN ('Active', 'Trial') GROUP BY AccountId, Transcripts_Type__c"
        print('Downloading Contact Agg from Salesforce...', end='')
        response = self.client.query_all(soql)
        print('Done!')
        contact_agg = response['records']

        soql = "SELECT Id, Name, Transcript_Provider__c FROM Account " \
               "WHERE Customer_Status__c IN ('Client', 'Broker Client', 'Paid Trial')"
        if limit > 0:
            soql += f" LIMIT {limit}"
        print('Downloading Accounts from Salesforce...', end='')
        response = self.client.query_all(soql)
        print('Done!')
        accounts = response['records']

        # PERF: group the aggregates by account in one pass (O(n + m)) instead
        # of rescanning the whole aggregate list for every account (O(n * m)).
        # Membership check keeps the de-dup + ordering of the original code.
        types_by_account = {}
        for agg in contact_agg:
            bucket = types_by_account.setdefault(agg['AccountId'], [])
            if agg['Transcripts_Type__c'] not in bucket:
                bucket.append(agg['Transcripts_Type__c'])

        accts = []
        for account in accounts:
            accts.append({
                "id": account['Id'],
                "name": account['Name'],
                "ts_prov": account['Transcript_Provider__c'],
                "ts_types": list(types_by_account.get(account['Id'], [])),
            })
        return accts

    def get_contacts(self, limit: int = 0):
        """Return all contacts that have an email address.

        :param limit: optional cap on the number of contacts queried (0 = all)
        """
        soql = "SELECT Id, AccountId, Email FROM Contact WHERE Email != null"
        if limit > 0:
            soql += f" LIMIT {limit}"
        print('Downloading Contacts from Salesforce...', end='')
        response = self.client.query_all(soql)
        print('done!')
        return list(response['records'])

    def get_all_pb_processes(self):
        """Return every Process Builder flow definition keyed by Id."""
        query = 'Select Id,ActiveVersion.VersionNumber,LatestVersion.VersionNumber,DeveloperName From FlowDefinition'
        response = self.query_tooling_api(query)
        return {pb['Id']: pb for pb in response['records']}

    def query_tooling_api(self, query):
        """Run a SOQL query against the Tooling API and return the raw JSON."""
        cleaned_query = urllib.parse.quote_plus(query)
        data = self.client.restful(path=f'tooling/query/?q={cleaned_query}')
        return data

    def toggle_pb_process(self, process_id, version_num=None):
        """Activate (version_num set) or deactivate (None) a Process Builder flow."""
        pb = {'Metadata': {'activeVersionNumber': version_num}}
        pb_str = jsonpickle.encode(pb, unpicklable=False)
        try:
            # The response coming from Salesforce is apparently malformed and
            # fails to parse properly, so the JSON error is expected and ignored.
            self.client.restful(
                path=f'tooling/sobjects/FlowDefinition/{process_id}/',
                method='PATCH',
                data=pb_str)
        except Exception as ex:
            if 'Expecting value' not in str(ex):
                print(ex)
def run_tests(): username = os.environ.get('SF_USERNAME') password = os.environ.get('SF_PASSWORD') serverurl = os.environ.get('SF_SERVERURL') test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST') test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '') namespace = os.environ.get('NAMESPACE', None) poll_interval = int(os.environ.get('POLL_INTERVAL', 10)) debug = os.environ.get('DEBUG_TESTS',False) == 'true' debug_logdir = os.environ.get('DEBUG_LOGDIR') json_output = os.environ.get('TEST_JSON_OUTPUT', None) if namespace: namespace = "'%s'" % namespace else: namespace = 'null' sandbox = False if serverurl.find('test.salesforce.com') != -1: sandbox = True sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0') # Change base_url to use the tooling api sf.base_url = sf.base_url + 'tooling/' # Split test_name_match by commas to allow multiple class name matching options where_name = [] for pattern in test_name_match.split(','): if pattern: where_name.append("Name LIKE '%s'" % pattern) # Add any excludes to the where clause where_exclude = [] for pattern in test_name_exclude.split(','): if pattern: where_exclude.append("(NOT Name LIKE '%s')" % pattern) # Get all test classes for namespace query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s" % namespace if where_name: query += " AND (%s)" % ' OR '.join(where_name) if where_exclude: query += " AND %s" % ' AND '.join(where_exclude) print "Running Query: %s" % query sys.stdout.flush() res = sf.query_all(query) print "Found %s classes" % res['totalSize'] sys.stdout.flush() if not res['totalSize']: return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0} classes_by_id = {} classes_by_name = {} traces_by_class_id = {} results_by_class_name = {} classes_by_log_id = {} logs_by_class_id = {} for cls in res['records']: classes_by_id[cls['Id']] = cls['Name'] classes_by_name[cls['Name']] = cls['Id'] results_by_class_name[cls['Name']] = {} # If debug 
is turned on, setup debug traces for all test classes if debug: # Set up a simple-salesforce sobject for TraceFlag using the tooling api TraceFlag = sf.TraceFlag TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/' .format(instance=sf.sf_instance, object_name='TraceFlag', sf_version=sf.sf_version)) # First, delete any old trace flags still lying around tf_res = sf.query('Select Id from TraceFlag') if tf_res['totalSize']: for tf in tf_res['records']: TraceFlag.delete(tf['Id']) expiration = datetime.datetime.now() + datetime.timedelta(1) for class_id in classes_by_id.keys(): res = TraceFlag.create({ 'ApexCode': 'Error', 'ApexProfiling': 'Debug', 'Callout': 'Error', 'Database': 'Error', 'ExpirationDate': expiration.isoformat(), #'ScopeId': class_id, 'System': 'Error', 'TracedEntityId': class_id, 'Validation': 'Error', 'Visualforce': 'Error', 'Workflow': 'Error', }) traces_by_class_id[class_id] = res['id'] # Run all the tests print "Queuing tests for execution..." 
sys.stdout.flush() job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())}) # Loop waiting for the tests to complete while True: res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '%s'" % job_id) counts = { 'Queued': 0, 'Processing': 0, 'Aborted': 0, 'Completed': 0, 'Failed': 0, 'Preparing': 0, 'Holding': 0, } for item in res['records']: counts[item['Status']] += 1 # If all tests have run, break from the loop if not counts['Queued'] and not counts['Processing']: print '' print '-------------------------------------------------------------------------------' print 'Test Results' print '-------------------------------------------------------------------------------' sys.stdout.flush() break print 'Completed: %(Completed)s Processing: %(Processing)s Queued: %(Queued)s' % counts sys.stdout.flush() sleep(poll_interval) # Get the test results by method res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '%s'" % job_id) counts = { 'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0, } for result in res['records']: class_name = classes_by_id[result['ApexClassId']] results_by_class_name[class_name][result['MethodName']] = result counts[result['Outcome']] += 1 if debug and result['ApexLogId']: classes_by_log_id[result['ApexLogId']] = result['ApexClassId'] # Fetch debug logs if debug is enabled if debug: log_ids = "('%s')" % "','".join([str(id) for id in classes_by_log_id.keys()]) res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in %s" % log_ids) for log in res['records']: class_id = classes_by_log_id[log['Id']] class_name = classes_by_id[class_id] logs_by_class_id[class_id] = log # Fetch the debug log file body_url = '%ssobjects/ApexLog/%s/Body' % (sf.base_url, log['Id']) resp = 
sf.request.get(body_url, headers=sf.headers) log_file = class_name + '.log' if debug_logdir: log_file = debug_logdir + os.sep + log_file f = open(log_file, 'w') f.write(resp.content) f.close() # Parse stats from the log file f = open(log_file, 'r') method_stats = parse_log(class_name, f) # Add method stats to results_by_class_name for method, info in method_stats.items(): results_by_class_name[class_name][method].update(info) # Delete the trace flags for trace_id in traces_by_class_id.values(): TraceFlag.delete(trace_id) # Build an OrderedDict of results test_results = [] class_names = results_by_class_name.keys() class_names.sort() for class_name in class_names: class_id = classes_by_name[class_name] duration = None if debug and class_id in logs_by_class_id: duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001 print 'Class: %s (%ss)' % (class_name, duration) else: print 'Class: %s' % class_name sys.stdout.flush() method_names = results_by_class_name[class_name].keys() method_names.sort() for method_name in method_names: result = results_by_class_name[class_name][method_name] test_results.append({ 'Children': result.get('children', None), 'ClassName': class_name, 'Method': result['MethodName'], 'Message': result['Message'], 'Outcome': result['Outcome'], 'StackTrace': result['StackTrace'], 'Stats': result.get('stats', None), 'TestTimestamp': result.get('TestTimestamp', None), }) # Output result for method print ' %(Outcome)s: %(MethodName)s' % result if debug: print ' DEBUG LOG INFO:' stats = result.get('stats',None) if not stats: print ' No stats found, likely because of debug log size limit' else: stat_keys = stats.keys() stat_keys.sort() for stat in stat_keys: try: value = stats[stat] output = ' %s / %s' % (value['used'], value['allowed']) print output.ljust(26) + stat except: output = ' %s' % stats[stat] print output.ljust(26) + stat # Print message and stack trace if failed if result['Outcome'] in ['Fail','CompileFail']: print ' Message: 
%(Message)s' % result print ' StackTrace: %(StackTrace)s' % result sys.stdout.flush() print '-------------------------------------------------------------------------------' print 'Passed: %(Pass)s Fail: %(Fail)s Compile Fail: %(CompileFail)s Skipped: %(Skip)s' % counts print '-------------------------------------------------------------------------------' sys.stdout.flush() if json_output: f = open(json_output, 'w') f.write(json.dumps(test_results)) f.close() return counts