Example #1
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS',False) in ['true','True']
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    junit_output = os.environ.get('TEST_JUNIT_OUTPUT', None)
    
    if namespace:
        namespace = "'{0}'".format(namespace,)
    else:
        namespace = 'null'
    
    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True
    
    sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, sf_version='32.0')
    
    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'
    
    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '{0}'".format(pattern))

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '{0}')".format(pattern,))
   
    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = {0}".format(namespace,)
    if where_name:
        query += " AND ({0})".format(' OR '.join(where_name),)
    if where_exclude:
        query += " AND {0}".format(' AND '.join(where_exclude),)

    print "Running Query: {0}".format(query,)
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found {0} classes".format(res['totalSize'],)
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}
    
    classes_by_id = {}
    classes_by_name = {}
    trace_id = None
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        print 'Setting up trace flag to capture debug logs'

        # Get the User's id to set a TraceFlag
        res_user = sf.query("Select Id from User where Username = '******'".format(username,))
        user_id = res_user['records'][0]['Id']
        
        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                     .format(instance=sf.sf_instance,
                             object_name='TraceFlag',
                             sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])
    
        expiration = datetime.datetime.now() + datetime.timedelta(1)
        res = TraceFlag.create({
            'ApexCode': 'Info',
            'ApexProfiling': 'Debug',
            'Callout': 'Info',
            'Database': 'Info',
            'ExpirationDate': expiration.isoformat(),
            #'ScopeId': user_id,
            'System': 'Info',
            'TracedEntityId': user_id,
            'Validation': 'Info',
            'Visualforce': 'Info',
            'Workflow': 'Info',
        })
        trace_id = res['id']

        print 'Created TraceFlag for user'
    
    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
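    # job_id is the Id of the enqueued AsyncApexJob; the loop below polls its
    # child ApexTestQueueItem records until none are Queued or Processing.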
    
    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '{0}'".format(job_id,))
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1
    
        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break
        
        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)
    
    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '{0}'".format(job_id,))
    
    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
    
    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('{0}')".format("','".join([str(id) for id in classes_by_log_id.keys()]),)
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in {0}".format(log_ids,))
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '{0}sobjects/ApexLog/{1}/Body'.format(sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)
            f.close()
            
            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flag
        TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []

    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: {0} ({1}s)'.format(class_name, duration)
        else:
            print 'Class: {0}'.format(class_name,)
        sys.stdout.flush()

        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': decode_to_unicode(class_name),
                'Method': decode_to_unicode(result['MethodName']),
                'Message': decode_to_unicode(result['Message']),
                'Outcome': decode_to_unicode(result['Outcome']),
                'StackTrace': decode_to_unicode(result['StackTrace']),
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })
            
            # Output result for method
            if debug and json_output and result.get('stats') and 'duration' in result['stats']:
                # If debug is enabled and we're generating the json output, include duration with the test
                print u'   {0}: {1} ({2}s)'.format(
                    result['Outcome'], 
                    result['MethodName'], 
                    result['stats']['duration']
                )
            else:
                print u'   {Outcome}: {MethodName}'.format(**result)

            if debug and not json_output:
                print u'     DEBUG LOG INFO:'
                stats = result.get('stats',None)
                if not stats:
                    print u'       No stats found, likely because of debug log size limit'
                else:
                    stat_keys = stats.keys()
                    stat_keys.sort()
                    for stat in stat_keys:
                        try:
                            value = stats[stat]
                            output = u'       {0} / {1}'.format(value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except (TypeError, KeyError):
                            output = u'       {0}'.format(stats[stat],)
                            print output.ljust(26) + stat
    
            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail','CompileFail']:
                print u'   Message: {Message}'.format(**result)
                print u'   StackTrace: {StackTrace}'.format(**result)
            sys.stdout.flush()
    
    print u'-------------------------------------------------------------------------------'
    print u'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print u'-------------------------------------------------------------------------------'
    sys.stdout.flush()
    
    if counts['Fail'] or counts['CompileFail']:
        print u''
        print u'Failing Tests'
        print u'-------------'
        print u''
        sys.stdout.flush()

        counter = 0
        for result in test_results:
            if result['Outcome'] not in ['Fail','CompileFail']:
                continue
            counter += 1
            print u'{0}: {1}.{2} - {3}'.format(counter, result['ClassName'], result['Method'], result['Outcome'])
            print u'  Message: {0}'.format(result['Message'],)
            print u'  StackTrace: {0}'.format(result['StackTrace'],)
            sys.stdout.flush()

    if json_output:
        f = codecs.open(json_output, encoding='utf-8', mode='w')
        f.write(json.dumps(test_results))
        f.close()

    if junit_output:
        f = codecs.open(junit_output, encoding='utf-8', mode='w')
        f.write('<testsuite tests="{0}">\n'.format(len(test_results)),)
        for result in test_results:
            testcase = '  <testcase classname="{0}" name="{1}"'.format(result['ClassName'], result['Method'])
            if 'Stats' in result and result['Stats'] and 'duration' in result['Stats']:
                testcase = '{0} time="{1}"'.format(testcase, result['Stats']['duration'])
            if result['Outcome'] in ['Fail','CompileFail']:
                testcase = '{0}>\n'.format(testcase,)
                testcase = '{0}    <failure type="{1}">{2}</failure>\n'.format(
                    testcase, 
                    cgi.escape(result['StackTrace'], True), 
                    cgi.escape(result['Message']),
                )
                testcase = '{0}  </testcase>\n'.format(testcase,)
            else:
                testcase = '{0} />\n'.format(testcase,)
            f.write(testcase)

        f.write('</testsuite>')
        f.close()
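        # The file now holds minimal JUnit-style XML along these lines:
        #   <testsuite tests="2">
        #     <testcase classname="SampleTest" name="test_ok" />
        #     <testcase classname="SampleTest" name="test_bad">
        #       <failure type="...">...</failure>
        #     </testcase>
        #   </testsuite>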
        

    return counts
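
A minimal sketch of how a CI job might drive this function, assuming it lives in an importable module (the name run_apex_tests is hypothetical) and that the SF_* environment variables are already exported:

# Hedged usage sketch, not part of the examples: run the suite and turn
# failing outcomes into a nonzero exit code for CI. The module name
# run_apex_tests is hypothetical.
import sys

from run_apex_tests import run_tests

if __name__ == '__main__':
    counts = run_tests()
    # run_tests() returns {'Pass': n, 'Fail': n, 'CompileFail': n, 'Skip': n}
    if counts['Fail'] or counts['CompileFail']:
        sys.exit(1)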
Example #2
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS',False) == 'true'
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    
    if namespace:
        namespace = "'%s'" % namespace
    else:
        namespace = 'null'
    
    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True
    
    sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0')
    
    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'
    
    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        where_name.append("Name LIKE '%s'" % pattern)
   
    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s and (%s)" % (namespace, ' OR '.join(where_name))

    print "Running Query: %s" % query
    sys.stdout.flush()

    res = sf.query_all("SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s and (%s)" % (namespace, ' OR '.join(where_name)))

    print "Found %s classes" % res['totalSize']
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}
    
    classes_by_id = {}
    classes_by_name = {}
    traces_by_class_id = {}
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        expiration = datetime.datetime.now() + datetime.timedelta(0,3600)
        for class_id in classes_by_id.keys():
            TraceFlag = sf.TraceFlag
            TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                         .format(instance=sf.sf_instance,
                                 object_name='TraceFlag',
                                 sf_version=sf.sf_version))
            res = TraceFlag.create({
                'ApexCode': 'DEBUG',
                'ApexProfiling': 'DEBUG',
                'Callout': 'DEBUG',
                'Database': 'DEBUG',
                'ExpirationDate': expiration.isoformat(),
                #'ScopeId': class_id,
                'System': 'DEBUG',
                'TracedEntityId': class_id,
                'Validation': 'DEBUG',
                'Visualforce': 'DEBUG',
                'Workflow': 'DEBUG',
            })
            traces_by_class_id[class_id] = res['id']
    
    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
    
    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '%s'" % job_id)
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1
    
        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break
        
        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)
    
    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId FROM ApexTestResult WHERE AsyncApexJobId = '%s'" % job_id)
    
    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
    
    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('%s')" % "','".join([str(id) for id in classes_by_log_id.keys()])
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in %s" % log_ids)
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '%ssobjects/ApexLog/%s/Body' % (sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(f)
            f.close()
            
            # Add method stats to results_by_class_name
            for method, stats in method_stats.items():
                results_by_class_name[class_name][method]['stats'] = stats

        # Expire the trace flags
        for trace_id in traces_by_class_id.values():
            TraceFlag.update(trace_id, {'ExpirationDate': datetime.datetime.now().isoformat()})

    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        if debug and class_id in logs_by_class_id:
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: %s (%ss)' % (class_name, duration)
        else:
            print 'Class: %s' % class_name
        sys.stdout.flush()
        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]
    
            # Output result for method
            print '   %(Outcome)s: %(MethodName)s' % result

            if debug:
                print '     DEBUG LOG INFO:'
                stat_keys = result.get('stats', {}).keys()
                stat_keys.sort()
                for stat in stat_keys:
                    try:
                        value = result['stats'][stat]
                        output = '       %s / %s' % (value['used'], value['allowed'])
                        print output.ljust(26) + stat
                    except (TypeError, KeyError):
                        output = '       %s' % result['stats'][stat]
                        print output.ljust(26) + stat
    
            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail','CompileFail']:
                print '   Message: %(Message)s' % result
                print '   StackTrace: %(StackTrace)s' % result
            sys.stdout.flush()
    
    print '-------------------------------------------------------------------------------'
    print 'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print '-------------------------------------------------------------------------------'
    sys.stdout.flush()

    return counts
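
None of the examples define parse_log, but the call sites fix its contract: it reads a saved debug log and returns per-method stats whose entries expose 'used' and 'allowed'. Below is a hedged sketch of that shape, assuming Apex limit lines like 'Number of SOQL queries: 1 out of 100' and CODE_UNIT_STARTED markers naming Class.method; the project's real helper may well differ.

import re

# Assumed format of Apex cumulative-limit lines; this regex and the whole
# helper are illustrative stand-ins, not the project's real parse_log.
LIMIT_RE = re.compile(r'Number of ([^:]+): (\d+) out of (\d+)')

def parse_log(f):
    method_stats = {}
    current = None
    for line in f:
        # CODE_UNIT_STARTED lines end with 'Class.method' for test methods.
        if 'CODE_UNIT_STARTED' in line:
            unit = line.rsplit('|', 1)[-1].strip()
            if '.' in unit:
                current = unit.rsplit('.', 1)[-1]
                method_stats.setdefault(current, {})
        match = LIMIT_RE.search(line)
        if match and current:
            method_stats[current][match.group(1)] = {
                'used': int(match.group(2)),
                'allowed': int(match.group(3)),
            }
    return method_stats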
Example #3
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS', False) in ['true', 'True']
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    junit_output = os.environ.get('TEST_JUNIT_OUTPUT', None)

    if namespace:
        namespace = "'{0}'".format(namespace, )
    else:
        namespace = 'null'

    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True

    sf = Salesforce(username=username,
                    password=password,
                    security_token='',
                    sandbox=sandbox,
                    version='32.0')

    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'

    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '{0}'".format(pattern))

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '{0}')".format(pattern, ))

    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = {0}".format(
        namespace, )
    if where_name:
        query += " AND ({0})".format(' OR '.join(where_name), )
    if where_exclude:
        query += " AND {0}".format(' AND '.join(where_exclude), )

    print "Running Query: {0}".format(query, )
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found {0} classes".format(res['totalSize'], )
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}

    classes_by_id = {}
    classes_by_name = {}
    trace_id = None
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}

    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        print 'Setting up trace flag to capture debug logs'

        # Get the User's id to set a TraceFlag
        res_user = sf.query(
            "Select Id from User where Username = '{0}'".format(username, ))
        user_id = res_user['records'][0]['Id']

        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (
            u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
            .format(instance=sf.sf_instance,
                    object_name='TraceFlag',
                    sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])

        expiration = datetime.datetime.now() + datetime.timedelta(hours=12)
        res = TraceFlag.create({
            'ApexCode': 'Info',
            'ApexProfiling': 'Debug',
            'Callout': 'Info',
            'Database': 'Info',
            'ExpirationDate': expiration.isoformat(),
            #'ScopeId': user_id,
            'System': 'Info',
            'TracedEntityId': user_id,
            'Validation': 'Info',
            'Visualforce': 'Info',
            'Workflow': 'Info',
        })
        trace_id = res['id']

        print 'Created TraceFlag for user'

    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous',
                        params={'classids': ','.join(classes_by_id.keys())})

    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all(
            "SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '{0}'"
            .format(job_id, ))
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1

        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break

        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)

    # Get the test results by method
    res = sf.query_all(
        "SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '{0}'"
        .format(job_id, ))

    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']

    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('{0}')".format(
            "','".join([str(id) for id in classes_by_log_id.keys()]), )
        res = sf.query_all(
            "SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in {0}"
            .format(log_ids, ))
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '{0}sobjects/ApexLog/{1}/Body'.format(
                sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)
            f.close()

            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flag
        TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []

    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            duration = int(
                logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: {0} ({1}s)'.format(class_name, duration)
        else:
            print 'Class: {0}'.format(class_name, )
        sys.stdout.flush()

        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': decode_to_unicode(class_name),
                'Method': decode_to_unicode(result['MethodName']),
                'Message': decode_to_unicode(result['Message']),
                'Outcome': decode_to_unicode(result['Outcome']),
                'StackTrace': decode_to_unicode(result['StackTrace']),
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })

            # Output result for method
            if debug and json_output and result.get('stats') and 'duration' in result['stats']:
                # If debug is enabled and we're generating the json output, include duration with the test
                print u'   {0}: {1} ({2}s)'.format(result['Outcome'],
                                                   result['MethodName'],
                                                   result['stats']['duration'])
            else:
                print u'   {Outcome}: {MethodName}'.format(**result)

            if debug and not json_output:
                print u'     DEBUG LOG INFO:'
                stats = result.get('stats', None)
                if not stats:
                    print u'       No stats found, likely because of debug log size limit'
                else:
                    stat_keys = stats.keys()
                    stat_keys.sort()
                    for stat in stat_keys:
                        try:
                            value = stats[stat]
                            output = u'       {0} / {1}'.format(
                                value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except (TypeError, KeyError):
                            output = u'       {0}'.format(stats[stat], )
                            print output.ljust(26) + stat

            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail', 'CompileFail']:
                print u'   Message: {Message}'.format(**result)
                print u'   StackTrace: {StackTrace}'.format(**result)
            sys.stdout.flush()

    print u'-------------------------------------------------------------------------------'
    print u'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print u'-------------------------------------------------------------------------------'
    sys.stdout.flush()

    if counts['Fail'] or counts['CompileFail']:
        print u''
        print u'Failing Tests'
        print u'-------------'
        print u''
        sys.stdout.flush()

        counter = 0
        for result in test_results:
            if result['Outcome'] not in ['Fail', 'CompileFail']:
                continue
            counter += 1
            print u'{0}: {1}.{2} - {3}'.format(counter, result['ClassName'],
                                               result['Method'],
                                               result['Outcome'])
            print u'  Message: {0}'.format(result['Message'], )
            print u'  StackTrace: {0}'.format(result['StackTrace'], )
            sys.stdout.flush()

    if json_output:
        f = codecs.open(json_output, encoding='utf-8', mode='w')
        f.write(json.dumps(test_results))
        f.close()

    if junit_output:
        f = codecs.open(junit_output, encoding='utf-8', mode='w')
        f.write('<testsuite tests="{0}">\n'.format(len(test_results)), )
        for result in test_results:
            testcase = '  <testcase classname="{0}" name="{1}"'.format(
                result['ClassName'], result['Method'])
            if result.get('Stats') and 'duration' in result['Stats']:
                testcase = '{0} time="{1}"'.format(testcase,
                                                   result['Stats']['duration'])
            if result['Outcome'] in ['Fail', 'CompileFail']:
                testcase = '{0}>\n'.format(testcase, )
                testcase = '{0}    <failure type="{1}">{2}</failure>\n'.format(
                    testcase,
                    cgi.escape(result['StackTrace'], True),
                    cgi.escape(result['Message']),
                )
                testcase = '{0}  </testcase>\n'.format(testcase, )
            else:
                testcase = '{0} />\n'.format(testcase, )
            f.write(testcase)

        f.write('</testsuite>')
        f.close()

    return counts
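
decode_to_unicode is likewise undefined in these examples. A minimal Python 2 sketch consistent with how it is called, coercing possibly-None, possibly-bytes API fields to unicode before printing and JSON output, might be:

def decode_to_unicode(content):
    # Hedged sketch of the undefined helper: pass None through, decode byte
    # strings as UTF-8, and stringify anything else. The real helper may
    # behave differently.
    if content is None:
        return None
    if isinstance(content, unicode):
        return content
    if isinstance(content, str):
        return content.decode('utf-8', 'replace')
    return unicode(content)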
Example #4
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS',False) == 'true'
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    
    if namespace:
        namespace = "'%s'" % namespace
    else:
        namespace = 'null'
    
    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True
    
    sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0')
    
    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'
    
    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '%s'" % pattern)

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '%s')" % pattern)
   
    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s" % namespace
    if where_name:
        query += " AND (%s)" % ' OR '.join(where_name)
    if where_exclude:
        query += " AND %s" % ' AND '.join(where_exclude)

    print "Running Query: %s" % query
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found %s classes" % res['totalSize']
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}
    
    classes_by_id = {}
    classes_by_name = {}
    traces_by_class_id = {}
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                     .format(instance=sf.sf_instance,
                             object_name='TraceFlag',
                             sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])
    
        expiration = datetime.datetime.now() + datetime.timedelta(1)
        for class_id in classes_by_id.keys():
            res = TraceFlag.create({
                'ApexCode': 'Error',
                'ApexProfiling': 'Debug',
                'Callout': 'Error',
                'Database': 'Error',
                'ExpirationDate': expiration.isoformat(),
                #'ScopeId': class_id,
                'System': 'Error',
                'TracedEntityId': class_id,
                'Validation': 'Error',
                'Visualforce': 'Error',
                'Workflow': 'Error',
            })
            traces_by_class_id[class_id] = res['id']
    
    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
    
    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '%s'" % job_id)
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1
    
        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break
        
        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)
    
    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '%s'" % job_id)
    
    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
    
    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('%s')" % "','".join([str(id) for id in classes_by_log_id.keys()])
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in %s" % log_ids)
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '%ssobjects/ApexLog/%s/Body' % (sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)
            f.close()
            
            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flags
        for trace_id in traces_by_class_id.values():
            TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []

    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: %s (%ss)' % (class_name, duration)
        else:
            print 'Class: %s' % class_name
        sys.stdout.flush()

        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': class_name,
                'Method': result['MethodName'],
                'Message': result['Message'],
                'Outcome': result['Outcome'],
                'StackTrace': result['StackTrace'],
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })
    
            # Output result for method
            print '   %(Outcome)s: %(MethodName)s' % result

            if debug:
                print '     DEBUG LOG INFO:'
                stats = result.get('stats',None)
                if not stats:
                    print '       No stats found, likely because of debug log size limit'
                else:
                    stat_keys = stats.keys()
                    stat_keys.sort()
                    for stat in stat_keys:
                        try:
                            value = stats[stat]
                            output = '       %s / %s' % (value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except (TypeError, KeyError):
                            output = '       %s' % stats[stat]
                            print output.ljust(26) + stat
    
            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail','CompileFail']:
                print '   Message: %(Message)s' % result
                print '   StackTrace: %(StackTrace)s' % result
            sys.stdout.flush()
    
    print '-------------------------------------------------------------------------------'
    print 'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print '-------------------------------------------------------------------------------'
    sys.stdout.flush()

    if json_output:
        f = open(json_output, 'w')
        f.write(json.dumps(test_results))
        f.close()

    return counts
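
A caveat shared by all four versions: the SOQL is assembled with plain string formatting, so a namespace or pattern containing a single quote would break (or alter) the query. If the environment variables are not fully trusted, an escaping helper along these lines, which is not part of the examples, keeps the WHERE clause intact:

def soql_quote(value):
    # Hypothetical helper, not used by the examples above: SOQL string
    # literals escape backslashes and single quotes with a backslash.
    return value.replace('\\', '\\\\').replace("'", "\\'")

# Usage sketch: where_name.append("Name LIKE '{0}'".format(soql_quote(pattern)))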