Example #1
def put_manifest_sdb(creds, sdb_domain, config_dict):
    sdb_client = boto3.client(
        'sdb',
        region_name='us-east-1',
        aws_access_key_id=creds['Credentials']['AccessKeyId'],
        aws_secret_access_key=creds['Credentials']['SecretAccessKey'],
        aws_session_token=creds['Credentials']['SessionToken'],
    )
    local_source_root = config_dict['localSourceRoot']
    lambda_source_root = config_dict['lambdaSourceRoot']

    manifest = JasmineManifest(config_dict['testGlobs'],
                               config_dict['includeTags'],
                               config_dict['excludeTags'])

    every_test = manifest.get_all_runnable_tests()
    tests = every_test

    test_index = 1
    job_id = config_dict['executionName']

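    # Write one SimpleDB item per runnable test, keyed '<index>/<job id>' with a zero-padded counter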
    for test in tests:
        db_key = str(test_index) + '/' + job_id
        test_path = test.test_path.replace(local_source_root,
                                           lambda_source_root)
        put_manifest_task_simple_db(sdb_client, sdb_domain, db_key, job_id,
                                    str(test_index).zfill(8), test_path,
                                    test.test_name, test.test_class_name)
        if config_dict["maxTestsToRun"] == "1":
            print("RUNNING JOB IN SMOKE MODE! ONLY SCHEDULING ONE TASK IN DB")
            break
        test_index = test_index + 1
def test_get_jasmine_file_list(test_resource_dir):
    # Test: pass no files to the method; Expect: empty list of tests (no objects)
    no_jasmine_files = JasmineManifest([test_resource_dir + '*.py'], [], [])
    assert len(no_jasmine_files.jasmine_tests) == 0

    # Test: pass more than 0 files to the method; Expect: more than 0 Jasmine file objects
    test_resources_directory = JasmineManifest([test_resource_dir + '*.ts'],
                                               [], [])
    assert len(test_resources_directory.jasmine_tests) > 0
def test_get_all_tests(test_resource_dir):
    # Test: pass 'glob' filePatterns to directory with 1 matching file and 5 tests; Expect: jasmine_test_list to contain 5 items
    five_jasmine_tests = JasmineManifest([test_resource_dir + 'five_tests.ts'],
                                         [], [])
    assert len(five_jasmine_tests.jasmine_tests) == 5

    # Test: pass 'glob' filePatterns to directory containing no files with matching pattern; Expect: empty jasmine_test_list
    does_not_contain_jasmine_tests = JasmineManifest(
        [test_resource_dir + '*.py'], [], [])
    assert len(does_not_contain_jasmine_tests.jasmine_tests) == 0
def test_get_total_number_runnable(test_resource_dir):
    # Test: Pass file with No runnable tests; Expect return 0
    no_runnable = JasmineManifest([], [], [])
    assert no_runnable.get_total_number_runnable() == 0

    # Test: Pass file with runnable tests; Expect return > 0
    has_runnable = JasmineManifest([test_resource_dir + '*.ts'], [], [])
    assert has_runnable.get_total_number_runnable() > 0
def test_get_all_runnable_tests(test_resource_dir):
    # Test: Pass file with no tests; Expect return 0
    no_tests = JasmineManifest([], [], [])
    assert len(no_tests.get_all_runnable_tests()) == 0

    # Test: Pass file path to resources directory; Expect: return more than 0
    more_tests = JasmineManifest([test_resource_dir + '*.ts'], [], [])
    assert len(more_tests.get_all_runnable_tests()) > 0
def test_get_total_number_of_tests(test_resource_dir):
    # Test: Pass file with no tests; Expect return 0
    no_tests = JasmineManifest([], [], [])
    assert no_tests.get_total_number_tests() == 0

    # Test: Pass file path to resources directory; Expect: return more than 0
    more_tests = JasmineManifest([test_resource_dir + '*.ts'], [], [])
    assert more_tests.get_total_number_tests() > 0
def test_is_runnable(test_resource_dir):
    # Test: pass no test name; expect False
    no_test = JasmineManifest([], [], [])
    assert not no_test.is_runnable('')

    # Test: pass filepath with runnable tests; Expect True
    runnable = JasmineManifest([test_resource_dir + 'is_runnable_test.ts'], [],
                               [])
    assert runnable.is_runnable('is_runnable')
def test_get_total_number_not_runnable(test_resource_dir):
    # Test: pass file pattern with all runnable tests; Expect return 0
    all_runnable = JasmineManifest([test_resource_dir + 'five_tests.ts'], [],
                                   [])
    assert all_runnable.get_total_number_not_runnable() == 0

    # Test: pass file pattern with 5 non-runnable tests; Expect: return 5
    five_non_runnable = JasmineManifest([test_resource_dir + 'five_tests.ts'],
                                        ['#include'], [])
    assert five_non_runnable.get_total_number_not_runnable() == 5
def test_get_all_non_runnable_tests(test_resource_dir):
    # Test: Pass file with five non_runnable Tests: Expect return 5
    five_runnable = JasmineManifest([test_resource_dir + 'five_tests.ts'],
                                    ['#include'], [])
    assert len(five_runnable.get_all_non_runnable_tests()) == 5

    # Test: Pass file with all runnable tests; Expect 0 return
    all_runnable = JasmineManifest([test_resource_dir + 'five_tests.ts'], [],
                                   [])
    assert len(all_runnable.get_all_non_runnable_tests()) == 0
Example #10
def test_junit_manifest(report_path):
    setup_tests_abs_path = os.path.abspath(
        'pipeline/integration_tests/webdriver_tests/jasmine_reporter_test.ts')

    jasmine_manifest = JasmineManifest([setup_tests_abs_path],
                                       ['#integrationSuite'], ['#quarantine'])
    jasmine_manifest_skipped = JasmineManifest([setup_tests_abs_path],
                                               ['#quarantine'], [])
    report = JunitHelper(report_path)
    runnable_case_list = report.get_runnable_test_elements(jasmine_manifest)
    not_runnable_case_list = jasmine_manifest.get_all_non_runnable_tests()
    complete_case_list = jasmine_manifest.jasmine_tests
    total_junit_cases = len(report.get_test_attributes()
                            ) - 1  # one record is a fake for expired test
    total_tests = jasmine_manifest.get_total_number_tests()
    total_runnable = jasmine_manifest.get_total_number_runnable()
    total_not_runnable = jasmine_manifest.get_total_number_not_runnable()

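    # Sanity check: runnable plus not-runnable tests must account for every test in the manifest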
    does_total_match = False
    for item in complete_case_list:
        print(item.test_name)
    if total_runnable + total_not_runnable == total_tests:
        does_total_match = True

    assert does_total_match
    assert total_tests == total_junit_cases
    assert jasmine_manifest.get_total_number_runnable(
    ) == jasmine_manifest_skipped.get_total_number_not_runnable()
    assert jasmine_manifest_skipped.get_total_number_runnable(
    ) == jasmine_manifest.get_total_number_not_runnable()
    assert jasmine_manifest.get_total_number_tests(
    ) == jasmine_manifest_skipped.get_total_number_tests()

    # runnable cases in junit are found within its own inventory
    for item in runnable_case_list:
        is_found = False
        for case in report.get_test_attributes():
            if case.is_match(item.get('name')):
                is_found = True
                break
        assert is_found
Example #11
def get_artifacts(bucket_name, sts_creds, report_destination_path,
                  config_dict):
    s3 = boto3.resource(
        's3',
        region_name='us-east-1',
        aws_access_key_id=sts_creds['Credentials']['AccessKeyId'],
        aws_secret_access_key=sts_creds['Credentials']['SecretAccessKey'],
        aws_session_token=sts_creds['Credentials']['SessionToken'])

    pathlib.Path('junit_results').mkdir(parents=True, exist_ok=True)
    test_report_bucket = bucket_name
    test_report_key = 'artifacts/' + config_dict[
        'testBranchJobIdentifier'] + '/' + config_dict[
            'timestampForTestMetrics'] + '.xml'
    test_stylesheet_key = 'artifacts/' + config_dict[
        'testBranchJobIdentifier'] + '/stylesheet.xsl'

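    # Fetch the junit report and its XSL stylesheet into the report destination directory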
    s3.Bucket(test_report_bucket).download_file(test_report_key,
                                                report_destination_path)
    stylesheet_path = str(
        pathlib.Path(report_destination_path).parent.joinpath(
            'stylesheet.xsl'))
    s3.Bucket(test_report_bucket).download_file(test_stylesheet_key,
                                                stylesheet_path)

    # Create temporary shell environment with AWS creds from STS
    new_env = os.environ.copy()
    new_env['AWS_ACCESS_KEY_ID'] = sts_creds['Credentials']['AccessKeyId']
    new_env['AWS_SECRET_ACCESS_KEY'] = sts_creds['Credentials'][
        'SecretAccessKey']
    new_env['AWS_SESSION_TOKEN'] = sts_creds['Credentials']['SessionToken']

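    # Build AWS CLI commands to recursively copy the Allure results and artifacts down from S3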
    aws_command_template_results = 'aws s3 cp s3://{}/artifacts/' + config_dict['testBranchJobIdentifier'] + '/' \
                  + config_dict['timestampForTestMetrics'] + '/allure-results/ allure-results --recursive'
    aws_command_results = aws_command_template_results.format(bucket_name)

    aws_command_template_artifacts = 'aws s3 cp s3://{}/artifacts/' + config_dict['testBranchJobIdentifier'] + '/' \
                           + config_dict['timestampForTestMetrics'] + '/allure-artifacts/ artifacts --recursive'
    aws_command_artifacts = aws_command_template_artifacts.format(bucket_name)

    print('Running command: ' + aws_command_results)
    call_aws_command_results = subprocess.Popen(aws_command_results,
                                                stderr=subprocess.STDOUT,
                                                shell=True,
                                                preexec_fn=os.setsid,
                                                env=new_env)

    print('Running command: ' + aws_command_artifacts)
    call_aws_command_artifacts = subprocess.Popen(aws_command_artifacts,
                                                  stderr=subprocess.STDOUT,
                                                  shell=True,
                                                  preexec_fn=os.setsid,
                                                  env=new_env)

    call_aws_command_results.wait()
    call_aws_command_artifacts.wait()
    allure_helper = AllureHelper('allure-results/*.json')
    manifest = JasmineManifest(
        config_dict["testGlobs"],
        ["#quarantine", "#new", "#nobackendsupport", "#setup"], [])
    allure_helper.update_results(manifest)
def main():
    allure_results_glob = ''
    junit_results_glob = ''
    retry_glob = ''
    output_report_name = ''
    report_type = 'runnable'
    include_tags = []
    exclude_tags = []
    # Set default test spec paths, override with --testSpecGlobs option
    test_spec_globs = [
        'build/test/test/e2e/uia/spec/**/**/*_test.js',
        'build/test/test/e2e/uia/spec/**/*_test.js',
        'build/test/test/e2e/setup/spec/*_test.js'
    ]

    # Parse any possible configuration options
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", [
            "allureGlob=", "junitGlob=", "retryGlob=", "outputReportName=",
            "reportType=", "includeTags=", "excludeTags=", "testSpecGlobs="
        ])
    except getopt.GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)

    allure_results_glob_found = False
    junit_glob_found = False
    retry_glob_found = False
    output_report_name_found = False

    for o, a in opts:
        if o == "--allureGlob":
            allure_results_glob = a
            allure_results_glob_found = True
        elif o == "--junitGlob":
            junit_results_glob = a
            junit_glob_found = True
        elif o == "--retryGlob":
            retry_glob = a
            retry_glob_found = True
        elif o == "--outputReportName":
            output_report_name = a
            output_report_name_found = True
        elif o == "--reportType":
            report_type = a
        elif o == "--excludeTags":
            if a == "":
                exclude_tags = []
            else:
                exclude_tags = "".join(a).split(",")
        elif o == "--includeTags":
            if a == "":
                include_tags = []
            else:
                include_tags = "".join(a).split(",")
        elif o == "--testSpecGlobs":
            test_spec_globs = "".join(a).split(",")
        else:
            assert False, "unhandled option"

    jasmine_manifest = JasmineManifest(test_spec_globs, include_tags,
                                       exclude_tags)
    print("Total tests: " + str(jasmine_manifest.get_total_number_tests()))

    # Create runnable report
    if junit_glob_found and output_report_name_found and not retry_glob_found and report_type == 'runnable':
        output_file = output_report_name
        output_file_missing = output_report_name.replace(
            '.xml', '-missing.xml')
        runnable = JunitMerger(junit_results_glob, output_file,
                               jasmine_manifest)
        print("Total runnable tests: " +
              str(runnable.non_runnable_manifest.get_total_number_runnable()))
        print("Creating runnable tests report...")
        runnable.create_runnable_report()
        print("Creating disabled runnable tests report...")
        missing = JunitMerger(junit_results_glob, output_file_missing,
                              jasmine_manifest)
        missing.create_missing_report()
        runnable.remove_old_files()

    # Create non_runnable report
    elif output_report_name_found and not retry_glob_found and report_type == 'non_runnable':
        output_file = output_report_name
        non_runnable = JunitMerger('', output_file, jasmine_manifest)
        print("Total non_runnable tests: " +
              str(non_runnable.non_runnable_manifest.
                  get_total_number_not_runnable()))
        print("Creating non_runnable tests report...")
        non_runnable.create_non_runnable_report()

    # Tweak allure reports if --allureGlob and --junitGlob options are passed in
    elif junit_glob_found and allure_results_glob:
        allure_report = AllureTweaker(junit_results_glob, allure_results_glob)
        allure_report.update_results()

    # Process test retries if --retryGlob is passed in
    elif retry_glob_found and junit_glob_found and output_report_name_found:
        print("Processing test retries and merging reports")
        output_file = output_report_name
        retry_helper = RetryCombine(retry_glob)
        merger = JunitMerger(junit_results_glob, output_file, jasmine_manifest)
        merger.create_report(retry_helper)
        merger.remove_old_files()

    # Write skipped test with native allure formatting
    elif report_type == 'non_runnable_allure' and output_report_name_found:
        print('Writing Allure skipped results to: ' + output_report_name)
        manifest = JasmineManifest(test_spec_globs, [], [])
        test_list = {}

        for test in manifest.get_all_tests():
            test_class_name = strip_escape(test.test_class_name)
            test_name = strip_escape(test.test_name)
            full_name = test_class_name + ' ' + test_name

            test_list[full_name] = {
                'class': test_class_name,
                'name': test_name,
                'found': False
            }

        for file_glob in glob.glob(output_report_name + '/*-result.json'):
            test_result = json.loads(open(file_glob).read())
            test_list[strip_escape(test_result['historyId'])]['found'] = True

        write_native_allure_results(test_list)

    else:
        # Raise error if required allure report arguments are missing
        if output_report_name_found is not True:
            raise ValueError(
                'Argument for Junit Report Missing! Run command with --outputReportName my_report.xml !'
            )
        if allure_results_glob_found is not True:
            raise ValueError(
                'Argument for Allure Glob Missing! Run command with --allureGlob, e.g. --allureGlob folder/myglob*.json'
            )
Example #13
def handler(event, context):
    print('Event: ')
    print(json.dumps(event, sort_keys=False))

    setup_start_time = current_time_milli()

    gc.collect()

    cloud_watch_logs = CloudWatchLogs()
    cloud_watch_logs.print_marker()

    server_login = event["Records"][0]["Sns"]["MessageAttributes"][
        "ServerLogin"]["Value"]
    print("Server Login: " + server_login)

    server_password = event["Records"][0]["Sns"]["MessageAttributes"][
        "ServerPassword"]["Value"]
    print("Server Password: " + server_password)

    server_url = event["Records"][0]["Sns"]["MessageAttributes"]["ServerUrl"][
        "Value"]
    print("Server URL: " + server_url)

    test_id = event["Records"][0]["Sns"]["MessageAttributes"]["TestId"][
        "Value"]
    print("TestID: " + test_id)

    test_name = event["Records"][0]["Sns"]["MessageAttributes"]["TestName"][
        "Value"]
    print("TestName: " + test_name)

    test_class_name = event["Records"][0]["Sns"]["MessageAttributes"][
        "TestClassName"]["Value"]
    print("TestClassName: " + test_class_name)

    test_file_path = event["Records"][0]["Sns"]["MessageAttributes"][
        "TestFilePath"]["Value"]
    print("Test File Path: " + test_file_path)

    tarball_s3_bucket = event["Records"][0]["Sns"]["MessageAttributes"][
        "TarballS3Bucket"]["Value"]
    print("Tarball S3 Bucket: " + tarball_s3_bucket)

    node_runtime_s3_path = event["Records"][0]["Sns"]["MessageAttributes"][
        "NodeRuntimeS3Path"]["Value"]
    print("Node Runtime S3 Path: " + node_runtime_s3_path)

    xvfb_s3_path = event["Records"][0]["Sns"]["MessageAttributes"][
        "XvfbS3Path"]["Value"]
    print("Xvfb S3 path: " + xvfb_s3_path)

    system_libs_s3_path = event["Records"][0]["Sns"]["MessageAttributes"][
        "SystemLibsS3Path"]["Value"]
    print("System Libs S3 Path: " + system_libs_s3_path)

    protractor_tarball_s3_path = event["Records"][0]["Sns"][
        "MessageAttributes"]["ProtractorTarballS3Path"]["Value"]
    print("Protractor Tarball S3 Path: " + protractor_tarball_s3_path)

    run_test_command = event["Records"][0]["Sns"]["MessageAttributes"][
        "RunTestCommand"]["Value"]
    print("Bash command for running tests: " + run_test_command)

    job_id = event["Records"][0]["Sns"]["Message"]
    print("SNS Message Body " + job_id)

    assumable_role = event["Records"][0]["Sns"]["MessageAttributes"][
        "AssumeRoleBoolean"]["Value"]
    print("Assume different AWS Role?: " + assumable_role)

    assumable_role_arn = event["Records"][0]["Sns"]["MessageAttributes"][
        "AssumableRoleArn"]["Value"]
    print("Assume different AWS Role?: " + assumable_role_arn)

    artifact_s3_bucket = os.environ['TASKRUNNERBUCKET']
    print("Test Artifact Destination Bucket: " + artifact_s3_bucket)

    artifact_s3_path = "test-runner/artifacts/" + job_id
    print("Test Artifact Destination Path: " + artifact_s3_path)

    junit_report_s3_path = artifact_s3_path + "/" + job_id.replace(
        "/", "-") + ".xml"
    print("Test Report Path: " + junit_report_s3_path)

    test_branch_job_identifier = event["Records"][0]["Sns"][
        "MessageAttributes"]["TestBranchId"]["Value"]
    print("Test Branch Job ID: " + test_branch_job_identifier)

    timestamp_for_test_metrics = event["Records"][0]["Sns"][
        "MessageAttributes"]["MetricsId"]["Value"]
    print("Metrics Timestamp ID: " + timestamp_for_test_metrics)

    clear_tmp()

    sdb_domain = os.environ['SDBDOMAIN']
    print("SDBDOMAIN: " + sdb_domain)

    test_start = str(datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%f"))
    test_start_milli = current_time_milli()
    print("Marking test start in database with the following stamp: " +
          test_start)
    sdb = get_simple_db_client(assumable_role, assumable_role_arn)

    sdb_put_test_start(sdb, sdb_domain, job_id, test_start, test_id,
                       artifact_s3_bucket, artifact_s3_path, context)

    kill_leftover_processes()

    s3 = get_s3_resource(assumable_role, assumable_role_arn)
    try:

        # TODO: https://stackoverflow.com/questions/6028000/how-to-read-a-static-file-from-inside-a-python-package
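        # Unpack the bundled Xvfb tarball into /tmp, the only writable path inside Lambda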
        decompress_xvfb = subprocess.check_output([
            "tar", "-xf", "/var/task/functions/xvfb.tar.gz", "-C", "/tmp",
            "--warning=no-unknown-keyword"
        ])
        print(decompress_xvfb)

        print('Untarring Chrome...')
        chrome_tar_location = '/var/task/functions/' + os.environ[
            'CHROME_VERSION'] + '.tar.gz'
        decompress_chrome = subprocess.check_output([
            "tar", "-xf", chrome_tar_location, "-C", "/tmp",
            "--warning=no-unknown-keyword"
        ])

        # Rename chrome directory
        chrome_location = '/tmp/' + os.environ['CHROME_VERSION']
        os.rename(chrome_location, '/tmp/chrome-lambda')

        print(decompress_chrome)

        print('Downloading protractor payload from s3...')
        s3.Bucket(tarball_s3_bucket).download_file(protractor_tarball_s3_path,
                                                   '/tmp/payload.tar.gz')
        decompress_protractor = subprocess.check_output([
            "tar", "-xf", "/tmp/payload.tar.gz", "-C", "/tmp",
            "--warning=no-unknown-keyword"
        ])

        rm_protractor = subprocess.check_output(
            ["cd /tmp && rm -rf payload.tar.gz && df -h /tmp"],
            stderr=subprocess.STDOUT,
            shell=True)
        print(decompress_protractor)
        print(rm_protractor)

        print("Attempting to start chrome driver...")
        chromedriver = subprocess.Popen(
            "/tmp/chrome-lambda/chromedriver --verbose --log-path=/tmp/chromedriver.log",
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            shell=True,
            preexec_fn=os.setsid)

        for stdout_line in chromedriver.stdout:
            line = str(stdout_line, 'UTF-8').strip()
            print(line)
            if "Only local connections are allowed." in line:
                print('Chromedriver successfully started')
                break

    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise

    remove_old_test_logs = subprocess.check_output(
        ["rm", "-rf", "/tmp/lambda_protractor/test/build/"])
    pathlib.Path(
        "/tmp/lambda_protractor/build/test/test/e2e/retry_attempts/").mkdir(
            parents=True, exist_ok=True)
    print(remove_old_test_logs)

    new_env = os.environ.copy()
    new_env[
        'LD_LIBRARY_PATH'] = '/lib64:/usr/lib64:/var/runtime:/var/runtime/lib:/var/task:/var/task/lib:/tmp/xvfb-1/libs'
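    # Start Xvfb on display :99 so Chrome can render without a physical display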
    start_xvfb = subprocess.Popen(
        "/tmp/xvfb-1/xvfb :99 -ac -screen 0 1920x1080x24 -nolisten tcp -dpi 96 +extension RANDR &",
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        shell=True,
        preexec_fn=os.setsid,
        env=new_env)

    for stdout_line in start_xvfb.stdout:
        line = str(stdout_line, 'UTF-8').strip()
        print(line)
        if "Errors from xkbcomp are not fatal to the X server" in line \
                or "Cannot establish any listening sockets - Make sure an X server isn't already running(EE)" in line:
            print('Xvfb successfully started')
            break

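    # Record the Xvfb display with ffmpeg so the run can be reviewed as a video artifact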
    record_vid = subprocess.Popen(
        "/var/task/ffmpeg -video_size 1920x1080 -framerate 1 -f x11grab -i :99.0 -pix_fmt yuv420p -vcodec libx264 "
        "/tmp/videoFile.mp4 ",
        stdin=subprocess.PIPE,
        shell=True,
        preexec_fn=os.setsid)

    print('Running Test...')
    protractor_template = run_test_command

    # Format test name to prevent issues with protractor grep option
    test_name_chars = list(test_name)

    for index, char in enumerate(test_name_chars):
        if not char.isalnum() and not char.isspace() and char not in "\\":
            new_char = "\\" + char
            test_name_chars[index] = new_char

    shell_safe_test_name = "".join(test_name_chars)

    print('Setup time: ' + str(current_time_milli() - setup_start_time))

    # Format final protractor command
    protractor_cmd = protractor_template.format(test_file_path, server_url,
                                                server_login, server_password,
                                                shell_safe_test_name)

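    # Give the test whatever Lambda time remains, minus a buffer for cleanup and artifact uploads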
    time_remaining_after_test_timeout = 30  # Seconds
    test_timeout = int(context.get_remaining_time_in_millis() /
                       1000) - time_remaining_after_test_timeout
    print('Remaining seconds: ' + str(test_timeout))
    print(protractor_cmd)

    test_timed_out = False

    console_output = '/tmp/console.log'

    # TODO: Pass custom environment vars to prevent task from accessing anything sensitive
    run_test = subprocess.Popen(protractor_cmd,
                                stderr=subprocess.STDOUT,
                                stdout=open(console_output, 'w'),
                                shell=True)
    try:
        run_test.wait(test_timeout)
    except subprocess.TimeoutExpired:
        print('Test timed out')
        test_timed_out = True
        run_test.terminate()

    test_stop_milli = current_time_milli()

    print("###UIA TEST OUTPUT###")
    print(run_test)

    time.sleep(3)
    check_space = subprocess.check_output(["df", "-h", "/tmp"])
    print(check_space)

    cleanup_junk = subprocess.check_output(
        'rm -rf /tmp/homedir/* && rm -rf /tmp/user-data/* && rm -rf /tmp/cache-dir/*; exit 0',
        stderr=subprocess.STDOUT,
        shell=True)

    time.sleep(3)
    check_space_again = subprocess.check_output(["df", "-h", "/tmp"])
    print(check_space_again)

    print(cleanup_junk)
    print('...Finished Running Test!')

    allure_results_s3_path = 'artifacts/' \
                             + test_branch_job_identifier \
                             + '/' \
                             + timestamp_for_test_metrics \
                             + '/allure-results/'

    allure_links_s3_path = 'artifacts/' \
                             + test_branch_job_identifier \
                             + '/' \
                             + timestamp_for_test_metrics \
                             + '/allure-artifacts/'

    test_error_screenshot_s3_path = allure_links_s3_path + '/' + job_id.replace(
        "/", "-") + ".mp4"
    chrome_log_path = allure_links_s3_path + '/' + job_id.replace("/",
                                                                  "-") + ".log"
    chrome_driver_log_path = allure_links_s3_path + '/' + job_id.replace(
        "/", "-") + ".chromedriver.log"
    console_log_path = allure_links_s3_path + '/' + job_id.replace(
        "/", "-") + ".console.log"

    test_error_screenshot_s3_path_link = 'artifacts/' + job_id.replace(
        "/", "-") + ".mp4"
    chrome_log_path_link = 'artifacts/' + job_id.replace("/", "-") + ".log"
    chrome_driver_log_path_link = 'artifacts/' + job_id.replace(
        "/", "-") + ".chromedriver.log"
    console_log_path_link = 'artifacts/' + job_id.replace("/",
                                                          "-") + ".console.log"

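    # Links attached to the Allure result, pointing at the artifacts uploaded later in this handler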
    allure_links = [
        create_link('Test Video', test_error_screenshot_s3_path_link,
                    'video/mp4'),
        create_link('Chrome Log', chrome_log_path_link),
        create_link('Chrome Driver Log', chrome_driver_log_path_link),
        create_link('Console Log', console_log_path_link),
        cloud_watch_logs.get_allure_link()
    ]

    # Search for allure test result and upload specific test execution
    allure_test_case_found = False
    if allure_results_directory_exists(
    ) and allure_results_directory_contains_files():
        for json_result in get_json_results():
            result = open(json_result).read()
            data = json.loads(result)
            found_name = ''.join(e for e in data['name'] if e.isalnum())
            expected_name = ''.join(e for e in test_name if e.isalnum())

            if found_name == expected_name:
                print('Allure result found: ' + json_result)
                allure_test_case_found = True
                data['links'].extend(allure_links)
                data['labels'].extend(get_host_labels())
                key = allure_results_s3_path + str(
                    uuid.uuid4()) + '-result.json'
                upload_allure_result(s3, artifact_s3_bucket, key, data)
            else:
                print('Looking for: ' + expected_name)
                print('Found: ' + found_name)

    # Generate skipped result if the test execution is not found
    if allure_test_case_found is False:
        print(
            'Allure Test Case Not Found, marking as disabled / skipped / timed out!'
        )
        key = allure_results_s3_path + str(uuid.uuid4()) + '-result.json'
        skipped_result = create_missing_allure_result(
            test_name, test_class_name, test_start_milli, test_stop_milli,
            allure_links, test_timed_out)
        upload_allure_result(s3, artifact_s3_bucket, key, skipped_result)

    # chuck artifacts in s3 within someBucketName/test-runner/jobId/junit.xml, console.txt, error.png
    retry_glob = '/tmp/lambda_protractor/build/test/test/e2e/retry_attempts/*.json'
    junit_glob = '/tmp/lambda_protractor/build/test/test/e2e/results/*.xml'
    retry_helper = RetryCombine(retry_glob)

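    # Upload a synthetic Allure result for each retry attempt so retries appear in the report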
    if retry_helper.has_retries():
        start_retry = test_start_milli + 1
        stop_retry = start_retry + 1
        full_test_name = test_class_name.replace('E2E.', '') + ' ' + test_name
        for retry in retry_helper.get_matching_retries(full_test_name):
            result = create_retry_allure_results(test_name, test_class_name,
                                                 start_retry, stop_retry,
                                                 retry.error_msg,
                                                 retry.attempt_number,
                                                 allure_links)
            retry_key = allure_results_s3_path + str(
                uuid.uuid4()) + '-result.json'
            upload_allure_result(s3, artifact_s3_bucket, retry_key, result)
            start_retry = start_retry + 1
            stop_retry = stop_retry + 1

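    # Merge the junit output and retry attempts into a single combined report for this test file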
    jasmine_manifest = JasmineManifest([test_file_path], [], [])
    merger = JunitMerger(junit_glob, '/tmp/combined_retry.xml',
                         jasmine_manifest)
    merger.create_report(retry_helper)

    report_file = '/tmp/combined_retry.xml'
    root = XMLTree.parse(report_file)
    test_case_found = False
    test_case_element = XMLTree.Element('testsuite')
    for test_case in root.findall('./testsuite/testcase'):
        name = test_case.get('name').replace("'", "").replace('"', '')
        tn = test_name.replace('\\', "").replace("'", "").replace('"', '')
        if name == tn:
            print("Matching test case found...")
            test_case_found = True
            test_case.set('loggroupid', context.log_group_name)
            test_case.set('logstreamname', context.log_stream_name)
            test_case.set('awsrequestid', context.aws_request_id)
            test_case_element.append(test_case)

    if test_case_found is not True:
        print(
            "Could not find test report, marking as disabled / skipped / timed out!'"
        )
        new_test_case = XMLTree.Element('testcase')
        fixed_name = test_name.replace('\\', "").replace("'",
                                                         "").replace('"', '')
        new_test_case.set('name', fixed_name)
        new_test_case.set('loggroupid', context.log_group_name)
        new_test_case.set('logstreamname', context.log_stream_name)
        new_test_case.set('awsrequestid', context.aws_request_id)
        new_test_case.set('time', str(test_stop_milli - test_start_milli))
        new_test_case.set('isretried', '0')
        if test_timed_out:
            failure = XMLTree.Element('failed')
            failure.set("message",
                        "Exceeded Timeout in AWS Lambda, see cloudwatch log!")
            failure.set("type", "AWS_TIMEOUT")
            new_test_case.set('classname', 'E2E.Expired')
            new_test_case.set("isexpired", "true")
            new_test_case.set("isfailed", "true")
            new_test_case.append(failure)
        else:
            new_test_case.set('classname', 'E2E.Disabled')
            new_test_case.append(XMLTree.Element('skipped'))
        test_case_element.append(new_test_case)

    # Write the completed junit report
    xmlstr = minidom.parseString(
        XMLTree.tostring(test_case_element)).toprettyxml()
    with open('/tmp/report.xml', "w") as f:
        f.write(xmlstr)

    print(
        'Modified Test report found, attempting to upload /tmp/report.xml to S3 destination '
        + junit_report_s3_path)
    s3.Object(artifact_s3_bucket,
              junit_report_s3_path).put(Body=open('/tmp/report.xml', 'rb'))

    record_vid.communicate("q".encode())  # stop recording
    # kill chromedriver
    os.killpg(os.getpgid(chromedriver.pid), signal.SIGTERM)
    # kill  xvfb
    os.killpg(os.getpgid(start_xvfb.pid), signal.SIGTERM)

    try:
        s3.Object(artifact_s3_bucket, test_error_screenshot_s3_path).put(
            Body=open('/tmp/videoFile.mp4', 'rb'),
            ContentType='video/mp4',
            ContentDisposition='inline')
    except:
        print('Unable to upload video file')

    try:
        s3.Object(artifact_s3_bucket,
                  chrome_log_path).put(Body=open('/tmp/chrome_debug.log',
                                                 'rb'),
                                       ContentType='text/plain; charset=UTF-8',
                                       ContentDisposition='inline')
    except:
        print('Unable to upload chrome debug log')

    try:
        s3.Object(artifact_s3_bucket, chrome_driver_log_path).put(
            Body=open('/tmp/chromedriver.log', 'rb'),
            ContentType='text/plain; charset=UTF-8',
            ContentDisposition='inline')
    except:
        print('Unable to upload chromedriver log')

    try:
        s3.Object(artifact_s3_bucket, console_log_path).put(
            Body=open(console_output, 'rb'),
            ContentType='text/plain; charset=UTF-8',
            ContentDisposition='inline')
    except:
        print('Unable to upload console log')

    test_finish = str(datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%f"))
    print("Marking test finish in database with the following stamp: " +
          test_finish)
    put_attributes(sdb, sdb_domain, job_id, "testFinish", test_finish)