Example #1
    def assert_images(self, *paths):
        normalized_paths = [file_utils.normalize_path(p) for p in paths]
        actual_paths = [
            file_utils.normalize_path(image[0]) for image in self.images
        ]

        self.assertCountEqual(normalized_paths, actual_paths)
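Every example on this page calls file_utils.normalize_path either with a single path or with a path plus a base directory. A minimal sketch of the assumed behaviour, inferred only from how the examples use it (the parameter name base_dir is made up here; the real file_utils implementation may differ):

import os

def normalize_path(path, base_dir=None):
    # Expand '~', resolve a relative path against the optional base
    # directory, and return an absolute, normalized path.
    path = os.path.expanduser(path)
    if base_dir is not None and not os.path.isabs(path):
        path = os.path.join(base_dir, path)
    return os.path.normpath(os.path.abspath(path))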
Example #2
    def post(self):
        try:
            request_data = self.request.body

            execution_info = external_model.to_execution_info(
                request_data.decode("UTF-8"))

            script_name = execution_info.get_script()

            config = load_config(script_name)

            if not config:
                respond_error(
                    self, 400,
                    "Script with name '" + str(script_name) + "' not found")

            working_directory = config.get_working_directory()
            if working_directory is not None:
                working_directory = file_utils.normalize_path(
                    working_directory)

            script_path = file_utils.normalize_path(config.get_script_path(),
                                                    working_directory)

            script_args = build_parameter_string(
                execution_info.get_param_values(), config)

            command = []
            command.append(script_path)
            command.extend(script_args)

            script_logger = logging.getLogger("scriptServer")
            script_logger.info("Calling script: " + " ".join(command))

            if config.is_requires_terminal():
                self.process_wrapper = execution.PtyProcessWrapper(
                    command, config.get_name(), working_directory)
            else:
                self.process_wrapper = execution.POpenProcessWrapper(
                    command, config.get_name(), working_directory)

            process_id = self.process_wrapper.get_process_id()

            running_scripts[process_id] = self.process_wrapper

            self.write(str(process_id))

        except Exception as e:
            script_logger = logging.getLogger("scriptServer")
            script_logger.exception("Error while calling the script")

            if hasattr(e, "strerror") and e.strerror:
                error_output = e.strerror
            else:
                error_output = "Unknown error occurred, contact the administrator"

            result = " ---  ERRORS  --- \n"
            result += error_output

            respond_error(self, 500, result)
Example #3
    def parse_script_body(self, config, working_directory):
        script_body = config.get_script_body()
        if (' ' in script_body) and (not sys.platform.startswith('win')):
            args = shlex.split(script_body)
            script_path = file_utils.normalize_path(args[0], working_directory)
            body_args = args[1:]
            for i, body_arg in enumerate(body_args):
                expanded = os.path.expanduser(body_arg)
                if expanded != body_arg:
                    body_args[i] = expanded
        else:
            script_path = file_utils.normalize_path(script_body, working_directory)
            body_args = []

        return script_path, body_args
Example #4
    def assert_script_code(self, script_path, code, error):
        script_code = self.config_service.load_script_code(
            'ConfX', self.admin_user)
        if 'code_edit_error' not in script_code:
            script_code['code_edit_error'] = None

        if not os.path.isabs(script_code['file_path']):
            script_code['file_path'] = file_utils.normalize_path(
                script_code['file_path'])

        self.assertEqual(
            {
                'code': code,
                'file_path': file_utils.normalize_path(script_path),
                'code_edit_error': error
            }, script_code)
Example #5
    def test_mixed_images_when_multiple_output(self):
        path1 = test_utils.create_file('test123.png')
        path2 = test_utils.create_file('images/test.png')
        path3 = test_utils.create_file('a.b.c.png')
        path4 = test_utils.create_file('test456.png')
        path5 = test_utils.create_file('some/long/path/me.jpg')

        config = create_config_model(
            'my_script',
            output_files=[
                inline_image(test_utils.temp_folder + os_utils.path_sep() +
                             '#test\d+.png#'),
                inline_image(path2),
                inline_image(path3),
                inline_image('##any_path/path/\w+#.jpg')
            ])

        execution_id = self.start_execution(config)

        paths = [
            normalize_path(p) for p in (path1, path2, path3, path4, path5)
        ]
        for index, path in enumerate(paths):
            self.write_output(execution_id, '__ ' + path + ' __\n')
            self.wait_output_chunks(execution_id, chunks_count=index + 1)

        self.write_output(execution_id, '__ ' + path2 + ' __\n')
        self.wait_output_chunks(execution_id, chunks_count=len(paths) + 1)

        self.assert_images(*paths)
Example #6
    def test_glob_absolute_path(self):
        created_files = ['file1', 'file2', 'file3']
        test_utils.create_files(created_files)

        matcher = self.create_matcher(
            [file_utils.normalize_path('*1', test_utils.temp_folder)])
        files = model_helper.list_files(test_utils.temp_folder,
                                        excluded_files_matcher=matcher)
        self.assertEqual(['file2', 'file3'], files)
Example #7
    def save_file(self, filename, body, username) -> str:
        upload_folder = self.user_file_storage.prepare_new_folder(
            username, self.folder)
        pref_result_path = os.path.join(upload_folder, filename)

        result_path = file_utils.create_unique_filename(pref_result_path)
        file_utils.write_file(result_path, body, True)

        return file_utils.normalize_path(result_path)
Example #8
def incremental_rebuild(last_revision, current_revision):
    changed_files = vcs_gateway.get_revision_changed_files(
        ROOT_PROJECT_PATH, last_revision, current_revision)

    changed_project_poms = set([])

    for file_path in changed_files:
        file_path = file_utils.normalize_path(file_path)

        if os.path.isdir(file_path):
            parent_path = file_path
        else:
            parent_path = os.path.dirname(file_path)

        while parent_path and not (file_utils.is_root(parent_path)):
            pom_path = os.path.join(parent_path, "pom.xml")

            if os.path.exists(pom_path):
                changed_project_poms.add(pom_path)
                break

            if parent_path == ROOT_PROJECT_PATH:
                break

            parent_path = os.path.dirname(parent_path)

    changed_projects = common.to_mvn_projects(changed_project_poms,
                                              ROOT_PROJECT_PATH, ROOT_ONLY)

    print('Rebuilding revision changes (' + last_revision + ';' +
          current_revision + ']. Changed projects:')
    print('\n'.join(collections.to_strings(changed_projects)))

    all_poms = mvn_utils.gather_all_poms(ROOT_PROJECT_PATH, ROOT_ONLY)
    unchanged_project_poms = []
    for pom_path in all_poms:
        if pom_path in changed_project_poms:
            continue

        unchanged_project_poms.append(pom_path)

    for pom_path in unchanged_project_poms:
        unchanged_project = mvn_utils.create_project(pom_path)

        if not mvn_utils.is_built(unchanged_project):
            print('project ' + str(unchanged_project) +
                  ' was cleaned, sending to rebuild')
            changed_projects.append(unchanged_project)
            continue

        mvn_utils.fast_install(unchanged_project, MAVEN_REPO_PATH)

    mvn_utils.rebuild(ROOT_PROJECT_PATH,
                      changed_projects,
                      MVN_OPTS,
                      silent=False)
Example #9
    def test_recursive_glob_absolute_path(self):
        created_files = ['file1', 'file2', 'file3']
        test_utils.create_files(created_files, 'sub')

        matcher = self.create_matcher(
            [file_utils.normalize_path('**/file1', test_utils.temp_folder)])
        subfolder_path = os.path.join(test_utils.temp_folder, 'sub')
        files = model_helper.list_files(subfolder_path,
                                        excluded_files_matcher=matcher)
        self.assertEqual(['file2', 'file3'], files)
Example #10
    def prepare_downloadable_files(self, config, script_output,
                                   script_param_values, audit_name):
        output_files = config.output_files

        if not output_files:
            return []

        output_files = substitute_parameter_values(config.parameters,
                                                   config.output_files,
                                                   script_param_values)

        correct_files = []

        for output_file in output_files:
            files = find_matching_files(output_file, script_output)

            if files:
                for file in files:
                    file_path = file_utils.normalize_path(
                        file, config.get_working_directory())
                    if not os.path.exists(file_path):
                        LOGGER.warning('file ' + file + ' (full path = ' +
                                       file_path + ') not found')
                    elif os.path.isdir(file_path):
                        LOGGER.warning('file ' + file +
                                       ' is a directory. Not allowed')
                    elif file_path not in correct_files:
                        correct_files.append(file_path)
            else:
                LOGGER.warning("Couldn't find file for " + output_file)

        if not correct_files:
            return []

        download_folder = self.user_file_storage.prepare_new_folder(
            audit_name, self.result_folder)
        LOGGER.info('Created download folder for ' + audit_name + ': ' +
                    download_folder)

        result = []
        for file in correct_files:
            preferred_download_file = os.path.join(download_folder,
                                                   os.path.basename(file))

            try:
                download_file = create_unique_filename(preferred_download_file)
            except file_utils.FileExistsException:
                LOGGER.exception('Cannot get unique name')
                continue

            copyfile(file, download_file)

            result.append(download_file)

        return result
Example #11
    def test_multiple_exclusions_when_no_match(self):
        created_files = ['fileA', 'fileB', 'fileC', 'fileD']
        test_utils.create_files(created_files)

        matcher = self.create_matcher([
            '*2', 'file1',
            file_utils.normalize_path('file4', test_utils.temp_folder)
        ])
        files = model_helper.list_files(test_utils.temp_folder,
                                        excluded_files_matcher=matcher)
        self.assertEqual(created_files, files)
Example #12
    def test_recursive_glob_absolute_path_and_deep_nested(self):
        created_files = ['file1', 'file2', 'file3']
        abc_subfolder = os.path.join('a', 'b', 'c')
        test_utils.create_files(created_files, abc_subfolder)

        matcher = self.create_matcher(
            [file_utils.normalize_path('**/file1', test_utils.temp_folder)])
        abc_path = os.path.join(test_utils.temp_folder, abc_subfolder)
        files = model_helper.list_files(abc_path,
                                        excluded_files_matcher=matcher)
        self.assertEqual(['file2', 'file3'], files)
Example #13
    def test_single_dynamic_image(self):
        path = test_utils.create_file('test.png')
        config = create_config_model('my_script', output_files=[inline_image('##any_path.png#')])

        execution_id = self.start_execution(config)

        full_path = file_utils.normalize_path(path)
        self.write_output(execution_id, '123\n' + full_path + '\n456')
        self.wait_output_chunks(execution_id, chunks_count=1)

        self.assert_images(full_path)
Example #14
    def _prepare_downloadable_files(self,
                                    output_files,
                                    config,
                                    script_output,
                                    *,
                                    should_exist=True):
        found_files = {}

        for output_file in output_files:
            files = find_matching_files(output_file, script_output)

            if files:
                for file in files:
                    file_path = file_utils.normalize_path(
                        file, config.working_directory)
                    if not os.path.exists(file_path):
                        if should_exist:
                            LOGGER.warning('file ' + file + ' (full path = ' +
                                           file_path + ') not found')
                    elif os.path.isdir(file_path):
                        LOGGER.warning('file ' + file +
                                       ' is a directory. Not allowed')
                    elif file_path not in found_files:
                        found_files[file] = file_path
            elif should_exist:
                LOGGER.warning("Couldn't find file for " + output_file)

        if not found_files:
            return {}

        result = {}
        for original_file_path, normalized_path in found_files.items():
            if original_file_path in self.prepared_files:
                result[original_file_path] = self.prepared_files[
                    original_file_path]
                continue

            preferred_download_file = os.path.join(
                self.download_folder, os.path.basename(normalized_path))

            try:
                download_file = create_unique_filename(preferred_download_file)
            except file_utils.FileExistsException:
                LOGGER.exception('Cannot get unique name')
                continue

            copyfile(normalized_path, download_file)

            result[original_file_path] = download_file
            self.prepared_files[original_file_path] = download_file

        return result
Example #15
def parse_options():
    parser = argparse.ArgumentParser(description="Rebuild of complex maven projects.")
    parser.add_argument("-r", "--root_path", help="path to the root project", default=".")
    parser.add_argument("-m", "--maven", help="maven parameters to pass to mvn command", default="")
    parser.add_argument("-o", "--root_only", help="skip projects, which are not submodules of root project hierarchy",
                        action='store_true')
    parser.add_argument("-t", "--track_unversioned", help="also consider local changes in unversioned files",
                        action='store_true')
    parser.add_argument("-c", "--vcs", help="version control system", choices=['svn', 'git'])
    args = vars(parser.parse_args())

    if args["root_path"]:
        root_project_path = args["root_path"]
    else:
        root_project_path = "."

    mvn_opts = args["maven"]

    root_only = args["root_only"]
    track_unversioned = args["track_unversioned"]

    root_project_path = file_utils.normalize_path(root_project_path)
    print("Root project path: " + root_project_path)
    print("Additional maven arguments: " + str(mvn_opts))

    root_pom_path = os.path.join(root_project_path, "pom.xml")
    if not os.path.exists(root_pom_path):
        print("ERROR! No root pom.xml find in path", os.path.abspath(root_project_path))
        sys.exit(1)

    if args['vcs']:
        if args['vcs'] == 'git':
            vcs_gateway = git_utils.GitGateway()
        else:
            vcs_gateway = svn_utils.SvnGateway()
    else:
        if svn_utils.is_svn_repo(root_project_path):
            vcs_gateway = svn_utils.SvnGateway()
        elif git_utils.is_git_repo(root_project_path):
            vcs_gateway = git_utils.GitGateway()
        else:
            print("Couldn't resolve VCS type, please specify it explicitly using -c argument")
            sys.exit(-1)

    if '-Dmaven.repo.local=' in mvn_opts:
        mvn_repo_path = get_arg_value(mvn_opts, '-Dmaven.repo.local')
    else:
        mvn_repo_path = mvn_utils.def_repo_path()

    return (root_project_path, mvn_repo_path, mvn_opts, root_only, track_unversioned, vcs_gateway)
Example #16
def incremental_rebuild(last_revision, current_revision):
    changed_files = vcs_gateway.get_revision_changed_files(ROOT_PROJECT_PATH, last_revision, current_revision)

    changed_project_poms = set([])

    for file_path in changed_files:
        file_path = file_utils.normalize_path(file_path)

        if os.path.isdir(file_path):
            parent_path = file_path
        else:
            parent_path = os.path.dirname(file_path)

        while parent_path and not (file_utils.is_root(parent_path)):
            pom_path = os.path.join(parent_path, "pom.xml")

            if os.path.exists(pom_path):
                changed_project_poms.add(pom_path)
                break

            if parent_path == ROOT_PROJECT_PATH:
                break

            parent_path = os.path.dirname(parent_path)

    changed_projects = common.to_mvn_projects(changed_project_poms, ROOT_PROJECT_PATH, ROOT_ONLY)

    print('Rebuilding revision changes (' + last_revision + ';' + current_revision + ']. Changed projects:')
    print('\n'.join(collections.to_strings(changed_projects)))

    all_poms = mvn_utils.gather_all_poms(ROOT_PROJECT_PATH, ROOT_ONLY)
    unchanged_project_poms = []
    for pom_path in all_poms:
        if pom_path in changed_project_poms:
            continue

        unchanged_project_poms.append(pom_path)

    for pom_path in unchanged_project_poms:
        unchanged_project = mvn_utils.create_project(pom_path)

        if not mvn_utils.is_built(unchanged_project):
            print('project ' + str(unchanged_project) + ' was cleaned, sending to rebuild')
            changed_projects.append(unchanged_project)
            continue

        mvn_utils.fast_install(unchanged_project, MAVEN_REPO_PATH)

    mvn_utils.rebuild(ROOT_PROJECT_PATH, changed_projects, MVN_OPTS, silent=False)
Example #17
    def _prepare_downloadable_files(self, config, script_output, script_param_values, execution_owner):
        output_files = config.output_files

        if not output_files:
            return []

        output_files = substitute_parameter_values(
            config.parameters,
            config.output_files,
            script_param_values)

        correct_files = []

        for output_file in output_files:
            files = find_matching_files(output_file, script_output)

            if files:
                for file in files:
                    file_path = file_utils.normalize_path(file, config.working_directory)
                    if not os.path.exists(file_path):
                        LOGGER.warning('file ' + file + ' (full path = ' + file_path + ') not found')
                    elif os.path.isdir(file_path):
                        LOGGER.warning('file ' + file + ' is a directory. Not allowed')
                    elif file_path not in correct_files:
                        correct_files.append(file_path)
            else:
                LOGGER.warning("Couldn't find file for " + output_file)

        if not correct_files:
            return []

        download_folder = self.user_file_storage.prepare_new_folder(execution_owner, self.result_folder)
        LOGGER.info('Created download folder for ' + execution_owner + ': ' + download_folder)

        result = []
        for file in correct_files:
            preferred_download_file = os.path.join(download_folder, os.path.basename(file))

            try:
                download_file = create_unique_filename(preferred_download_file)
            except file_utils.FileExistsException:
                LOGGER.exception('Cannot get unique name')
                continue

            copyfile(file, download_file)

            result.append(download_file)

        return result
Example #18
    def _path_to_json(self, path):
        if path is None:
            return None

        path = file_utils.normalize_path(path, self._config_folder)

        if os.path.exists(path):
            try:
                file_content = file_utils.read_file(path)
                return json.loads(file_content)
            except:
                LOGGER.exception('Failed to load included file ' + path)
                return None
        else:
            LOGGER.warning('Failed to load included file, path does not exist: ' + path)
            return None
Example #19
    def _path_to_json(self, path):
        if path is None:
            return None

        path = file_utils.normalize_path(path, self._config_folder)

        if os.path.exists(path):
            try:
                file_content = file_utils.read_file(path)
                return json.loads(file_content)
            except:
                LOGGER.exception('Failed to load included file ' + path)
                return None
        else:
            LOGGER.warning('Failed to load included file, path does not exist: ' + path)
            return None
Example #20
    def test_double_asterisk_match_multiple_files_when_complex(self):
        test_utils.create_file(os.path.join('f1', 'test1.txt'))
        test_utils.create_file(os.path.join('f1', 'test2.txt'))
        test_utils.create_file(os.path.join('d2', 'test3.txt'))
        test_utils.create_file(os.path.join('d2', 'd3', 'test4.txt'))
        test_utils.create_file(os.path.join('d3', 'd4', 'd5', 'test5.png'))
        test_utils.create_file(os.path.join('d3', 'd6', 'd7', 'test6.txt'))

        temp_folder = file_utils.normalize_path(test_utils.temp_folder)
        files = set(file_download_feature.find_matching_files(temp_folder + '/d*/**/*.txt', None))

        self.assertCountEqual(files, {
            os.path.join(temp_folder, 'd2', 'test3.txt'),
            os.path.join(temp_folder, 'd2', 'd3', 'test4.txt'),
            os.path.join(temp_folder, 'd3', 'd6', 'd7', 'test6.txt')
        })
Example #21
    def test_double_asterisk_match_multiple_files_when_complex(self):
        test_utils.create_file(os.path.join('f1', 'test1.txt'))
        test_utils.create_file(os.path.join('f1', 'test2.txt'))
        test_utils.create_file(os.path.join('d2', 'test3.txt'))
        test_utils.create_file(os.path.join('d2', 'd3', 'test4.txt'))
        test_utils.create_file(os.path.join('d3', 'd4', 'd5', 'test5.png'))
        test_utils.create_file(os.path.join('d3', 'd6', 'd7', 'test6.txt'))

        temp_folder = file_utils.normalize_path(test_utils.temp_folder)
        files = set(file_download_feature.find_matching_files(temp_folder + '/d*/**/*.txt', None))

        self.assertCountEqual(files, {
            os.path.join(temp_folder, 'd2', 'test3.txt'),
            os.path.join(temp_folder, 'd2', 'd3', 'test4.txt'),
            os.path.join(temp_folder, 'd3', 'd6', 'd7', 'test6.txt')
        })
Example #22
    def test_image_path_split_in_chunks(self):
        path = test_utils.create_file('test123.png')

        config = create_config_model('my_script', output_files=[inline_image('##any_path.png#')])

        execution_id = self.start_execution(config)

        normalized = normalize_path(path)

        self.write_output(execution_id, normalized[:4])
        self.wait_output_chunks(execution_id, chunks_count=1)

        self.write_output(execution_id, normalized[4:] + '\n')
        self.wait_output_chunks(execution_id, chunks_count=2)

        self.assert_images(path)
Example #23
    def test_find_multiple_images_by_same_pattern(self):
        path1 = test_utils.create_file('test123.png')
        test_utils.create_file('images/test.png')
        path3 = test_utils.create_file('a.b.c.png')
        path4 = test_utils.create_file('some/sub/folder/test456.png')

        config = create_config_model(
            'my_script', output_files=[inline_image('##any_path.png#')])

        execution_id = self.start_execution(config)

        paths = [normalize_path(p) for p in (path1, path3, path4)]
        for index, path in enumerate(paths):
            self.write_output(execution_id, '__ ' + path + ' __\n')
            self.wait_output_chunks(execution_id, chunks_count=index + 1)

        self.assert_images(*paths)
Example #24
def split_command(script_command, working_directory=None):
    if ' ' in script_command:
        posix = not os_utils.is_win()
        args = shlex.split(script_command, posix=posix)

        if not posix:
            args = [string_utils.unwrap_quotes(arg) for arg in args]
    else:
        args = [script_command]

    script_path = file_utils.normalize_path(args[0], working_directory)
    script_args = args[1:]
    for i, body_arg in enumerate(script_args):
        expanded = os.path.expanduser(body_arg)
        if expanded != body_arg:
            script_args[i] = expanded

    return [script_path] + script_args
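A hypothetical call on a POSIX system, assuming normalize_path resolves the relative script name against the working directory (that behaviour is inferred from the examples, not shown here):

    split_command('my_script.sh --flag ~/data', '/opt/scripts')
    # expected: ['/opt/scripts/my_script.sh', '--flag', '/home/<user>/data']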
Example #25
    def test_image_path_split_in_chunks_and_no_newlines(self):
        path = test_utils.create_file('test123.png')

        config = create_config_model('my_script', output_files=[inline_image('##any_path.png#')])

        execution_id = self.start_execution(config)

        normalized = normalize_path(path)

        self.write_output(execution_id, normalized[:4])
        self.wait_output_chunks(execution_id, chunks_count=1)

        self.write_output(execution_id, normalized[4:])
        self.wait_output_chunks(execution_id, chunks_count=2)

        self.executor_service.get_active_executor(execution_id, DEFAULT_USER).process_wrapper.stop()
        self.wait_close(execution_id)

        self.assert_images(path)
Example #26
def split_command(script_command, working_directory=None):
    if ' ' in script_command:
        if _is_file_path(script_command, working_directory):
            args = [script_command]
        else:
            posix = not os_utils.is_win()
            args = shlex.split(script_command, posix=posix)

            if not posix:
                args = [string_utils.unwrap_quotes(arg) for arg in args]
    else:
        args = [script_command]

    script_path = file_utils.normalize_path(args[0], working_directory)
    if (not os.path.isabs(script_path)) or (not os.path.exists(script_path)):
        script_path = args[0]

    script_args = args[1:]
    for i, body_arg in enumerate(script_args):
        expanded = os.path.expanduser(body_arg)
        if expanded != body_arg:
            script_args[i] = expanded

    return [script_path] + script_args
Example #27
    def post(self):
        script_name = None

        try:
            request_data = self.request.body

            execution_info = external_model.to_execution_info(request_data.decode("UTF-8"))

            script_name = execution_info.script

            config = load_config(script_name)

            if not config:
                respond_error(self, 400, "Script with name '" + str(script_name) + "' not found")
                return

            working_directory = config.get_working_directory()
            if working_directory is not None:
                working_directory = file_utils.normalize_path(working_directory)

            script_logger = logging.getLogger("scriptServer")

            valid_parameters = model_helper.validate_parameters(execution_info.param_values, config)
            if not valid_parameters:
                respond_error(self, 400, 'Received invalid parameters')
                return

            script_base_command = process_utils.split_command(config.get_script_command(), working_directory)

            script_args = build_command_args(execution_info.param_values, config)
            command = script_base_command + script_args

            audit_script_args = build_command_args(
                execution_info.param_values,
                config,
                model_helper.value_to_str)
            audit_command = script_base_command + audit_script_args

            script_logger.info('Calling script: ' + ' '.join(audit_command))
            script_logger.info('User info: ' + str(get_all_audit_names(self, script_logger)))

            run_pty = config.is_requires_terminal()
            if run_pty and not pty_supported:
                script_logger.warning(
                    "Requested PTY mode, but it's not supported for this OS (" + sys.platform + "). Falling back to POpen")
                run_pty = False

            if run_pty:
                self.process_wrapper = execution_pty.PtyProcessWrapper(command,
                                                                       config.get_name(),
                                                                       working_directory,
                                                                       config,
                                                                       execution_info)
            else:
                self.process_wrapper = execution_popen.POpenProcessWrapper(command,
                                                                           config.get_name(),
                                                                           working_directory,
                                                                           config,
                                                                           execution_info)
            self.process_wrapper.start()

            process_id = self.process_wrapper.get_process_id()

            running_scripts[process_id] = self.process_wrapper

            self.write(str(process_id))

            alerts_config = self.application.alerts_config
            if alerts_config:
                self.subscribe_fail_alerter(script_name, script_logger, alerts_config)


        except Exception as e:
            script_logger = logging.getLogger("scriptServer")
            script_logger.exception("Error while calling the script")

            if hasattr(e, "strerror") and e.strerror:
                error_output = e.strerror
            else:
                error_output = "Unknown error occurred, contact the administrator"

            result = " ---  ERRORS  --- \n"
            result += error_output

            if script_name:
                script = str(script_name)
            else:
                script = "Some script"

            audit_name = get_audit_name(self, script_logger)
            send_alerts(self.application.alerts_config, script + ' NOT STARTED',
                        "Couldn't start the script " + script + ' by ' + audit_name + '.\n\n' +
                        result)

            respond_error(self, 500, result)
Example #28
def _normalize_working_dir(working_directory):
    if working_directory is None:
        return None
    return file_utils.normalize_path(working_directory)
Example #29
    def post(self):
        script_name = None
        script_logger = logging.getLogger("scriptServer")
        try:
            request_data = self.request.body

            execution_info = external_model.to_execution_info(
                request_data.decode("UTF-8"))

            script_name = execution_info.get_script()

            config = load_config(script_name)
            if not config:
                respond_error(
                    self, 400,
                    "Script with name '" + str(script_name) + "' not found")

            script_logger.info("Config: %s" % (config.redirect))
            if not config.redirect:
                working_directory = config.get_working_directory()
                if working_directory is not None:
                    working_directory = file_utils.normalize_path(
                        working_directory)

                (script_path,
                 body_args) = self.parse_script_body(config, working_directory)

                script_args = build_parameter_string(
                    execution_info.get_param_values(), config)

                command = []
                command.append(script_path)
                command.extend(body_args)
                command.extend(script_args)

                script_logger.info("Calling script: " + " ".join(command))

                run_pty = config.is_requires_terminal()
                if run_pty and not pty_supported:
                    script_logger.warn(
                        "Requested PTY mode, but it's not supported for this OS ("
                        + sys.platform + "). Falling back to POpen")
                    run_pty = False

                if run_pty:
                    self.process_wrapper = execution_pty.PtyProcessWrapper(
                        command, config.get_name(), working_directory)
                else:
                    self.process_wrapper = execution_popen.POpenProcessWrapper(
                        command, config.get_name(), working_directory)

                process_id = self.process_wrapper.get_process_id()

                running_scripts[process_id] = self.process_wrapper

                self.write(str(process_id))

                alerts_config = self.application.alerts_config
                if alerts_config:
                    self.subscribe_fail_alerter(script_name, script_logger,
                                                alerts_config)
            else:
                IndexHandler.redirect(self,
                                      url='/audio.html',
                                      permanent="false")

        except Exception as e:
            script_logger = logging.getLogger("scriptServer")
            script_logger.exception("Error while calling the script")

            if hasattr(e, "strerror") and e.strerror:
                error_output = e.strerror
            else:
                error_output = "Unknown error occurred, contact the administrator"

            result = " ---  ERRORS  --- \n"
            result += error_output

            if script_name:
                script = str(script_name)
            else:
                script = "Some script"

            audit_name = get_audit_name(self, self.application.auth,
                                        script_logger)
            send_alerts(
                self.application.alerts_config, script + ' NOT STARTED',
                "Couldn't start the script " + script + ' by ' + audit_name +
                '.\n\n' + result)

            respond_error(self, 500, result)
Example #30
    if os.name == 'nt':
        result = root_project_path.replace('\\', "_")
    else:
        result = root_project_path.replace('/', "_")

    result = result.replace(":", "_")
    return result


changed_files = vcs_gateway.get_local_changed_files(ROOT_PROJECT_PATH,
                                                    not TRACK_UNVERSIONED)
important_files = filter(is_important, changed_files)

pom_paths = set([])

for file_path in important_files:
    file_path = file_utils.normalize_path(file_path)

    if os.path.isdir(file_path):
        parent_path = file_path
    else:
        parent_path = os.path.dirname(file_path)

    while parent_path and not (file_utils.is_root(parent_path)):
        pom_path = os.path.join(parent_path, "pom.xml")

        if os.path.exists(pom_path):
            pom_paths.add(pom_path)
            break

        if parent_path == ROOT_PROJECT_PATH:
            break
Example #31
def _resolve_list_files_dir(file_dir, working_dir):
    if not file_dir or not working_dir:
        return file_dir

    return file_utils.normalize_path(file_dir, working_dir)
Example #32
    if os.name == 'nt':
        result = root_project_path.replace('\\', "_")
    else:
        result = root_project_path.replace('/', "_")

    result = result.replace(":", "_")
    return result


changed_files = vcs_gateway.get_local_changed_files(ROOT_PROJECT_PATH, not TRACK_UNVERSIONED)
important_files = filter(is_important, changed_files)

pom_paths = set([])

for file_path in important_files:
    file_path = file_utils.normalize_path(file_path)

    if os.path.isdir(file_path):
        parent_path = file_path
    else:
        parent_path = os.path.dirname(file_path)

    while parent_path and not (file_utils.is_root(parent_path)):
        pom_path = os.path.join(parent_path, "pom.xml")

        if os.path.exists(pom_path):
            pom_paths.add(pom_path)
            break

        if parent_path == ROOT_PROJECT_PATH:
            break
Example #33
def _resolve_list_files_dir(file_dir, working_dir):
    if not file_dir or not working_dir:
        return file_dir

    return file_utils.normalize_path(file_dir, working_dir)
Example #34
def prepare_downloadable_files(config, script_output, script_param_values,
                               audit_name, secret, temp_folder):
    output_files = config.output_files

    if not output_files:
        return []

    output_files = substitute_parameter_values(config.parameters,
                                               config.output_files,
                                               script_param_values)

    logger = logging.getLogger("scriptServer")

    correct_files = []

    for output_file in output_files:
        files = find_matching_files(output_file, script_output)

        if files:
            for file in files:
                file_path = file_utils.normalize_path(
                    file, config.get_working_directory())
                if not os.path.exists(file_path):
                    logger.warn('file ' + file + ' (full path = ' + file_path +
                                ') not found')
                elif os.path.isdir(file_path):
                    logger.warn('file ' + file +
                                ' is a directory. Not allowed')
                elif file_path not in correct_files:
                    correct_files.append(file_path)
        else:
            logger.warn("Couldn't find file for " + output_file)

    if not correct_files:
        return []

    user_hashed = get_user_download_folder(audit_name, secret)

    download_folder = build_download_path(user_hashed, temp_folder)
    logger.info('Created download folder for ' + audit_name + ': ' +
                download_folder)

    if not os.path.exists(download_folder):
        os.makedirs(download_folder)

    result = []
    for file in correct_files:
        download_file = os.path.join(download_folder, os.path.basename(file))

        if os.path.exists(download_file):
            i = 0

            filename_split = os.path.splitext(os.path.basename(file))
            extension = ''
            name = ''
            if len(filename_split) > 0:
                name = filename_split[0]
                if len(filename_split) > 1:
                    extension = filename_split[1]

            while os.path.exists(download_file) and i < 1000:
                download_file = os.path.join(download_folder,
                                             name + '_' + str(i) + extension)
                i += 1

            if os.path.exists(download_file):
                logger.warn("Couldn't create unique filename for " + file)
                continue

        copyfile(file, download_file)

        result.append(download_file)

    return result
Example #35
    def _get_working_directory(self):
        working_directory = self.config.working_directory
        if working_directory is not None:
            working_directory = file_utils.normalize_path(working_directory)
        return working_directory
Example #36
def _normalize_working_dir(working_directory):
    if working_directory is None:
        return None
    return file_utils.normalize_path(working_directory)