Code Example #1
    def test_find_files(self, d):
        os.chdir(d.path)

        # create two .txt files and one .log file in the test directory
        FileUtil.write_to_file('.', 'file1.txt', '')
        FileUtil.write_to_file('.', 'file2.txt', '')
        FileUtil.write_to_file('.', 'file3.log', '')

        # each pattern should match only the files created above
        files_txt = FileUtil.find_files('file*.txt')
        files_log = FileUtil.find_files('file*.log')

        self.assertEqual(len(files_txt), 2)
        self.assertEqual(len(files_log), 1)
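
For reference, FileUtil.find_files takes a glob-style pattern and returns the matching paths, which is what the two assertions above rely on. A minimal sketch of an equivalent helper, assuming only the standard library (the actual ezored implementation may differ in details such as recursion or sorting):

import glob

def find_files(file_pattern):
    # hypothetical stand-in for FileUtil.find_files: expand a glob pattern
    # relative to the current working directory and return the matches
    return glob.glob(file_pattern)

With the three files created by the test, find_files('file*.txt') would return two paths and find_files('file*.log') one.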
Code Example #2
File: task.py  Project: nicoddemus/ezored
    def run(self, process_data, template_data, working_dir):
        Logger.d('Running task: {0}...'.format(self.get_name()))

        if process_data:
            if self.type == self.TYPE_COPY_FILE:
                # copy a single file from 'from' to 'to'
                from_path = self.params.get('from')
                to_path = self.params.get('to')

                FileUtil.copy_file(from_path=from_path, to_path=to_path)

            elif self.type == self.TYPE_COPY_FILES:
                # expand the 'from' glob pattern and copy every match into 'to'
                to_path = self.params.get('to')
                file_pattern = self.params.get('from')
                file_pattern = process_data.parse_text(file_pattern)
                found_files = FileUtil.find_files(file_pattern)

                for f in found_files:
                    if f:
                        FileUtil.copy_file(from_path=f,
                                           to_path=os.path.join(
                                               to_path, os.path.basename(f)))

            elif self.type == self.TYPE_PARSE_FILE:
                # render each matched file as a Jinja2 template and overwrite it
                file_pattern = self.params.get('file')
                file_pattern = process_data.parse_text(file_pattern)
                found_files = FileUtil.find_files(file_pattern)

                for f in found_files:
                    if f:
                        template_file = os.path.abspath(f)
                        template_loader = jinja2.FileSystemLoader(
                            searchpath=os.path.dirname(template_file))
                        template_env = jinja2.Environment(
                            loader=template_loader)
                        template = template_env.get_template(
                            os.path.basename(template_file))
                        templ_result = template.render(template_data)

                        FileUtil.write_to_file(os.path.dirname(template_file),
                                               os.path.basename(template_file),
                                               str(templ_result))

            elif self.type == self.TYPE_RUN:
                # execute an external command described by 'args'
                run_args = self.params.get('args')

                if run_args:
                    exitcode, stderr, stdout = FileUtil.run(
                        run_args, working_dir,
                        process_data.get_merged_data_for_runner())

                    if exitcode == 0:
                        Logger.i('Run finished for task: {0}'.format(
                            self.get_name()))
                    else:
                        if stdout:
                            Logger.i('Run output for task: {0}'.format(
                                self.get_name()))
                            Logger.clean(stdout)

                        if stderr:
                            Logger.i('Error output while running task: {0}'.format(
                                self.get_name()))
                            Logger.clean(stderr)

                        Logger.f('Failed to run task: {0}'.format(
                            self.get_name()))

            else:
                Logger.f('Invalid task type')
        else:
            Logger.d('Process data is invalid, cannot run task')
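
The TYPE_PARSE_FILE branch above combines find_files with in-place Jinja2 rendering: every file matching the pattern is loaded as a template, rendered with template_data, and written back over itself. A standalone sketch of that pattern using glob and jinja2 directly, outside ezored's Task and FileUtil classes (the pattern and data in the usage line are invented for illustration):

import glob
import os

import jinja2


def render_files_in_place(file_pattern, template_data):
    # expand the pattern, then render each match as a Jinja2 template
    # and overwrite it with the rendered result
    for path in glob.glob(file_pattern):
        template_file = os.path.abspath(path)
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(searchpath=os.path.dirname(template_file)))
        template = env.get_template(os.path.basename(template_file))
        rendered = template.render(template_data)
        with open(template_file, 'w') as f:
            f.write(rendered)


# e.g. fill {{ version }} placeholders in every *.in file of the current directory
render_files_in_place('*.in', {'version': '1.0.0'})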
Code Example #3
    def get_target_data_by_target_name_and_parse(self, target_name,
                                                 process_data):
        Logger.d('Getting target data from dependency: {0}...'.format(
            self.get_name()))

        target_file_data = self.repository.load_target_data_file()

        if target_file_data:
            if 'targets' in target_file_data:
                targets_data = target_file_data['targets']

                for target_data_item in targets_data:
                    current_target_name = target_data_item['name']

                    if self.match_name(pattern=current_target_name,
                                       name=target_name):
                        # get target data
                        target_data = TargetData()

                        if 'data' in target_data_item:
                            target_data_dict = target_data_item['data']

                            if target_data_dict.get('header_search_paths'):
                                target_data.header_search_paths.extend(
                                    FileUtil.normalize_path_from_list(
                                        target_data_dict['header_search_paths']))

                            if target_data_dict.get('library_search_paths'):
                                target_data.library_search_paths.extend(
                                    FileUtil.normalize_path_from_list(
                                        target_data_dict['library_search_paths']))

                            if target_data_dict.get('c_flags'):
                                target_data.c_flags.extend(
                                    target_data_dict['c_flags'])

                            if target_data_dict.get('cxx_flags'):
                                target_data.cxx_flags.extend(
                                    target_data_dict['cxx_flags'])

                            if target_data_dict.get('library_links'):
                                target_data.library_links.extend(
                                    target_data_dict['library_links'])

                            if target_data_dict.get('framework_links'):
                                target_data.framework_links.extend(
                                    target_data_dict['framework_links'])

                            if target_data_dict.get('tasks'):
                                for target_data_task in target_data_dict['tasks']:
                                    task = Task.from_dict(target_data_task)
                                    target_data.tasks.append(task)

                            # create a source group if there are files for it
                            target_data_header_files = []
                            target_data_source_files = []

                            if target_data_dict.get('header_files'):
                                for file_data in target_data_dict['header_files']:
                                    # find all files matching this entry
                                    source_file_to_find = SourceFile.from_dict(file_data)

                                    if source_file_to_find:
                                        # expand the file pattern before searching
                                        file_pattern = source_file_to_find.file
                                        file_pattern = process_data.parse_text(file_pattern)

                                        found_files = FileUtil.find_files(file_pattern)
                                        found_files = FileUtil.normalize_path_from_list(
                                            found_files)

                                        # create a new source file for each found file
                                        for f in found_files:
                                            target_data_header_files.append(SourceFile(
                                                source_file=f,
                                                compile_flags=source_file_to_find.compile_flags))

                            if target_data_dict.get('source_files'):
                                for file_data in target_data_dict['source_files']:
                                    # find all files matching this entry
                                    source_file_to_find = SourceFile.from_dict(file_data)

                                    if source_file_to_find:
                                        # expand the file pattern before searching
                                        file_pattern = source_file_to_find.file
                                        file_pattern = process_data.parse_text(file_pattern)

                                        found_files = FileUtil.find_files(file_pattern)
                                        found_files = FileUtil.normalize_path_from_list(
                                            found_files)

                                        # create a new source file for each found file
                                        for f in found_files:
                                            target_data_source_files.append(SourceFile(
                                                source_file=FileUtil.normalize_path(f),
                                                compile_flags=source_file_to_find.compile_flags))

                            if target_data_header_files or target_data_source_files:
                                target_data_source_group = SourceGroup()
                                target_data_source_group.name = self.get_name()
                                target_data_source_group.header_files = target_data_header_files
                                target_data_source_group.source_files = target_data_source_files

                                target_data.source_groups.append(
                                    target_data_source_group)

                            # parse all things
                            target_data.parse(process_data)
                            return target_data
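
All of the header_files and source_files loops above follow the same expansion pattern: each declared entry may be a glob, find_files turns it into concrete paths, and one SourceFile per match is created carrying the entry's compile_flags. A minimal standalone sketch of that pattern, with a simplified SourceEntry stand-in and plain glob instead of ezored's SourceFile and FileUtil (the forward-slash normalization only approximates normalize_path_from_list):

import glob
import os
from dataclasses import dataclass, field
from typing import List


@dataclass
class SourceEntry:
    # simplified stand-in for ezored's SourceFile: a path or glob pattern
    # plus the compile flags that apply to every match
    file: str
    compile_flags: List[str] = field(default_factory=list)


def expand_source_entries(entries):
    # fan out each entry's glob pattern into one concrete entry per match,
    # carrying over the original compile flags (mirrors the loops above)
    expanded = []
    for entry in entries:
        for path in glob.glob(entry.file):
            expanded.append(SourceEntry(
                file=os.path.normpath(path).replace(os.sep, '/'),
                compile_flags=entry.compile_flags))
    return expanded


# example: one pattern entry fans out into one entry per matching .cpp file
entries = [SourceEntry(file='src/*.cpp', compile_flags=['-fexceptions'])]
print(expand_source_entries(entries))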