Example #1
    def process(self, document):
        """Processing a group of tasks."""
        self.logger.info("Processing group of tasks (parallel=%s)",
                         self.get_parallel_mode())
        self.pipeline.data.env_list[2] = {}

        output, shells = [], []
        result = Adapter({'success': True, 'output': []})
        for task_entry in document:
            key, entry = list(task_entry.items())[0]

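            # in sequential mode or on an 'env' entry, run the
            # shells collected so far before continuing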
            if (not self.parallel or key == 'env') and len(shells) > 0:
                result = Adapter(self.process_shells(shells))
                output += result.output
                shells = []
                if not result.success:
                    break

            if key == 'env':
                self.pipeline.data.env_list[2].update(entry)

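            # supported task types are collected for later execution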
            elif key in [
                    'shell', 'docker(container)', 'docker(image)', 'python',
                    'packer', 'ansible(simple)'
            ]:
                self.prepare_shell_data(shells, key, entry)

        if result.success:
            result = Adapter(self.process_shells(shells))
            output += result.output

        self.event.delegate(result.success)
        return {'success': result.success, 'output': output}
Example #2
    def expected_results():
        """Expected test results."""
        return [
            Adapter({'filename': 'com_only.py', 'loc': 0, 'com': 4, 'ratio': 1.0}),
            Adapter({'filename': 'fifty_fifty.cpp', 'loc': 7, 'com': 7, 'ratio': 1.0}),
            Adapter({'filename': 'fifty_fifty.py', 'loc': 2, 'com': 2, 'ratio': 1.0}),
            Adapter({'filename': 'more_com_than_loc.py', 'loc': 2, 'com': 3, 'ratio': 1.0}),
            Adapter({'filename': 'more_loc_than_com.py', 'loc': 4, 'com': 1, 'ratio': 0.25})
        ]
Example #3
    def verify_results(self, loc):
        """Testing for expected results."""
        results = sorted(loc.results, key=lambda entry: Adapter(entry).file)
        assert_that(len(results), equal_to(len(self.expected_results())))

        for idx, expected in enumerate(self.expected_results()):
            assert_that(Adapter(results[idx]).file, ends_with(expected.filename))
            assert_that(Adapter(results[idx]).loc, equal_to(expected.loc))
            assert_that(Adapter(results[idx]).com, equal_to(expected.com))
            assert_that(float(Adapter(results[idx]).ratio), equal_to(expected.ratio))
Example #4
    def process_shells_parallel(self, shells):
        """Processing a list of shells in parallel."""
        output = []
        success = True
        with closing(multiprocessing.Pool(
                multiprocessing.cpu_count())) as pool:
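            # the shells are distributed across a pool of worker processes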
            for result in [
                    Adapter(entry)
                    for entry in pool.map(worker, shells)
            ]:
                output += result.output
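                # look up the shell this result belongs to (matched by id)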
                the_shell = [
                    shell for shell in shells if shell['id'] == result.id
                ][0]
                self.__handle_variable(the_shell['entry'], result.output)
                if not result.success:
                    success = False
        if success:
            self.logger.info("Parallel Processing Bash code: finished")
            return {'success': True, 'output': output}

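        # at least one shell failed: run cleanup and report the failure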
        for line in self.run_cleanup(shells[0]['env'], 99):
            output.append(line)
        self.logger.error("Pipeline has failed: immediately leaving!")
        self.event.failed()
        return {'success': False, 'output': output}
Example #5
    def __init__(self, **kwargs):
        """
        Initializing and validating fields.

        Args:
            kwargs (dict): application command line options.
        """
        try:
            arguments = Adapter(
                Schema(ApplicationOptions.SCHEMA).validate(kwargs))
            self.definition = arguments.definition
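            # comma separated tags become lists; empty entries are dropped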
            self.matrix_tags = [
                entry for entry in arguments.matrix_tags.split(',')
                if len(entry) > 0
            ]
            self.tags = [
                entry for entry in arguments.tags.split(',') if len(entry) > 0
            ]
            self.validate_only = arguments.validate_only
            self.dry_run = arguments.dry_run
            self.event_logging = arguments.event_logging
            self.logging_config = arguments.logging_config
            self.debug = arguments.debug
            self.strict = arguments.strict
            self.report = arguments.report
            self.temporary_scripts_path = arguments.temporary_scripts_path
        except SchemaError as exception:
            logging.getLogger(__name__).error(exception)
            raise RuntimeError(str(exception))
Example #6
    def test_valid_loader(self):
        """Testing Loader used the right way."""
        yaml_file = os.path.join(os.path.dirname(__file__),
                                 'data/loader_main.yaml')
        document = Adapter(Loader.load(yaml_file))
        assert_that(render(document.some, model=document.model),
                    equal_to('hello world!'))
Example #7
    def __init__(self, **kwargs):
        """Initializing and validating fields."""
        try:
            arguments = Adapter(ShellConfig.schema().validate(kwargs))
            self.script = arguments.script
            self.title = arguments.title
            self.model = arguments.model.data
            self.env = arguments.env.data
            self.item = arguments.item
            self.dry_run = arguments.dry_run
            self.debug = arguments.debug
            self.strict = arguments.strict
            self.variables = arguments.variables.data
            self.temporary_scripts_path = arguments.temporary_scripts_path
            self.internal = arguments.internal
        except SchemaError as exception:
            logging.getLogger(__name__).error(exception)
            raise RuntimeError(str(exception))
Example #8
    def __init__(self, **kwargs):
        """
        Initializing and validating fields.

        Args:
            kwargs (dict): collector stage fields (stage, status, events).

        Raises:
            RuntimeError: when validation of parameters has failed.
        """
        try:
            arguments = Adapter(
                CollectorStage.schema_complete().validate(kwargs))
            self.stage = arguments.stage
            self.status = arguments.status
            self.events = arguments.events
        except SchemaError as exception:
            Logger.get_logger(__name__).error(exception)
            raise RuntimeError(str(exception))
Example #9
    def process_shells_ordered(self, shells):
        """Processing a list of shells one after the other."""
        output = []
        for shell in shells:
            entry = shell['entry']
            config = ShellConfig(
                script=entry['script'],
                title=entry['title'] if 'title' in entry else '',
                model=shell['model'],
                env=shell['env'],
                item=shell['item'],
                dry_run=shell['dry_run'],
                debug=shell['debug'],
                strict=shell['strict'],
                variables=shell['variables'],
                temporary_scripts_path=shell['temporary_scripts_path'])
            result = Adapter(
                self.process_shell(get_creator_by_name(shell['creator']),
                                   entry, config))
            output += result.output
            self.__handle_variable(entry, result.output)
            if not result.success:
                return {'success': False, 'output': output}
        return {'success': True, 'output': output}
Example #10
    def load_configuration(self):
        """Loading configuration."""
        filename = os.path.join(os.path.dirname(__file__),
                                'templates/spline-loc.yml.j2')
        with open(filename) as handle:
            return Adapter(safe_load(handle)).configuration
Example #11
    def run(self):
        """Processing the pipeline."""
        self.logger.info("Running with Python %s",
                         sys.version.replace("\n", ""))
        self.logger.info("Running on platform %s", platform.platform())
        self.logger.info("Current cpu count is %d",
                         multiprocessing.cpu_count())

        configuration = self.load_configuration()
        paths = [os.path.abspath(path) for path in Adapter(self.options).path]
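        # all file extensions supported by the configuration entries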
        supported_extension = [
            ext.strip() for entry in configuration
            for ext in Adapter(entry).extension.split()
        ]

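        # analyse each file with a supported extension under the given paths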
        for path, path_and_filename, extension in Application.walk_files_for(
                paths, supported_extension):
            entry = Select(*configuration) \
                .where(lambda entry: extension in Adapter(entry).extension.split()) \
                .transform(lambda entry: Adapter(entry)) \
                .build()[0]
            # parse the file with a regex to get the loc and com values:
            # 100 lines in total with 50 comment lines means loc=50, com=50,
            # and the ratio would then be 1.0
            loc, com = self.analyse(path_and_filename, entry.regex)
            ratio = float(com) / float(loc) if loc > 0 and com < loc else 1.0

            self.results.append({
                'type': entry.type,
                'file': path_and_filename.replace(path + '/', ''),
                'loc': loc,
                'com': com,
                'ratio': "%.2f" % ratio
            })
        # for the table we are mainly interested in ratios below the defined
        # threshold (unless you want to see all of your code: --show-all)
        ppresults = Select(*self.results).where(
            lambda entry: float(Adapter(entry).ratio) <
            Adapter(self.options).threshold or
            Adapter(self.options).show_all).build()

        # print out results in table format
        pprint(ppresults, keys=['ratio', 'loc', 'com', 'file', 'type'])

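        # in average mode: succeed when the mean ratio reaches the threshold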
        if Adapter(self.options).average:
            all_ratio = Select(*self.results).transform(
                lambda entry: float(Adapter(entry).ratio)).build()
            avg_ratio = float(sum(all_ratio)) / float(
                len(all_ratio)) if len(all_ratio) > 0 else 1.0
            self.logger.info('average ratio is %.2f for %d files', avg_ratio,
                             len(all_ratio))
            return avg_ratio >= Adapter(self.options).threshold

        # providing results (mainly for unittesting)
        return len(
            Select(*self.results).where(
                lambda entry: float(Adapter(entry).ratio) <
                Adapter(self.options).threshold).build()) == 0
Example #12
    def test_simple(self):
        """Testing simple usage."""
        adapted = Adapter({'a': 10, 'b': {'c': 20}})
        assert_that(adapted.a, equal_to(10))
        assert_that(adapted.b.c, equal_to(20))
Example #13
    def test_unknown_field_or_callable(self):
        """Testing unknown field or callable."""
        data = {'a': 10, 'b': {'c': 20}}
        adapted = Adapter(data)
        assert_that(adapted.foo, equal_to(None))
Example #14
    def test_callable(self):
        """Testing a callable attribute."""
        data = {'a': 10, 'b': {'c': 20}}
        adapted = Adapter(data)
        assert_that(list(adapted.items()), equal_to(list(data.items())))
Example #15
    def test_str(self):
        """Testing __str__ method."""
        data = {'a': 10, 'b': {'c': 20}}
        adapted = Adapter(data)
        assert_that(str(adapted), equal_to(str(data)))
Example #16
    def test_length(self):
        """Testing __len__ method."""
        adapted = Adapter({'a': 10, 'b': {'c': 20}})
        assert_that(len(adapted), equal_to(2))