Ejemplo n.º 1
0
    def test_convert_to_d_and_load(self):
        """Round-trip every registered pipeline through dict serialization.

        Converts each pipeline template to a dict, loads it back, and checks
        that the core fields (id, display name, binding counts) survive.
        Task options are compared only after a second conversion cycle,
        because the internally registered pipeline may not have any
        task_options populated yet.
        """
        import pbsmrtpipe.loader as L

        rtasks, rfile_types, chunk_operators, pipelines = L.load_all()

        for pipeline in pipelines.values():
            pipeline_d = IO.pipeline_template_to_dict(pipeline, rtasks)
            pipeline_loaded = IO.load_pipeline_template_from(pipeline_d)

            self.assertEqual(pipeline.idx, pipeline_loaded.idx)
            self.assertEqual(pipeline.display_name,
                             pipeline_loaded.display_name)
            self.assertEqual(len(pipeline.all_bindings),
                             len(pipeline_loaded.all_bindings))
            self.assertEqual(len(pipeline.entry_bindings),
                             len(pipeline_loaded.entry_bindings))

            # note that the internally registered pipeline does not necessarily
            # have any task_options at this point, so we can't simply test for
            # equality.  however after another cycle they should be identical
            if len(pipeline.task_options) > 0:
                self.assertGreater(len(pipeline_loaded.task_options), 0)
                pipeline_d2 = IO.pipeline_template_to_dict(
                    pipeline_loaded, rtasks)
                pipeline_loaded2 = IO.load_pipeline_template_from(pipeline_d2)

                self.assertEqual(len(pipeline_loaded.task_options),
                                 len(pipeline_loaded2.task_options))

                n = 1
                for k, v in pipeline.task_options.items():
                    if k in pipeline_loaded2.task_options:
                        v2 = pipeline_loaded2.task_options[k]
                        msg = "task option #{n} {k} expected '{v}' got '{x}'".format(
                            k=k, v=v, x=v2, n=n)
                        self.assertEqual(v, v2, msg)
                        # print() call form is valid on both Python 2 and 3
                        print("Valid " + msg)
                        n += 1
Ejemplo n.º 2
0
    def test_all_sane(self):
        """Test that all pipelines are well defined.

        For every registered pipeline: validate the Avro-dict form, check
        binding-graph integrity and file-type compatibility, validate entry
        points, then write the template to JSON and Avro and load it back.
        Any failure is collected; the test fails if any pipeline is invalid.
        """
        errors = []
        rtasks, rfiles_types, chunk_operators, pipelines = L.load_all()

        for pipeline_id, pipeline in pipelines.items():
            emsg = "Pipeline {p} is not valid.".format(p=pipeline_id)
            log.debug("Checking Sanity of registered Pipeline {i}".format(
                i=pipeline_id))
            log.info(pipeline_id)
            log.debug(pipeline)
            try:
                # Validate with Avro
                d = pipeline_template_to_dict(pipeline, rtasks)
                _ = validate_pipeline_template(d)
                name = pipeline_id + "_pipeline_template.avro"
                output_file = get_temp_file(suffix=name)
                log.info(
                    "{p} converted to avro successfully".format(p=pipeline_id))

                bg = BU.binding_strs_to_binding_graph(rtasks,
                                                      pipeline.all_bindings)
                BU.validate_binding_graph_integrity(bg)
                BU.validate_compatible_binding_file_types(bg)
                validate_entry_points(d)

                # for debugging purposes
                output_json = output_file.replace(".avro", '.json')
                log.info("writing pipeline to {p}".format(p=output_json))
                with open(output_json, 'w') as j:
                    j.write(json.dumps(d, sort_keys=True, indent=4))

                log.info(
                    "writing pipeline template to {o}".format(o=output_file))

                # Test writing to avro if the pipeline is actually valid
                write_pipeline_template_to_avro(pipeline, rtasks, output_file)
                log.info("Pipeline {p} is valid.".format(p=pipeline_id))

                log.info("Loading avro {i} from {p}".format(i=pipeline_id,
                                                            p=output_file))
                pipeline_d = load_pipeline_template_from_avro(output_file)
                self.assertIsInstance(pipeline_d, dict)

            except Exception as e:
                # str(e) is portable; Exception.message was removed in Python 3
                m = emsg + " Error: " + str(e)
                log.error(m)
                errors.append(m)

        msg = "\n".join(errors) if errors else ""
        self.assertEqual([], errors, msg)
Ejemplo n.º 3
0
    def test_convert_to_d_and_load(self):
        """Check each pipeline survives a dict round-trip with key fields intact."""
        import pbsmrtpipe.loader as L

        rtasks, rfile_types, chunk_operators, pipelines = L.load_all()

        for original in pipelines.values():
            as_dict = IO.pipeline_template_to_dict(original, rtasks)
            restored = IO.load_pipeline_template_from(as_dict)
            # Compare the identifying fields and binding counts pairwise
            checks = [(original.idx, restored.idx),
                      (original.display_name, restored.display_name),
                      (len(original.all_bindings), len(restored.all_bindings)),
                      (len(original.entry_bindings), len(restored.entry_bindings))]
            for expected, observed in checks:
                self.assertEqual(expected, observed)
Ejemplo n.º 4
0
    def test_convert_to_d_and_load(self):
        """Round-trip every registered pipeline through dict serialization.

        Checks that id, display name, and binding counts survive one cycle,
        and that task options stabilize after a second cycle.
        """
        import pbsmrtpipe.loader as L

        rtasks, rfile_types, chunk_operators, pipelines = L.load_all()

        for pipeline in pipelines.values():
            pipeline_d = IO.pipeline_template_to_dict(pipeline, rtasks)
            pipeline_loaded = IO.load_pipeline_template_from(pipeline_d)
            self.assertEqual(pipeline.idx, pipeline_loaded.idx)
            self.assertEqual(pipeline.display_name, pipeline_loaded.display_name)
            self.assertEqual(len(pipeline.all_bindings), len(pipeline_loaded.all_bindings))
            self.assertEqual(len(pipeline.entry_bindings), len(pipeline_loaded.entry_bindings))
            # note that the internally registered pipeline does not necessarily
            # have any task_options at this point, so we can't simply test for
            # equality.  however after another cycle they should be identical
            if len(pipeline.task_options) > 0:
                self.assertGreater(len(pipeline_loaded.task_options), 0)
                pipeline_d2 = IO.pipeline_template_to_dict(pipeline_loaded, rtasks)
                pipeline_loaded2 = IO.load_pipeline_template_from(pipeline_d2)
                self.assertEqual(len(pipeline_loaded.task_options),
                                 len(pipeline_loaded2.task_options))
                # .items() (not the Python-2-only .iteritems()) works on 2 and 3
                for k, v in pipeline.task_options.items():
                    if k in pipeline_loaded2.task_options:
                        self.assertEqual(v, pipeline_loaded2.task_options[k])