    def _collect_and_assign_outputs(self, jobs, output_map, slice_paths_map):
        """
        Collect the outputs of the individual create_dbs node recipes,
        combine them into output mapfiles, and save these at the supplied
        path locations.
        """
        # Create a container for the output parmdbs: same host and skip
        # as the corresponding output_map entry, with an empty file list.
        output_map.iterator = DataMap.TupleIterator
        parmdbs_list = []
        # Loop over the raw data, skipped entries included (use the data member)
        for output_entry in output_map.data:
            parms_tuple = (output_entry.host, [], output_entry.skip)
            parmdbs_list.append(parms_tuple)

        parmdbs_map = MultiDataMap(parmdbs_list)

        output_map.iterator = parmdbs_map.iterator = DataMap.SkipIterator # The maps are synced
        successful_run = False
        for (output_item, parmdbs_item, job) in zip(
                                                output_map, parmdbs_map, jobs):
            node_succeeded = "parmdbs" in job.results and \
                    "sourcedb" in job.results

            host = output_item.host

            # Mark the current entry as failed if the node did not return
            # all results (entries with skip set are never visited here,
            # because SkipIterator excludes them):
            if not node_succeeded:
                self.logger.warning(
                    "Failed ImagerCreateDBs run detected: no sourcedb file "
                    "created on {0}; continuing".format(host))
                output_item.file = "failed"
                output_item.skip = True
                parmdbs_item.file = ["failed"]
                parmdbs_item.skip = True

            # Else it succeeded and we can write the results
            else:
                successful_run = True
                output_item.file = job.results["sourcedb"]
                parmdbs_item.file = job.results["parmdbs"]

                # We also need to manually set the skip flags for this
                # new file list
                parmdbs_item.file_skip = [False] * len(job.results["parmdbs"])

        # Fail if none of the nodes returned all data
        if not successful_run:
            self.logger.error("The creation of dbs on the nodes failed:")
            self.logger.error(
                "Not a single node produced all needed data products.")
            self.logger.error("sourcedb files: {0}".format(output_map))
            self.logger.error("parameter dbs: {0}".format(parmdbs_map))
            return 1

        # Write the mapfiles
        output_map.save(self.inputs["sourcedb_map_path"])
        parmdbs_map.save(self.inputs["parmdbs_map_path"])
        self.logger.debug("Wrote sourcedb dataproducts: {0} \n {1}".format(
            self.inputs["sourcedb_map_path"], self.inputs["parmdbs_map_path"]))

        # Set the outputs
        self.outputs['sourcedb_map_path'] = self.inputs["sourcedb_map_path"]
        self.outputs['parmdbs_map_path'] = self.inputs["parmdbs_map_path"]

        return 0
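
For context, a minimal sketch of the mapfile pattern this recipe relies on. It uses only the DataMap/MultiDataMap API already visible above (tuple construction, the .data member, DataMap.SkipIterator, per-file file_skip flags); the import path, host names, and file paths are assumptions for illustration.

from lofarpipe.support.data_map import DataMap, MultiDataMap

# Hypothetical output map: one (host, file, skip) entry per node.
output_map = DataMap([("node01", "/data/sourcedb_0", False),
                      ("node02", "/data/sourcedb_1", True)])

# Mirror it with empty file lists, preserving host and skip per entry,
# exactly as the recipe does before filling in the node results.
parmdbs_map = MultiDataMap(
    [(entry.host, [], entry.skip) for entry in output_map.data])

# SkipIterator yields only non-skipped entries, so zipping the two maps
# keeps them aligned entry for entry: node02 is hidden in both.
output_map.iterator = parmdbs_map.iterator = DataMap.SkipIterator
for output_item, parmdbs_item in zip(output_map, parmdbs_map):
    parmdbs_item.file = ["/data/parmdb_0a", "/data/parmdb_0b"]
    parmdbs_item.file_skip = [False] * len(parmdbs_item.file)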
Example #2
    def _collect_and_assign_outputs(self, jobs, output_map, slice_paths_map):
        """
        Collect the outputs of the individual create_dbs node recipes,
        combine them into output mapfiles, and save these at the supplied
        path locations.
        """
        # Create a container for the output parmdbs: same host and skip
        # as the corresponding output_map entry, with an empty file list.
        output_map.iterator = DataMap.TupleIterator
        parmdbs_list = []
        # Loop over the raw data, skipped entries included (use the data member)
        for output_entry in output_map.data:
            parms_tuple = (output_entry.host, [], output_entry.skip)
            parmdbs_list.append(parms_tuple)

        parmdbs_map = MultiDataMap(parmdbs_list)

        output_map.iterator = parmdbs_map.iterator = DataMap.SkipIterator    # The maps are synced
        successful_run = False
        for (output_item, parmdbs_item, job) in zip(
                                                output_map, parmdbs_map, jobs):
            node_succeeded = "parmdbs" in job.results and \
                    "sourcedb" in job.results

            host = output_item.host

            # Mark the current entry as failed if the node did not return
            # all results (entries with skip set are never visited here,
            # because SkipIterator excludes them):
            if not node_succeeded:
                self.logger.warning(
                    "Failed selfcalCreateDBs run detected: no sourcedb file "
                    "created on {0}; continuing".format(host))
                output_item.file = "failed"
                output_item.skip = True
                parmdbs_item.file = []
                parmdbs_item.skip = True

            # Else it succeeded and we can write the results
            else:
                successful_run = True
                output_item.file = job.results["sourcedb"]
                parmdbs_item.file = job.results["parmdbs"]

                # We also need to manually set the skip flags for this
                # new file list
                parmdbs_item.file_skip = [False] * len(job.results["parmdbs"])

        # Fail if none of the nodes returned all data
        if not successful_run:
            self.logger.error("The creation of dbs on the nodes failed:")
            self.logger.error(
                "Not a single node produced all needed data products.")
            self.logger.error("sourcedb files: {0}".format(output_map))
            self.logger.error("parameter dbs: {0}".format(parmdbs_map))
            return 1

        # Write the mapfiles
        output_map.save(self.inputs["sourcedb_map_path"])
        parmdbs_map.save(self.inputs["parmdbs_map_path"])
        self.logger.debug("Wrote sourcedb dataproducts: {0} \n {1}".format(
            self.inputs["sourcedb_map_path"], self.inputs["parmdbs_map_path"]))

        # Set the outputs
        self.outputs['sourcedb_map_path'] = self.inputs["sourcedb_map_path"]
        self.outputs['parmdbs_map_path'] = self.inputs["parmdbs_map_path"]

        return 0
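
One behaviour worth noting in both variants: once the failure branch sets skip = True on an entry, a later pass with DataMap.SkipIterator no longer visits it. A small hypothetical illustration, with the same assumed import path and made-up hosts and paths as above:

from lofarpipe.support.data_map import DataMap, MultiDataMap

parmdbs_map = MultiDataMap([("node01", ["/data/p0"], False),
                            ("node02", ["/data/p1"], False)])

# Simulate a failed node, as the recipes above do.
parmdbs_map.data[1].file = []
parmdbs_map.data[1].skip = True

# The skipped entry is now invisible to SkipIterator-driven loops.
parmdbs_map.iterator = DataMap.SkipIterator
visited = [item.host for item in parmdbs_map]
assert visited == ["node01"]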
Example #3
    def test_new_style_load_store(self):
        tmp_file = self.new_style_map_file + '.tmp'
        data_map = MultiDataMap(self.new_style_map)
        data_map.save(tmp_file)
        reloaded_data_map = MultiDataMap.load(tmp_file)
        self.assertEqual(data_map, reloaded_data_map)
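
The same round trip written out with explicit contents rather than the test fixture. A minimal sketch: the map data and temp path are made up, and the import path is again an assumption; save(), load(), and the equality check are exactly what the test above exercises.

import os
import tempfile

from lofarpipe.support.data_map import MultiDataMap

# Hypothetical map contents; for MultiDataMap entries the file field is a list.
data_map = MultiDataMap(
    [("node01", ["/data/parmdb_0", "/data/parmdb_1"], False)])

tmp_file = os.path.join(tempfile.gettempdir(), "parmdbs.map.tmp")
data_map.save(tmp_file)
reloaded_data_map = MultiDataMap.load(tmp_file)
assert data_map == reloaded_data_map  # equality, as the unit test asserts
os.remove(tmp_file)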