def save_exposure_atts(self, filename, use_parallel=True):
    """
    Save the exposure attributes, including latitude and longitude.
    The file type saved is based on the filename extension.
    Options:
        '.npz': Save the arrays into a single file in uncompressed
        .npz format.

    :param use_parallel: Set to True for parallel behaviour, in which
        only node 0 writes to file.
    :param filename: The file to be written.
    :return write_dict: The whole dictionary, returned for testing.
    """
    write_dict = self.exposure_att.copy()
    write_dict[EX_LAT] = self.exposure_lat
    write_dict[EX_LONG] = self.exposure_long

    if use_parallel:
        assert misc.INTID in write_dict
        write_dict = parallel.gather_dict(write_dict,
                                          write_dict[misc.INTID])

    if parallel.STATE.rank == 0 or not use_parallel:
        if filename[-4:] == '.csv':
            save_csv(write_dict, filename)
        else:
            numpy.savez(filename, **write_dict)
    # The write_dict is returned for testing.
    # When running in parallel this is a way of getting all
    # of the context info.
    return write_dict
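# For context, a minimal sketch of the .npz round-trip that the non-csv
# branch above relies on. The attribute names and file path here are
# illustrative, not taken from HazImp itself.
import numpy

write_dict = {"structure_value": numpy.array([1.0e5, 2.5e5]),
              "latitude": numpy.array([-23.5, -23.6]),
              "longitude": numpy.array([150.1, 150.2])}

# numpy.savez appends '.npz' if the filename lacks that extension.
numpy.savez("exposure_atts", **write_dict)

# Loading returns an NpzFile mapping each keyword to its array.
loaded = numpy.load("exposure_atts.npz")
assert numpy.allclose(loaded["latitude"], write_dict["latitude"])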
def save_exposure_atts(self, filename, use_parallel=True):
    """
    Save the exposure attributes, including latitude and longitude.
    The file type saved is based on the filename extension.
    Options:
        '.npz': Save the arrays into a single file in uncompressed
        .npz format.

    :param use_parallel: Set to True for parallel behaviour, in which
        only node 0 writes to file.
    :param filename: The file to be written.
    :return write_dict: The whole dictionary, returned for testing.
    """
    [filename, bucket_name, bucket_key] = \
        misc.create_temp_file_path_for_s3(filename)
    s1 = self.prov.entity(
        ":HazImp output file",
        {"prov:label": "Full HazImp output file",
         "prov:type": "void:Dataset",
         "prov:atLocation": os.path.basename(filename)})
    a1 = self.prov.activity(":SaveImpactData",
                            datetime.now().strftime(DATEFMT),
                            None)
    self.prov.wasGeneratedBy(s1, a1)
    self.prov.wasInformedBy(a1, self.provlabel)
    write_dict = self.exposure_att.copy()
    write_dict[EX_LAT] = self.exposure_lat
    write_dict[EX_LONG] = self.exposure_long

    if use_parallel:
        assert misc.INTID in write_dict
        write_dict = parallel.gather_dict(write_dict,
                                          write_dict[misc.INTID])

    if parallel.STATE.rank == 0 or not use_parallel:
        if filename[-4:] == '.csv':
            save_csv(write_dict, filename)
        else:
            numpy.savez(filename, **write_dict)
        misc.upload_to_s3_if_applicable(filename, bucket_name, bucket_key)
    # The write_dict is returned for testing.
    # When running in parallel this is a way of getting all
    # of the context info.
    return write_dict
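# A standalone sketch of the provenance pattern used above, written
# against the `prov` package. The namespace URI, identifier, DATEFMT
# string, and file name are all illustrative assumptions; underscores
# replace the spaces in the identifier so the sketch serializes cleanly.
from datetime import datetime

from prov.model import ProvDocument

DATEFMT = "%Y-%m-%d %H:%M:%S"  # assumed timestamp format

doc = ProvDocument()
doc.set_default_namespace("http://example.org/hazimp/")

# An entity for the output file and an activity for the save step.
s1 = doc.entity(":HazImp_output_file",
                {"prov:label": "Full HazImp output file",
                 "prov:type": "void:Dataset",
                 "prov:atLocation": "output.csv"})
a1 = doc.activity(":SaveImpactData",
                  datetime.now().strftime(DATEFMT),
                  None)

# Link them: the output file was generated by the save activity.
doc.wasGeneratedBy(s1, a1)
print(doc.get_provn())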
def test_gather_dict2D(self):
    # This test can be run with
    #     mpirun -n 2 python test_parallel.py
    # If there is a path problem, try adding -x PYTHONPATH.
    try:
        import pypar  # pylint: disable=W0612, W0404
    except ImportError:
        # can't do this test
        return

    # In a real run the subsets will not be the same
    subset = {"foo": numpy.array([0, 2]),
              "woo": numpy.array([[1, 3, 4], [2, 4, 5]])}
    if STATE.size == 1:
        all_indexes = [[0, 1]]
    elif STATE.size == 2:
        all_indexes = [[0, 2], [1, 3]]
    elif STATE.size == 3:
        all_indexes = [[0, 3], [1, 4], [2, 5]]

    if STATE.size < 4:
        whole = gather_dict(subset, all_indexes[STATE.rank])
        if STATE.size == 1:
            self.assertDictEqual(whole, subset)
        elif STATE.size == 2 and STATE.rank == 0:
            act = {"foo": numpy.array([0, 0, 2, 2]),
                   "woo": numpy.array([[1, 3, 4],
                                       [1, 3, 4],
                                       [2, 4, 5],
                                       [2, 4, 5]])}
            for key in list(act.keys()):
                self.assertTrue(numpy.allclose(act[key], whole[key]))
        elif STATE.size == 3 and STATE.rank == 0:
            act = {"foo": numpy.array([0, 0, 0, 2, 2, 2]),
                   "woo": numpy.array([[1, 3, 4],
                                       [1, 3, 4],
                                       [1, 3, 4],
                                       [2, 4, 5],
                                       [2, 4, 5],
                                       [2, 4, 5]])}
            for key in list(act.keys()):
                self.assertTrue(numpy.allclose(act[key], whole[key]))
        else:
            pass
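# A serial sketch of the semantics the test above expects from
# gather_dict: each rank contributes a subset plus the global integer
# indexes of its rows, and the gathered whole places every row at its
# index. This emulation is not HazImp's MPI implementation; it assumes
# the indexes from all ranks cover 0..N-1 exactly once.
import numpy

def gather_dict_serial(subsets, all_indexes):
    """Combine per-rank subsets into whole arrays, ordered by index."""
    size = sum(len(idx) for idx in all_indexes)
    whole = {}
    for key in subsets[0]:
        shape = (size,) + subsets[0][key].shape[1:]
        out = numpy.empty(shape, dtype=subsets[0][key].dtype)
        for subset, indexes in zip(subsets, all_indexes):
            # Scatter this rank's rows into their global positions.
            out[numpy.asarray(indexes)] = subset[key]
        whole[key] = out
    return whole

# Two 'ranks' with identical subsets, as in the STATE.size == 2 case:
# rows interleave to give [0, 0, 2, 2], matching the expected 'act'.
subset = {"foo": numpy.array([0, 2])}
whole = gather_dict_serial([subset, subset], [[0, 2], [1, 3]])
assert numpy.array_equal(whole["foo"], numpy.array([0, 0, 2, 2]))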