    def __init__(self, basis_dir, grid_dir, gap_data_list, flag=1,
                 file_name="fort.14", table_folder=None,
                 executable_dir=None):
        """
        Initializes a gridInfo object and sets up a directory with links to
        the necessary input files to run :program:`Griddata_v1.32.F90` from
        within that directory. To run in parallel use :program:`mpirun` or
        :program:`ibrun`. If :program:`Griddata_v1.32.F90` has been compiled
        using the ``DGHIGHMEM`` option, make sure to set ``OMP_NUM_THREADS``
        to the number of processors per node and set the number of MPI tasks
        to the number of nodes.

        :param string basis_dir: the path to create the landuse_##
            directories in
        :param string grid_dir: the path where the ``fort.14`` formatted file
            is located; there also needs to be a ``fort.13`` formatted file
            here to use as a template
        :param gap_data_list: a list of
            :class:`~polyadcirc.pyGriddata.table_management.gapInfo` objects
        :type gap_data_list: list
        :param int flag: flag to choose which averaging scheme to use

        .. seealso:: :meth:`~polyadcirc.pyADCIRC.flag_fort14.flag_fort14`

        :param string file_name: the name of the ``fort.14`` formatted file
            in ``grid_dir``
        :param string table_folder: the folder containing the ``*.table``
            file. This is ONLY necessary when running simultaneous copies of
            :program:`Griddata`.
        :param string executable_dir: path to the directory containing the
            compiled ``Griddata_*.out`` executable

        """
        self.file_name = file_name #: Name of grid file, ``*.14``
        self.grid_dir = grid_dir #: path for the dir of the grid file
        self.gap_data_files = gap_data_list
        """ list() of :class:`~polyadcirc.pyGriddata.table_management.gapInfo`
        objects """
        self.basis_dir = basis_dir #: path for the dir to create landuse_ in
        self.table_folder = table_folder #: path for the dir with ``*.table``
        self.__landclasses = []
        self.__unique_tables = {}
        for gap in self.gap_data_files:
            #: dict of unique ``*.table`` files used
            self.__unique_tables[gap.table.file_name] = gap.table
        for k, v in self.__unique_tables.iteritems():
            for x in v.get_landclasses():
                #: list of landclasses used by this grid
                self.__landclasses.append((x, k))
        self.flag = flag #: averaging scheme flag

        if rank == 0:
            # Look for ``fort.14`` formatted file in grid_dir and place a
            # link to it in basis_dir
            fm.symlink(os.path.join(grid_dir, file_name),
                       os.path.join(basis_dir, file_name))
            flagged_file_name = f14.flag_go(self, flag)
            self.file_name = os.path.basename(flagged_file_name)

            # check to see if Griddata is here
            if executable_dir is None:
                executable_dir = sys.path
            else:
                executable_dir = [executable_dir]
            if len(glob.glob(os.path.join(self.basis_dir,
                                          'Griddata_*.out'))) == 0:
                # check to see if Griddata is compiled and on the Python path
                compiled_prog = None
                for p in executable_dir:
                    locations1 = glob.glob(os.path.join(p,
                                                        "*Griddata_*.out"))
                    locations2 = glob.glob(os.path.join(p, "polyadcirc",
                                                        "pyGriddata",
                                                        "Griddata_*.out"))
                    if locations1:
                        compiled_prog = locations1[0]
                        break
                    elif locations2:
                        compiled_prog = locations2[0]
                        break
                # put link to Griddata here
                if compiled_prog:
                    fm.symlink(compiled_prog,
                               os.path.join(basis_dir,
                                            os.path.basename(compiled_prog)))
                else:
                    print """Compile a copy of Griddata_v1.32.F90 and specify
                    its location using executable_dir"""

            # Create links to gap files (*.asc) using gap_list of gapInfo
            # objects
            for gap in self.gap_data_files:
                local_file_name = os.path.basename(gap.file_name)
                fm.symlink(gap.file_name,
                           os.path.join(basis_dir, local_file_name))
                if os.path.exists(gap.file_name+'.binary'):
                    fm.symlink(gap.file_name+'.binary',
                               os.path.join(basis_dir,
                                            local_file_name+'.binary'))
                gap.file_name = local_file_name

        self.file_name = comm.bcast(self.file_name, root=0)
        super(gridInfo, self).__init__()
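    # A minimal usage sketch (comment only, so the module stays importable):
    # constructing a gridInfo under MPI. The construction of ``gap_list`` is
    # hypothetical; it assumes some helper in
    # polyadcirc.pyGriddata.table_management produces the list of gapInfo
    # objects, and the paths are placeholders.
    #
    #   gap_list = [...]  # list of table_management.gapInfo objects
    #   grid = gridInfo("/scratch/basis_dir", "/scratch/grid_dir", gap_list,
    #                   flag=1, executable_dir="/scratch/bin")
    #
    # Launched with e.g. ``mpirun -np 4 python my_prep_script.py``; only
    # rank 0 creates the symbolic links, and every rank then receives the
    # flagged ``fort.14`` name through ``comm.bcast``.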
    def prep_all(self, removeBinaries=False, class_nums=None, condense=True,
                 TOL=None):
        """
        Assumes that all the necessary input files are in ``self.basis_dir``.
        This function generates a ``landuse_##`` folder in ``self.basis_dir``
        for every land classification number; each folder contains a
        ``fort.13`` file specific to that land classification number.

        .. todo:: Update so that landuse folders can be prepped n at a time
                  and so that this could be run on a HPC system.

        Currently, this preps the first folder and then all of the remaining
        folders at once.

        :param bool removeBinaries: flag whether or not to remove
            ``*.asc.binary`` files when completed
        :param list class_nums: list of integers indicating which classes to
            prep. This assumes all the ``*.asc.binary`` files already exist.
        :param bool condense: flag whether or not to condense ``fort.13`` to
            only non-zero values within a tolerance
        :param double TOL: tolerance below which to consider a Manning's n
            value to be zero if ``condense == True``

        """
        if class_nums is None:
            class_nums = range(len(self.__landclasses))
        if rank >= len(class_nums):
            print "There are more MPI TASKS than land classes."
            print "This code only scales to MPI_TASKS = len(land_classes)."
            print "Extra MPI TASKS will not be used."
            return

        # Are there any binary files?
        binaries = glob.glob(os.path.join(self.basis_dir, '*.asc.binary'))
        # If not, create them
        if not binaries and rank == 0:
            # set up first landuse folder
            first_script = self.setup_landuse_folder(class_nums[0])
            # set up remaining land-use classifications
            script_list = self.setup_landuse_folders(False)
            # run grid_all_data in this folder
            subprocess.call(['./'+first_script], cwd=self.basis_dir)
            class_nums.remove(0)
            landuse_folder = 'landuse_00'
            self.cleanup_landuse_folder(os.path.join(self.basis_dir,
                                                     landuse_folder))
            fm.rename13([landuse_folder], self.basis_dir)
            if condense:
                print "Removing values below TOL"
                landuse_folder_path = os.path.join(self.basis_dir,
                                                   landuse_folder)
                # read fort.13 file
                mann_dict = f13.read_nodal_attr_dict(landuse_folder_path)
                # condense fort.13 file
                condensed_bv = tmm.condense_bv_dict(mann_dict, TOL)
                # write new file
                f13.update_mann(condensed_bv, landuse_folder_path)
        elif rank == 0:
            script_list = self.setup_landuse_folders()
        else:
            script_list = None
            class_nums = None

        class_nums = comm.bcast(class_nums, root=0)
        script_list = comm.bcast(script_list, root=0)

        if len(class_nums) != len(script_list):
            script_list = [script_list[i] for i in class_nums]

        # run remaining bash scripts
        for i in range(rank, len(script_list), size):
            # run griddata
            subprocess.call(['./'+script_list[i]], cwd=self.basis_dir)
            # clean up folder
            match_string = r"grid_all_(.*)_"+self.file_name[:-3]+r"\.sh"
            landuse_folder = re.match(match_string,
                                      script_list[i]).groups()[0]
            self.cleanup_landuse_folder(os.path.join(self.basis_dir,
                                                     landuse_folder))
            # rename fort.13 file
            fm.rename13([landuse_folder], self.basis_dir)
            if condense:
                print "Removing values below TOL"
                landuse_folder_path = os.path.join(self.basis_dir,
                                                   landuse_folder)
                # read fort.13 file
                mann_dict = f13.read_nodal_attr_dict(landuse_folder_path)
                # condense fort.13 file
                condensed_bv = tmm.condense_bv_dict(mann_dict, TOL)
                # write new file
                f13.update_mann(condensed_bv, landuse_folder_path)
        print "Done"

        # remove unnecessary files
        if removeBinaries and rank == 0:
            binaries = glob.glob(os.path.join(self.basis_dir,
                                              '*.asc.binary'))
            for f in binaries:
                os.remove(f)
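    # Usage sketch (comment only): prepping every landuse folder and
    # condensing the resulting fort.13 files. The TOL value is an arbitrary
    # example, not a recommended default.
    #
    #   grid.prep_all(removeBinaries=True, condense=True, TOL=1e-7)
    #
    # With N land classes this scales to at most N MPI tasks; extra ranks
    # print a warning and return immediately after the initial check.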