Example #1
    def update_mann(self, data, path=None, default=None, file_name='fort.13'):
        """
        Write out a ``fort.13`` file to ``path`` with the nodal attributes contained in ``data``.

        :type data: :class:`numpy.ndarray` or :class:`dict`
        :param data: nodal attribute information to write out
        :type path: string or None
        :param path: the directory to which the ``fort.13`` file will be written
        :type default: None or float
        :param default: default value for the nodal attribute
        :type file_name: string
        :param file_name: the name of the ``fort.13`` formatted file

        """
        f13.update_mann(data, path, default, file_name)   
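A minimal usage sketch of this call (the folder name, node numbers, and Manning's n values below are made up; the dict layout of node number to attribute value is inferred from the condense examples that follow):

import polyadcirc.pyADCIRC.fort13_management as f13

# read the nodal attribute dict from a folder containing a fort.13
mann_dict = f13.read_nodal_attr_dict('landuse_00')  # hypothetical folder
# tweak a few nodal values (node numbers and values are made up)
mann_dict[101] = 0.025
mann_dict[102] = 0.030
# write the updated attributes back out to the same folder
f13.update_mann(mann_dict, 'landuse_00')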
Example #2
def condense_lcm_folder(basis_folder, TOL=None):
    """
    Condenses the ``fort.13`` lanudse classification mesh files in
    ``landuse_*`` folders in ``basis_dir`` by removing values taht are below
    ``TOL``.

    :param string basis_dir: the path to directory containing the
        ``landuse_##`` folders
    :param double TOL: Tolerance close to zero, default is 1e-7
    """

    folders = glob.glob(os.path.join(basis_folder, "landuse_*"))
    for i in range(0 + rank, len(folders), size):
        mann_dict = f13.read_nodal_attr_dict(folders[i])
        mann_dict = condense_bv_dict(mann_dict, TOL)
        f13.update_mann(mann_dict, folders[i])
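The ``range(0 + rank, len(folders), size)`` loop stripes the ``landuse_*`` folders across MPI ranks so each rank condenses a disjoint subset. A self-contained sketch of the same pattern, assuming ``rank`` and ``size`` come from :mod:`mpi4py` as in this module:

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

folders = ['landuse_00', 'landuse_01', 'landuse_02', 'landuse_03']
# each rank handles every size-th folder, starting at its own index
for i in range(rank, len(folders), size):
    print("rank %d condenses %s" % (rank, folders[i]))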
Example #3
    def prep_all(self, removeBinaries=False, class_nums=None, condense=True,
                 TOL=None):
        """
        Assumes that all the necessary input files are in ``self.basis_dir``.
        This function generates a ``landuse_##`` folder in ``self.basis_dir``
        for every land classification number containing a ``fort.13`` file
        specific to that land classification number.

        .. todo:: Update so that landuse folders can be prepped ``n`` at a
                  time and so that this could be run on an HPC system.

        Currently, the parallel option preps the first folder and then all the
        remaining folders at once.

        :param bool removeBinaries: flag whether or not to remove
            ``*.asc.binary`` files when completed
        :param list class_nums: List of integers indicating which classes to
            prep. This assumes all the ``*.asc.binary`` files are already in
            existence.
        :param bool condense: Flag whether or not to condense ``fort.13`` to
            only non-zero values within a tolerance.
        :param double TOL: Tolerance below which to consider a Manning's n
            value to be zero if ``condense == True``
        
        """
        if class_nums is None:
            class_nums = range(len(self.__landclasses))
        if rank >= len(class_nums):
            print "There are more MPI TASKS than land classes."
            print "This code only scales to MPI_TASKS = len(land_classes)."
            print "Extra MPI TASKS will not be used."
            return

        # Are there any binary files?
        binaries = glob.glob(os.path.join(self.basis_dir, '*.asc.binary'))
        # If not create them
        if not binaries and rank == 0:
            # set up first landuse folder
            first_script = self.setup_landuse_folder(class_nums[0])
            # set up remaining land-use classifications
            script_list = self.setup_landuse_folders(False)
            # run grid_all_data in this folder 
            subprocess.call(['./'+first_script], cwd=self.basis_dir)
            class_nums.remove(0)
            landuse_folder = 'landuse_00'
            self.cleanup_landuse_folder(os.path.join(self.basis_dir,
                                                     landuse_folder))
            fm.rename13([landuse_folder], self.basis_dir)
            if condense:
                print "Removing values below TOL"
                landuse_folder_path = os.path.join(self.basis_dir,
                                                   landuse_folder)
                # read fort.13 file
                mann_dict = f13.read_nodal_attr_dict(landuse_folder_path)
                # condense fort.13 file
                condensed_bv = tmm.condense_bv_dict(mann_dict, TOL)
                # write new file
                f13.update_mann(condensed_bv, landuse_folder_path)
        elif rank == 0:
            script_list = self.setup_landuse_folders()
        else:
            script_list = None
            class_nums = None
        class_nums = comm.bcast(class_nums, root=0)
        script_list = comm.bcast(script_list, root=0)
        
        if len(class_nums) != len(script_list):
            temp = [script_list[i] for i in class_nums]
            script_list = temp

        # run remaining bash scripts
        for i in range(0+rank, len(script_list), size):
            # run griddata
            subprocess.call(['./'+script_list[i]], cwd=self.basis_dir)
            # clean up folder
            match_string = r"grid_all_(.*)_"+self.file_name[:-3]+r"\.sh"
            landuse_folder = re.match(match_string, script_list[i]).groups()[0]
            self.cleanup_landuse_folder(os.path.join(self.basis_dir,
                                                     landuse_folder))
            # rename fort.13 file
            fm.rename13([landuse_folder], self.basis_dir) 
            if condense:
                print "Removing values below TOL"
                landuse_folder_path = os.path.join(self.basis_dir,
                                                   landuse_folder)
                # read fort.13 file
                mann_dict = f13.read_nodal_attr_dict(landuse_folder_path)
                # condense fort.13 file
                condensed_bv = tmm.condense_bv_dict(mann_dict, TOL)
                # write new file
                f13.update_mann(condensed_bv, landuse_folder_path) 
        print "Done"
        # remove unnecessary files
        if removeBinaries and rank == 0:
            binaries = glob.glob(os.path.join(self.basis_dir, '*.asc.binary'))
            for f in binaries:
                os.remove(f)
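The ``match_string`` regular expression in ``prep_all`` recovers the landuse folder name from a prep script name such as ``grid_all_landuse_00_bay.sh``. A small standalone illustration (the file names are hypothetical; ``file_name[:-3]`` strips a ``.14`` suffix):

import re

file_name = 'bay.14'  # hypothetical fort.14-style grid file name
match_string = r"grid_all_(.*)_" + file_name[:-3] + r"\.sh"
script = 'grid_all_landuse_00_bay.sh'
landuse_folder = re.match(match_string, script).groups()[0]
print(landuse_folder)  # landuse_00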
Example #4
    def run_nobatch_q(self,
                      data,
                      wall_points,
                      mann_points,
                      save_file,
                      num_procs=12,
                      procs_pnode=12,
                      stations=None,
                      screenout=True,
                      num_writers=None,
                      TpN=12):
        """
        Runs :program:`ADCIRC` for all of the configurations specified by
        ``wall_points`` and ``mann_points`` and returns a dictionary of arrays
        containing data from output files. Runs batches of :program:`PADCIRC`
        as a single for loop and preps both the ``fort.13`` and ``fort.14`` in
        the same step.

        Stores only the QoI at the stations defined in ``stations``. In this
        case the QoI is the ``maxele63`` value at the designated station.

        Reads in a default Manning's *n* value from ``self.save_dir`` and
        stores it in ``data.manningsn_default``.

        :param data: :class:`~polyadcirc.run_framework.domain`
        :type wall_points: :class:`np.array` of size (5, ``num_of_walls``)
        :param wall_points: contains the box limits and wall height for each
            wall [xmin, xmax, ymin, ymax, wall_height]
        :type mann_points: :class:`np.array` of size (``num_of_basis_vec``,
            ``num_of_random_fields``), ``num_of_random_fields`` MUST be the
            same as ``num_of_walls``. The ith wall will be associated with
            the ith field specified by ``mann_points``.
        :param mann_points: contains the weights to be used for each run
        :type save_file: string
        :param save_file: name of file to save ``mdict`` to
        :type num_procs: int
        :param num_procs: number of processors per :program:`ADCIRC`
            simulation, 12 on Lonestar, and 16 on Stampede
        :param int procs_pnode: number of processors per node
        :param list() stations: list of stations to gather QoI from. If
            ``None`` uses the stations defined in ``data``
        :param boolean screenout: flag (True -- write ``ADCIRC`` output to
            the screen, False -- write ``ADCIRC`` output to a temp file)
        :param int num_writers: number of MPI processes to dedicate solely to
            the task of writing ASCII files. This MUST be < ``num_procs``.
        :param int TpN: number of tasks (cores to use) per node (wayness)
        :rtype: :class:`np.ndarray` of size (``num_of_walls``, ``len(stations)``)
        :returns: ``Q``, the QoI at each station for each run

        .. note:: Currently supports ADCIRC output files ``fort.6*``,
                  ``*.63``, ``fort.7*``, but NOT Hot Start Output
                  (``fort.67``, ``fort.68``)

        """
        # setup and save to shelf
        # set up saving
        if glob.glob(self.save_dir + '/' + save_file):
            os.remove(self.save_dir + '/' + save_file)

        # Save matrices to *.mat file for use by MATLAB or Python
        mdict = dict()
        mdict['mann_pts'] = mann_points
        mdict['wall_pts'] = wall_points

        self.save(mdict, save_file)

        #bv_array = tmm.get_basis_vec_array(self.basis_dir)
        bv_dict = tmm.get_basis_vectors(self.basis_dir)

        # Pre-allocate arrays for various data files
        num_points = mann_points.shape[1]
        num_walls = wall_points.shape[1]
        if num_walls != num_points:
            print "Error: num_walls != num_points"
            quit()

        # store the wall points with the mann_points as points
        mdict['points'] = np.vstack((wall_points, mann_points))

        # Pre-allocate arrays for non-timeseries data
        nts_data = {}
        self.nts_data = nts_data
        nts_data['maxele63'] = np.empty(
            (data.node_num, self.num_of_parallel_runs))

        # Pre-allocate arrays for QoI data
        if stations is None:
            stations = data.stations['fort61']
        xi = np.array([[s.x, s.y] for s in stations])
        points = np.column_stack((data.array_x(), data.array_y()))
        Q = np.empty((num_points, xi.shape[0]))
        self.Q = Q
        mdict['Q'] = Q

        # Update and save
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        default = data.read_default(path=self.save_dir)

        for k in xrange(0, num_points, self.num_of_parallel_runs):
            if k + self.num_of_parallel_runs >= num_points - 1:
                stop = num_points
                step = stop - k
            else:
                stop = k + self.num_of_parallel_runs
                step = self.num_of_parallel_runs
            run_script = self.write_run_script(num_procs, step, procs_pnode,
                                               TpN, screenout, num_writers)
            self.write_prep_script(5)
            # set walls
            wall_dim = wall_points[..., k]
            data.read_spatial_grid()
            data.add_wall(wall_dim[:4], wall_dim[-1])
            # update wall and prep all
            for rf_dir in self.rf_dirs:
                os.remove(rf_dir + '/fort.14')
                shutil.copy(self.grid_dir + '/fort.14', rf_dir)
                f14.update(data, path=rf_dir)
            #PARALLEL: update file containing the list of rf_dirs
            self.update_dir_file(self.num_of_parallel_runs)
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./prep_2.sh'],
                                 stdout=devnull,
                                 cwd=self.save_dir)
            p.communicate()
            devnull.close()
            for i in xrange(0, step):
                # generate the Manning's n field
                r_field = tmm.combine_basis_vectors(mann_points[..., i + k],
                                                    bv_dict, default,
                                                    data.node_num)
                # create the fort.13 for r_field
                f13.update_mann(r_field, self.rf_dirs[i])
            # do a batch run of python
            #PARALLEL: update file containing the list of rf_dirs
            self.update_dir_file(self.num_of_parallel_runs)
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./prep_5.sh'],
                                 stdout=devnull,
                                 cwd=self.save_dir)
            p.communicate()
            devnull.close()
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./' + run_script],
                                 stdout=devnull,
                                 cwd=self.base_dir)
            p.communicate()
            devnull.close()
            # get data
            for i, kk in enumerate(range(k, stop)):
                output.get_data_nts(i, self.rf_dirs[i], data, self.nts_data,
                                    ["maxele.63"])
            # fix dry nodes and interpolate to obtain QoI
            self.fix_dry_nodes_nts(data)
            for i, kk in enumerate(range(k, stop)):
                values = self.nts_data["maxele63"][:, i]
                Q[kk, :] = griddata(points, values, xi)
            # Update and save
            self.update_mdict(mdict)
            self.save(mdict, save_file)
            if num_points <= self.num_of_parallel_runs:
                pass
            elif (k + 1) % (num_points / self.num_of_parallel_runs) == 0:
                msg = str(k + 1) + " of " + str(num_points)
                print msg + " runs have been completed."

        # save data
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        return Q
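The QoI step above interpolates the nodal ``maxele63`` field to the station locations with ``griddata``. A standalone sketch of that interpolation, assuming :func:`scipy.interpolate.griddata` is the ``griddata`` imported by this module (coordinates and values are made up):

import numpy as np
from scipy.interpolate import griddata

# mesh node locations and a nodal field (e.g. maximum elevation)
points = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
values = np.array([0.0, 1.0, 1.0, 2.0])
# station locations at which to evaluate the QoI
xi = np.array([[0.5, 0.5], [0.25, 0.75]])
print(griddata(points, values, xi))  # linearly interpolated station values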
Example #5
    def run_points(self, data, wall_points, mann_points, save_file,
                   num_procs=12, procs_pnode=12, ts_names=["fort.61"],
                   nts_names=["maxele.63"], screenout=True, s_p_wall=None,
                   num_writers=None, TpN=12):
        """
        Runs :program:`ADCIRC` for all of the configurations specified by
        ``wall_points`` and ``mann_points`` and returns a dictionary of arrays
        containing data from output files. Assumes that the number of
        ``wall_points`` is less than the number of ``mann_points``. Runs
        batches of :program:`PADCIRC` as a double for loop with
        :program:`ADCPREP` prepping the ``fort.14`` file on the exterior loop
        and the ``fort.13`` file on the interior loop.

        Reads in a default Manning's *n* value from ``self.save_dir`` and
        stores it in ``data.manningsn_default``.

        :param data: :class:`~polyadcirc.run_framework.domain`
        :type wall_points: :class:`np.array` of size (5, ``num_of_walls``)
        :param wall_points: contains the box limits and wall height for each
            wall [xmin, xmax, ymin, ymax, wall_height]
        :type mann_points: :class:`np.array` of size (``num_of_basis_vec``,
            ``num_of_random_fields``), ``num_of_random_fields`` MUST be a
            multiple of ``num_of_walls``. The ith wall will be associated with
            the ith block of ``num_of_random_fields/num_of_walls`` columns of
            ``mann_points``.
        :param mann_points: contains the weights to be used for each run
        :type save_file: string
        :param save_file: name of file to save ``mdict`` to
        :type num_procs: int
        :param num_procs: number of processors per :program:`ADCIRC`
            simulation, 12 on Lonestar, and 16 on Stampede
        :param int procs_pnode: number of processors per node
        :param list() ts_names: names of ADCIRC timeseries
            output files to be recorded from each run
        :param list() nts_names: names of ADCIRC non timeseries
            output files to be recorded from each run
        :param boolean screenout: flag (True -- write ``ADCIRC`` output to
            the screen, False -- write ``ADCIRC`` output to a temp file)
        :param s_p_wall: number of samples (columns of ``mann_points``) per
            wall; if ``None`` the samples are split evenly among the walls
        :type s_p_wall: :class:`np.array` of ints or None
        :param int num_writers: number of MPI processes to dedicate solely to
            the task of writing ASCII files. This MUST be < ``num_procs``.
        :param int TpN: number of tasks (cores to use) per node (wayness)
        :rtype: (:class:`np.array`, :class:`np.ndarray`, :class:`np.ndarray`)
        :returns: (``time_obs``, ``ts_data``, ``nts_data``)

        .. note:: Currently supports ADCIRC output files ``fort.6*``,
                  ``*.63``, ``fort.7*``, but NOT Hot Start Output
                  (``fort.67``, ``fort.68``)

        """
        # setup and save to shelf
        # set up saving
        if glob.glob(self.save_dir+'/'+save_file):
            os.remove(self.save_dir+'/'+save_file)

        # Save matrices to *.mat file for use by MATLAB or Python
        mdict = dict()
        mdict['mann_pts'] = mann_points 
        mdict['wall_pts'] = wall_points 
 
        self.save(mdict, save_file)

        #bv_array = tmm.get_basis_vec_array(self.basis_dir)
        bv_dict = tmm.get_basis_vectors(self.basis_dir)

        # Pre-allocate arrays for various data files
        num_points = mann_points.shape[1]
        num_walls = wall_points.shape[1]
        if s_p_wall is None:
            s_p_wall = num_points/num_walls*np.ones(num_walls, dtype=int)
       
        # store the wall points with the mann_points as points
        mdict['points'] = np.vstack((np.repeat(wall_points, s_p_wall, 1),
                                     mann_points))

        # Pre-allocate arrays for non-timeseries data
        nts_data = {}
        self.nts_data = nts_data
        for fid in nts_names:
            key = fid.replace('.', '')
            nts_data[key] = np.zeros((data.node_num, num_points))        
        # Pre-allocate arrays for timeseries data
        ts_data = {}
        time_obs = {}
        self.ts_data = ts_data
        self.time_obs = time_obs
        for fid in ts_names:
            key = fid.replace('.', '')
            meas_locs, total_obs, irtype = data.recording[key]
            if irtype == 1:
                ts_data[key] = np.zeros((meas_locs, total_obs, num_points))
            else:
                ts_data[key] = np.zeros((meas_locs, total_obs,
                                         irtype, num_points))
            time_obs[key] = np.zeros((total_obs,))

        # Update and save
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        default = data.read_default(path=self.save_dir)

        for w in xrange(num_walls):
            # set walls
            wall_dim = wall_points[..., w]
            data.read_spatial_grid()
            data.add_wall(wall_dim[:4], wall_dim[-1])
            # update wall and prep all
            for rf_dir in self.rf_dirs:
                os.remove(rf_dir+'/fort.14')
                shutil.copy(self.grid_dir+'/fort.14', rf_dir)
                f14.update(data, path=rf_dir)
            #PARALLEL: update file containing the list of rf_dirs
            self.update_dir_file(self.num_of_parallel_runs)
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./prep_2.sh'], stdout=devnull,
                                 cwd=self.save_dir) 
            p.communicate()
            devnull.close()
            for k in xrange(sum(s_p_wall[:w]), sum(s_p_wall[:w+1]),
                            self.num_of_parallel_runs): 
                if k+self.num_of_parallel_runs >= num_points-1:
                    stop = num_points
                    step = stop-k
                else:
                    stop = k+self.num_of_parallel_runs
                    step = self.num_of_parallel_runs
                run_script = self.write_run_script(num_procs, step,
                                                   procs_pnode, TpN, screenout,
                                                   num_writers)
                self.write_prep_script(5)
                for i in xrange(0, step):
                    # generate the Manning's n field
                    r_field = tmm.combine_basis_vectors(mann_points[..., i+k],
                                                        bv_dict, default,
                                                        data.node_num)
                    # create the fort.13 for r_field
                    f13.update_mann(r_field, self.rf_dirs[i])
                # do a batch run of python
                #PARALLEL: update file containing the list of rf_dirs
                self.update_dir_file(self.num_of_parallel_runs)
                devnull = open(os.devnull, 'w')
                p = subprocess.Popen(['./prep_5.sh'], stdout=devnull,
                                     cwd=self.save_dir) 
                p.communicate()
                devnull.close()
                devnull = open(os.devnull, 'w')
                p = subprocess.Popen(['./'+run_script], stdout=devnull,
                                     cwd=self.base_dir)
                p.communicate()
                devnull.close()
                # get data
                for i, kk in enumerate(range(k, stop)):
                    output.get_data_ts(kk, self.rf_dirs[i], self.ts_data,
                                       time_obs, ts_names)
                    output.get_data_nts(kk, self.rf_dirs[i], data,
                                        self.nts_data, nts_names)
                # Update and save
                self.update_mdict(mdict)
                self.save(mdict, save_file)

        # save data
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        return time_obs, ts_data, nts_data
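When ``s_p_wall`` is ``None`` the runs are split evenly among the walls, and ``np.repeat(wall_points, s_p_wall, 1)`` duplicates each wall's column once per run so the result can be stacked with ``mann_points``. A small illustration:

import numpy as np

wall_points = np.array([[1, 2],
                        [3, 4]])        # two walls, one column each
s_p_wall = np.array([2, 3], dtype=int)  # runs per wall
# wall 0's column appears twice, wall 1's column three times
print(np.repeat(wall_points, s_p_wall, 1))
# [[1 1 2 2 2]
#  [3 3 4 4 4]]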
Example #6
    def run_points(self,
                   data,
                   wall_points,
                   mann_points,
                   save_file,
                   num_procs=12,
                   procs_pnode=12,
                   ts_names=["fort.61"],
                   nts_names=["maxele.63"],
                   screenout=True,
                   s_p_wall=None,
                   num_writers=None,
                   TpN=12):
        """
        Runs :program:`ADCIRC` for all of the configurations specified by
        ``wall_points`` and ``mann_points`` and returns a dictionary of arrays
        containing data from output files. Assumes that the number of
        ``wall_points`` is less than the number of ``mann_points``. Runs
        batches of :program:`PADCIRC` as a double for loop with
        :program:`ADCPREP` prepping the ``fort.14`` file on the exterior loop
        and the ``fort.13`` file on the interior loop.

        Reads in a default Manning's *n* value from ``self.save_dir`` and
        stores it in ``data.manningsn_default``.

        :param data: :class:`~polyadcirc.run_framework.domain`
        :type wall_points: :class:`np.array` of size (5, ``num_of_walls``)
        :param wall_points: contains the box limits and wall height for each
            wall [xmin, xmax, ymin, ymax, wall_height]
        :type mann_points: :class:`np.array` of size (``num_of_basis_vec``,
            ``num_of_random_fields``), ``num_of_random_fields`` MUST be a
            multiple of ``num_of_walls``. The ith wall will be associated with
            the ith block of ``num_of_random_fields/num_of_walls`` columns of
            ``mann_points``.
        :param mann_points: contains the weights to be used for each run
        :type save_file: string
        :param save_file: name of file to save ``mdict`` to
        :type num_procs: int
        :param num_procs: number of processors per :program:`ADCIRC`
            simulation, 12 on Lonestar, and 16 on Stampede
        :param int procs_pnode: number of processors per node
        :param list() ts_names: names of ADCIRC timeseries
            output files to be recorded from each run
        :param list() nts_names: names of ADCIRC non timeseries
            output files to be recorded from each run
        :param boolean screenout: flag (True -- write ``ADCIRC`` output to
            the screen, False -- write ``ADCIRC`` output to a temp file)
        :param s_p_wall: number of samples (columns of ``mann_points``) per
            wall; if ``None`` the samples are split evenly among the walls
        :type s_p_wall: :class:`np.array` of ints or None
        :param int num_writers: number of MPI processes to dedicate solely to
            the task of writing ASCII files. This MUST be < ``num_procs``.
        :param int TpN: number of tasks (cores to use) per node (wayness)
        :rtype: (:class:`np.array`, :class:`np.ndarray`, :class:`np.ndarray`)
        :returns: (``time_obs``, ``ts_data``, ``nts_data``)

        .. note:: Currently supports ADCIRC output files ``fort.6*``,
                  ``*.63``, ``fort.7*``, but NOT Hot Start Output
                  (``fort.67``, ``fort.68``)

        """
        # setup and save to shelf
        # set up saving
        if glob.glob(self.save_dir + '/' + save_file):
            os.remove(self.save_dir + '/' + save_file)

        # Save matrices to *.mat file for use by MATLAB or Python
        mdict = dict()
        mdict['mann_pts'] = mann_points
        mdict['wall_pts'] = wall_points

        self.save(mdict, save_file)

        #bv_array = tmm.get_basis_vec_array(self.basis_dir)
        bv_dict = tmm.get_basis_vectors(self.basis_dir)

        # Pre-allocate arrays for various data files
        num_points = mann_points.shape[1]
        num_walls = wall_points.shape[1]
        if s_p_wall is None:
            s_p_wall = num_points / num_walls * np.ones(num_walls, dtype=int)

        # store the wall points with the mann_points as points
        mdict['points'] = np.vstack((np.repeat(wall_points, s_p_wall,
                                               1), mann_points))

        # Pre-allocate arrays for non-timeseries data
        nts_data = {}
        self.nts_data = nts_data
        for fid in nts_names:
            key = fid.replace('.', '')
            nts_data[key] = np.zeros((data.node_num, num_points))
        # Pre-allocate arrays for timeseries data
        ts_data = {}
        time_obs = {}
        self.ts_data = ts_data
        self.time_obs = time_obs
        for fid in ts_names:
            key = fid.replace('.', '')
            meas_locs, total_obs, irtype = data.recording[key]
            if irtype == 1:
                ts_data[key] = np.zeros((meas_locs, total_obs, num_points))
            else:
                ts_data[key] = np.zeros(
                    (meas_locs, total_obs, irtype, num_points))
            time_obs[key] = np.zeros((total_obs, ))

        # Update and save
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        default = data.read_default(path=self.save_dir)

        for w in xrange(num_walls):
            # set walls
            wall_dim = wall_points[..., w]
            data.read_spatial_grid()
            data.add_wall(wall_dim[:4], wall_dim[-1])
            # update wall and prep all
            for rf_dir in self.rf_dirs:
                os.remove(rf_dir + '/fort.14')
                shutil.copy(self.grid_dir + '/fort.14', rf_dir)
                f14.update(data, path=rf_dir)
            #PARALLEL: update file containing the list of rf_dirs
            self.update_dir_file(self.num_of_parallel_runs)
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./prep_2.sh'],
                                 stdout=devnull,
                                 cwd=self.save_dir)
            p.communicate()
            devnull.close()
            for k in xrange(sum(s_p_wall[:w]), sum(s_p_wall[:w + 1]),
                            self.num_of_parallel_runs):
                if k + self.num_of_parallel_runs >= num_points - 1:
                    stop = num_points
                    step = stop - k
                else:
                    stop = k + self.num_of_parallel_runs
                    step = self.num_of_parallel_runs
                run_script = self.write_run_script(num_procs, step,
                                                   procs_pnode, TpN, screenout,
                                                   num_writers)
                self.write_prep_script(5)
                for i in xrange(0, step):
                    # generate the Manning's n field
                    r_field = tmm.combine_basis_vectors(
                        mann_points[..., i + k], bv_dict, default,
                        data.node_num)
                    # create the fort.13 for r_field
                    f13.update_mann(r_field, self.rf_dirs[i])
                # do a batch run of python
                #PARALLEL: update file containing the list of rf_dirs
                self.update_dir_file(self.num_of_parallel_runs)
                devnull = open(os.devnull, 'w')
                p = subprocess.Popen(['./prep_5.sh'],
                                     stdout=devnull,
                                     cwd=self.save_dir)
                p.communicate()
                devnull.close()
                devnull = open(os.devnull, 'w')
                p = subprocess.Popen(['./' + run_script],
                                     stdout=devnull,
                                     cwd=self.base_dir)
                p.communicate()
                devnull.close()
                # get data
                for i, kk in enumerate(range(k, stop)):
                    output.get_data_ts(kk, self.rf_dirs[i], self.ts_data,
                                       time_obs, ts_names)
                    output.get_data_nts(kk, self.rf_dirs[i], data,
                                        self.nts_data, nts_names)
                # Update and save
                self.update_mdict(mdict)
                self.save(mdict, save_file)

        # save data
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        return time_obs, ts_data, nts_data
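The ``self.save(mdict, save_file)`` helper is not shown on this page. Given the "Save matrices to *.mat file" comment, a plausible stand-in built on :func:`scipy.io.savemat` could look like the sketch below; this is purely an assumption about the real implementation:

import os
import numpy as np
import scipy.io as sio

def save(save_dir, mdict, save_file):
    # hypothetical stand-in: write the run matrices to a MATLAB-readable file
    sio.savemat(os.path.join(save_dir, save_file), mdict, do_compression=True)

save('/tmp', {'mann_pts': np.zeros((3, 5))}, 'run_data.mat')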
Example #7
    def run_points(self,
                   data,
                   points,
                   save_file,
                   num_procs=12,
                   procs_pnode=12,
                   ts_names=["fort.61"],
                   nts_names=["maxele.63"],
                   screenout=True,
                   cleanup_dirs=True,
                   num_writers=None,
                   TpN=12):
        """
        Runs :program:`ADCIRC` for all of the configurations specified by
        ``points`` and returns a dictionary of arrays containing data from
        output files.

        Reads in a default Manning's *n* value from ``self.save_dir`` and
        stores it in ``data.manningsn_default``.

        :param data: :class:`~polyadcirc.run_framework.domain`
        :type points: :class:`np.array` of size (``num_of_basis_vec``,
            ``num_of_random_fields``)
        :param points: contains the weights to be used for each run
        :type save_file: string
        :param save_file: name of file to save ``station_data`` to
        :type num_procs: int
        :param num_procs: number of processors per :program:`ADCIRC`
            simulation
        :param int procs_pnode: number of processors per node, 12 on Lonestar,
            and 16 on Stampede
        :param list() ts_names: names of ADCIRC timeseries
            output files to be recorded from each run
        :param list() nts_names: names of ADCIRC non timeseries
            output files to be recorded from each run
        :param boolean screenout: flag (True -- write ``ADCIRC`` output to
            the screen, False -- write ``ADCIRC`` output to a temp file)
        :param boolean cleanup_dirs: flag to delete all RF_dirs after run (True
            -- yes, False -- no)
        :param int num_writers: number of MPI processes to dedicate solely to
            the task of writing ASCII files. This MUST be < ``num_procs``.
        :param int TpN: number of tasks (cores to use) per node (wayness)
        :rtype: (:class:`np.array`, :class:`np.ndarray`, :class:`np.ndarray`)
        :returns: (``time_obs``, ``ts_data``, ``nts_data``)

        .. note:: Currently supports ADCIRC output files ``fort.6*``,
                  ``*.63``, ``fort.7*``, but NOT Hot Start Output
                  (``fort.67``, ``fort.68``)

        """
        # setup and save to shelf
        # set up saving
        if glob.glob(self.save_dir + '/' + save_file):
            os.remove(self.save_dir + '/' + save_file)

        # Save matrices to *.mat file for use by MATLAB or Python
        mdict = dict()
        mdict['mann_pts'] = points
        self.save(mdict, save_file)

        #bv_array = tmm.get_basis_vec_array(self.basis_dir)
        bv_dict = tmm.get_basis_vectors(self.basis_dir)

        # Pre-allocate arrays for various data files
        num_points = points.shape[1]
        # Pre-allocate arrays for non-timeseries data
        nts_data = {}
        self.nts_data = nts_data
        for fid in nts_names:
            key = fid.replace('.', '')
            nts_data[key] = np.zeros((data.node_num, num_points))
        # Pre-allocate arrays for timeseries data
        ts_data = {}
        time_obs = {}
        self.ts_data = ts_data
        self.time_obs = time_obs
        for fid in ts_names:
            key = fid.replace('.', '')
            meas_locs, total_obs, irtype = data.recording[key]
            if irtype == 1:
                ts_data[key] = np.zeros((meas_locs, total_obs, num_points))
            else:
                ts_data[key] = np.zeros(
                    (meas_locs, total_obs, irtype, num_points))
            time_obs[key] = np.zeros((total_obs, ))

        # Update and save
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        default = data.read_default(path=self.save_dir)

        for k in xrange(0, num_points, self.num_of_parallel_runs):
            if k + self.num_of_parallel_runs >= num_points - 1:
                stop = num_points
                step = stop - k
            else:
                stop = k + self.num_of_parallel_runs
                step = self.num_of_parallel_runs
            run_script = self.write_run_script(num_procs, step, procs_pnode,
                                               TpN, screenout, num_writers)
            self.write_prep_script(5)
            for i in xrange(0, step):
                # generate the Manning's n field
                r_field = tmm.combine_basis_vectors(points[...,
                                                           i + k], bv_dict,
                                                    default, data.node_num)
                # create the fort.13 for r_field
                f13.update_mann(r_field, self.rf_dirs[i])
            # do a batch run of python
            #PARALLEL: update file containing the list of rf_dirs
            self.update_dir_file(self.num_of_parallel_runs)
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./prep_5.sh'],
                                 stdout=devnull,
                                 cwd=self.save_dir)
            p.communicate()
            devnull.close()
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./' + run_script],
                                 stdout=devnull,
                                 cwd=self.base_dir)
            p.communicate()
            devnull.close()
            # get data
            for i, kk in enumerate(range(k, stop)):
                output.get_data_ts(kk, self.rf_dirs[i], self.ts_data, time_obs,
                                   ts_names)
                output.get_data_nts(kk, self.rf_dirs[i], data, self.nts_data,
                                    nts_names)
            # Update and save
            self.update_mdict(mdict)
            self.save(mdict, save_file)

        # save data
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        if cleanup_dirs:
            self.remove_random_field_directories()

        return time_obs, ts_data, nts_data
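``tmm.combine_basis_vectors`` assembles the nodal Manning's *n* field as a weighted combination of the landuse basis vectors, with ``default`` filling nodes that no basis vector covers. A conceptual sketch under that assumption, using dense arrays rather than whatever layout the real helper uses:

import numpy as np

def combine_basis_vectors(weights, bv_list, default, node_num):
    # hypothetical dense version: each basis vector is a length-node_num array
    r_field = np.zeros(node_num)
    covered = np.zeros(node_num, dtype=bool)
    for w, bv in zip(weights, bv_list):
        r_field += w * bv
        covered |= bv != 0
    # nodes touched by no basis vector fall back to the default value
    r_field[~covered] = default
    return r_field

bvs = [np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0])]
print(combine_basis_vectors([0.03, 0.02], bvs, 0.012, 3))  # [0.03 0.02 0.012]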
Example #8
import glob

# Note: the original snippet omits several imports; the module paths for
# dom, tmm, and fm below are assumptions based on the polyadcirc layout.
import polyadcirc.run_framework.domain as dom
import polyadcirc.pyADCIRC.fort13_management as f13
import polyadcirc.pyGriddata.table_to_mesh_map as tmm
import polyadcirc.pyGriddata.file_management as fm

# Specify run parameter folders
adcirc_dir = '/h1/lgraham/workspace'
grid_dir = adcirc_dir + '/ADCIRC_landuse/Katrina_small/inputs'
save_dir = adcirc_dir + '/ADCIRC_landuse/Katrina_small/runs/output_test'
basis_dir = adcirc_dir + '/ADCIRC_landuse/Katrina_small/landuse_basis/gap/shelf_test'

# load in the small katrina mesh
domain = dom.domain(grid_dir)
domain.update()

# load in basis vectors for domain
bv_dict = tmm.get_basis_vectors(basis_dir)

# create the shelf basis vector dictionary
shelf_limits = [0, 50]
shelf_bv = tmm.create_shelf(domain, shelf_limits, bv_dict)

# write this out to an appropriately numbered basis vector mesh in the correct
# basis_dir
# get list of landuse folder names
folders = glob.glob(basis_dir + '/landuse_*')
# create new folder
folder_name = basis_dir + '/landuse_' + '{:=02d}'.format(len(folders))
fm.mkdir(folder_name)
# copy a fort.13 file to that folder
fm.copy(save_dir + '/fort.13', folder_name + '/fort.13')
f13.update_mann(shelf_bv, folder_name)
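The ``'{:=02d}'`` format spec zero-pads the folder index to two digits, which is what keeps the new folder sorted after the existing ``landuse_##`` folders:

# '=' places padding between sign and digits; for non-negative
# indices this is plain two-digit zero-padding
print('{:=02d}'.format(3))   # 03
print('{:=02d}'.format(11))  # 11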
Example #9
# Note: the original snippet omits its imports; the module paths below are
# assumptions based on the polyadcirc layout.
import glob
import polyadcirc.run_framework.domain as dom
import polyadcirc.pyADCIRC.fort13_management as f13
import polyadcirc.pyGriddata.table_to_mesh_map as tmm
import polyadcirc.pyGriddata.file_management as fm

# Specify run parameter folders
adcirc_dir = '/h1/lgraham/workspace'
grid_dir = adcirc_dir + '/ADCIRC_landuse/Katrina_small/inputs'
save_dir = adcirc_dir + '/ADCIRC_landuse/Katrina_small/runs/output_test'
basis_dir = adcirc_dir +'/ADCIRC_landuse/Katrina_small/landuse_basis/gap/shelf_test'

# load in the small katrina mesh
domain = dom.domain(grid_dir)
domain.update()

# load in basis vectors for domain
bv_dict = tmm.get_basis_vectors(basis_dir)

# create the shelf basis vector dictionary
shelf_limits = [0, 50]  # other options: [0, 100], [50, 100]
shelf_bv = tmm.create_shelf(domain, shelf_limits, bv_dict)

# write this out to an appropriately numbered basis vector mesh in the correct
# basis_dir
# get list of landuse folder names
folders = glob.glob(basis_dir+'/landuse_*')
# create new folder
folder_name = basis_dir+'/landuse_'+'{:=02d}'.format(len(folders))
fm.mkdir(folder_name)
# copy a fort.13 file to that folder
fm.copy(save_dir+'/fort.13', folder_name+'/fort.13')
f13.update_mann(shelf_bv, folder_name)


Example #10
    def run_points(self, data, points, save_file, num_procs=12, procs_pnode=12,
                   ts_names=["fort.61"], nts_names=["maxele.63"],
                   screenout=True, cleanup_dirs=True, num_writers=None,
                   TpN=12):
        """
        Runs :program:`ADCIRC` for all of the configurations specified by
        ``points`` and returns a dictionary of arrays containing data from
        output files.

        Reads in a default Manning's *n* value from ``self.save_dir`` and
        stores it in ``data.manningsn_default``.

        :param data: :class:`~polyadcirc.run_framework.domain`
        :type points: :class:`np.array` of size (``num_of_basis_vec``,
            ``num_of_random_fields``)
        :param points: contains the weights to be used for each run
        :type save_file: string
        :param save_file: name of file to save ``station_data`` to
        :type num_procs: int
        :param num_procs: number of processors per :program:`ADCIRC`
            simulation
        :param int procs_pnode: number of processors per node, 12 on Lonestar,
            and 16 on Stampede
        :param list() ts_names: names of ADCIRC timeseries
            output files to be recorded from each run
        :param list() nts_names: names of ADCIRC non timeseries
            output files to be recorded from each run
        :param boolean screenout: flag (True -- write ``ADCIRC`` output to
            the screen, False -- write ``ADCIRC`` output to a temp file)
        :param boolean cleanup_dirs: flag to delete all RF_dirs after run (True
            -- yes, False -- no)
        :param int num_writers: number of MPI processes to dedicate solely to
            the task of writing ASCII files. This MUST be < ``num_procs``.
        :param int TpN: number of tasks (cores to use) per node (wayness)
        :rtype: (:class:`np.array`, :class:`np.ndarray`, :class:`np.ndarray`)
        :returns: (``time_obs``, ``ts_data``, ``nts_data``)

        .. note:: Currently supports ADCIRC output files ``fort.6*``,
                  ``*.63``, ``fort.7*``, but NOT Hot Start Output
                  (``fort.67``, ``fort.68``)

        """
        # setup and save to shelf
        # set up saving
        if glob.glob(self.save_dir+'/'+save_file):
            os.remove(self.save_dir+'/'+save_file)

        # Save matrices to *.mat file for use by MATLAB or Python
        mdict = dict()
        mdict['mann_pts'] = points
        self.save(mdict, save_file)

        #bv_array = tmm.get_basis_vec_array(self.basis_dir)
        bv_dict = tmm.get_basis_vectors(self.basis_dir)

        # Pre-allocate arrays for various data files
        num_points = points.shape[1]
        # Pre-allocate arrays for non-timeseries data
        nts_data = {}
        self.nts_data = nts_data
        for fid in nts_names:
            key = fid.replace('.', '')
            nts_data[key] = np.zeros((data.node_num, num_points))
        # Pre-allocate arrays for timeseries data
        ts_data = {}
        time_obs = {}
        self.ts_data = ts_data
        self.time_obs = time_obs
        for fid in ts_names:
            key = fid.replace('.', '')
            meas_locs, total_obs, irtype = data.recording[key]
            if irtype == 1:
                ts_data[key] = np.zeros((meas_locs, total_obs, num_points))
            else:
                ts_data[key] = np.zeros((meas_locs, total_obs,
                                         irtype, num_points))
            time_obs[key] = np.zeros((total_obs,))

        # Update and save
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        default = data.read_default(path=self.save_dir)

        for k in xrange(0, num_points, self.num_of_parallel_runs):
            if k+self.num_of_parallel_runs >= num_points-1:
                stop = num_points
                step = stop-k
            else:
                stop = k+self.num_of_parallel_runs
                step = self.num_of_parallel_runs
            run_script = self.write_run_script(num_procs, step, procs_pnode,
                                               TpN, screenout, num_writers)
            self.write_prep_script(5)
            for i in xrange(0, step):
                # generate the Manning's n field
                r_field = tmm.combine_basis_vectors(points[..., i+k], bv_dict,
                                                    default, data.node_num)
                # create the fort.13 for r_field
                f13.update_mann(r_field, self.rf_dirs[i])
            # do a batch run of python
            #PARALLEL: update file containing the list of rf_dirs
            self.update_dir_file(self.num_of_parallel_runs)
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./prep_5.sh'], stdout=devnull,
                                 cwd=self.save_dir)
            p.communicate()
            devnull.close()
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./'+run_script], stdout=devnull,
                                 cwd=self.base_dir)
            p.communicate()
            devnull.close()
            # get data
            for i, kk in enumerate(range(k, stop)):
                output.get_data_ts(kk, self.rf_dirs[i], self.ts_data, time_obs,
                                   ts_names)
                output.get_data_nts(kk, self.rf_dirs[i], data, self.nts_data,
                                    nts_names)
            # Update and save
            self.update_mdict(mdict)
            self.save(mdict, save_file)

        # save data
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        if cleanup_dirs:
            self.remove_random_field_directories()

        return time_obs, ts_data, nts_data
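A hypothetical driver for ``run_points``: only the shape convention for ``points`` (one row per basis vector, one column per run) follows from the docstring; the ``runSet`` construction and value range are assumptions:

import numpy as np

# one row per landuse basis vector, one column per ADCIRC run
num_of_basis_vec, num_of_random_fields = 4, 10
points = 0.01 + 0.1 * np.random.rand(num_of_basis_vec, num_of_random_fields)

# hypothetical runSet instance; see polyadcirc.run_framework for the real class
# main_run = random_manningsn.runSet(base_dir, grid_dir, save_dir, basis_dir)
# time_obs, ts_data, nts_data = main_run.run_points(domain, points,
#                                                   'py_save_file.mat')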
Example #11
    def run_nobatch_q(self, data, wall_points, mann_points, save_file, 
                      num_procs=12, procs_pnode=12, stations=None,
                      screenout=True, num_writers=None, TpN=12):
        """
        Runs :program:`ADCIRC` for all of the configurations specified by
        ``wall_points`` and ``mann_points`` and returns a dictionary of arrays
        containing data from output files. Runs batches of :program:`PADCIRC`
        as a single for loop and preps both the ``fort.13`` and ``fort.14`` in
        the same step.

        Stores only the QoI at the stations defined in ``stations``. In this
        case the QoI is the ``maxele63`` value at the designated station.

        Reads in a default Manning's *n* value from ``self.save_dir`` and
        stores it in ``data.manningsn_default``.

        :param data: :class:`~polyadcirc.run_framework.domain`
        :type wall_points: :class:`np.array` of size (5, ``num_of_walls``)
        :param wall_points: contains the box limits and wall height for each
            wall [xmin, xmax, ymin, ymax, wall_height]
        :type mann_points: :class:`np.array` of size (``num_of_basis_vec``,
            ``num_of_random_fields``), ``num_of_random_fields`` MUST be the
            same as ``num_of_walls``. The ith wall will be associated with
            the ith field specified by ``mann_points``.
        :param mann_points: contains the weights to be used for each run
        :type save_file: string
        :param save_file: name of file to save ``mdict`` to
        :type num_procs: int
        :param num_procs: number of processors per :program:`ADCIRC`
            simulation, 12 on Lonestar, and 16 on Stampede
        :param int procs_pnode: number of processors per node
        :param list() stations: list of stations to gather QoI from. If
            ``None`` uses the stations defined in ``data``
        :param boolean screenout: flag (True -- write ``ADCIRC`` output to
            the screen, False -- write ``ADCIRC`` output to a temp file)
        :param int num_writers: number of MPI processes to dedicate solely to
            the task of writing ASCII files. This MUST be < ``num_procs``.
        :param int TpN: number of tasks (cores to use) per node (wayness)
        :rtype: :class:`np.ndarray` of size (``num_of_walls``, ``len(stations)``)
        :returns: ``Q``, the QoI at each station for each run

        .. note:: Currently supports ADCIRC output files ``fort.6*``,
                  ``*.63``, ``fort.7*``, but NOT Hot Start Output
                  (``fort.67``, ``fort.68``)

        """
        # setup and save to shelf
        # set up saving
        if glob.glob(self.save_dir+'/'+save_file):
            os.remove(self.save_dir+'/'+save_file)

        # Save matrices to *.mat file for use by MATLAB or Python
        mdict = dict()
        mdict['mann_pts'] = mann_points 
        mdict['wall_pts'] = wall_points 
 
        self.save(mdict, save_file)

        #bv_array = tmm.get_basis_vec_array(self.basis_dir)
        bv_dict = tmm.get_basis_vectors(self.basis_dir)

        # Pre-allocate arrays for various data files
        num_points = mann_points.shape[1]
        num_walls = wall_points.shape[1]
        if num_walls != num_points:
            print "Error: num_walls != num_points"
            quit()

        # store the wall points with the mann_points as points
        mdict['points'] = np.vstack((wall_points, mann_points))

        # Pre-allocate arrays for non-timeseries data
        nts_data = {}
        self.nts_data = nts_data
        nts_data['maxele63'] = np.empty((data.node_num,
                                         self.num_of_parallel_runs))        
        
        # Pre-allocate arrays for QoI data
        if stations is None:
            stations = data.stations['fort61']
        xi = np.array([[s.x, s.y] for s in stations])
        points = np.column_stack((data.array_x(), data.array_y()))
        Q = np.empty((num_points, xi.shape[0]))
        self.Q = Q
        mdict['Q'] = Q

        # Update and save
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        default = data.read_default(path=self.save_dir)

        for k in xrange(0, num_points, self.num_of_parallel_runs):
            if k+self.num_of_parallel_runs >= num_points-1:
                stop = num_points
                step = stop-k
            else:
                stop = k+self.num_of_parallel_runs
                step = self.num_of_parallel_runs
            run_script = self.write_run_script(num_procs, step,
                                               procs_pnode, TpN, screenout,
                                               num_writers)
            self.write_prep_script(5)
            # set walls
            wall_dim = wall_points[..., k]
            data.read_spatial_grid()
            data.add_wall(wall_dim[:4], wall_dim[-1])
            # update wall and prep all
            for rf_dir in self.rf_dirs:
                os.remove(rf_dir+'/fort.14')
                shutil.copy(self.grid_dir+'/fort.14', rf_dir)
                f14.update(data, path=rf_dir)
            #PARALLEL: update file containing the list of rf_dirs
            self.update_dir_file(self.num_of_parallel_runs)
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./prep_2.sh'], stdout=devnull,
                                 cwd=self.save_dir) 
            p.communicate()
            devnull.close()
            for i in xrange(0, step):
                # generate the Manning's n field
                r_field = tmm.combine_basis_vectors(mann_points[..., i+k],
                                                    bv_dict, default,
                                                    data.node_num)
                # create the fort.13 for r_field
                f13.update_mann(r_field, self.rf_dirs[i])
            # do a batch run of python
            #PARALLEL: update file containing the list of rf_dirs
            self.update_dir_file(self.num_of_parallel_runs)
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./prep_5.sh'], stdout=devnull,
                                 cwd=self.save_dir) 
            p.communicate()
            devnull.close()
            devnull = open(os.devnull, 'w')
            p = subprocess.Popen(['./'+run_script], stdout=devnull,
                                 cwd=self.base_dir) 
            p.communicate()
            devnull.close()
            # get data
            for i, kk in enumerate(range(k, stop)):
                output.get_data_nts(i, self.rf_dirs[i], data, self.nts_data,
                                    ["maxele.63"])
            # fix dry nodes and interpolate to obtain QoI
            self.fix_dry_nodes_nts(data)
            for i, kk in enumerate(range(k, stop)):
                values = self.nts_data["maxele63"][:, i]
                Q[kk, :] = griddata(points, values, xi)
            # Update and save
            self.update_mdict(mdict)
            self.save(mdict, save_file)
            if num_points <= self.num_of_parallel_runs:
                pass
            elif (k+1)%(num_points/self.num_of_parallel_runs) == 0:
                msg = str(k+1)+" of "+str(num_points)
                print msg+" runs have been completed."

        # save data
        self.update_mdict(mdict)
        self.save(mdict, save_file)

        return Q
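For ``run_nobatch_q`` the two point arrays must have the same number of columns, since the ith wall pairs with the ith Manning's *n* field. A shape-only sketch (all values are made up, and ``main_run``/``domain`` are assumed to exist):

import numpy as np

num_of_walls = 6  # also the number of random fields
# each wall column is [xmin, xmax, ymin, ymax, wall_height]
wall_points = np.array([[0.0, 0.2, 0.0, 0.2, 2.0]] * num_of_walls).T
mann_points = 0.01 + 0.1 * np.random.rand(4, num_of_walls)
assert wall_points.shape[1] == mann_points.shape[1]
# Q = main_run.run_nobatch_q(domain, wall_points, mann_points, 'py_save_file.mat')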