예제 #1
0
파일: basic.py 프로젝트: cefect/CanFlood
 def output_fig(self, fig,
                out_dir = None, overwrite=None,

                #figure file controls
                fmt='svg',
                transparent=True,
                dpi = 150,):
     """write a matplotlib figure to '<name>_smry_plot.<fmt>' in out_dir

     Parameters
     ----------
     fig: matplotlib figure to save
     out_dir: str, optional
         output directory (defaults to self.out_dir)
     overwrite: bool, optional
         whether an existing file may be overwritten (defaults to self.overwrite)
     fmt: str, default 'svg'
         file format passed to savefig
     transparent: bool, default True
     dpi: int, default 150

     Returns
     -------
     str: filepath written

     Raises
     ------
     Error: if the file exists and overwrite is False, or savefig fails
     """
     #======================================================================
     # defaults
     #======================================================================
     if out_dir is None: out_dir = self.out_dir
     if overwrite is None: overwrite = self.overwrite
     log = self.logger.getChild('output')

     #======================================================================
     # output
     #======================================================================
     #file setup
     out_fp = os.path.join(out_dir, '%s_smry_plot.%s'%(self.name, fmt))

     if os.path.exists(out_fp):
         #fixed typo: 'esists' -> 'exists'
         msg = 'passed output file path already exists :\n    %s'%out_fp
         if overwrite:
             log.warning(msg)
         else:
             raise Error(msg)

     #write the file
     try:
         fig.savefig(out_fp, dpi = dpi, format = fmt, transparent=transparent)
         log.info('saved figure to file: %s'%out_fp)
     except Exception as e:
         raise Error('failed to write figure to file w/ \n    %s'%e)

     return out_fp
예제 #2
0
파일: rsamp.py 프로젝트: cefect/CanFlood
    def load_layers(
            self,  #load data to project (for console runs)
            rfp_l,  #list of raster filepaths to load
            finv_fp,  #filepath to the finv (asset inventory) vector file
            providerLib='ogr'):  #QGIS data-provider key for the vector layer
        """Load raster layers and the finv vector layer from file.

        special input loader for standalone runs
        Im assuming for the plugin these layers will be loaded already

        Returns
        -------
        tuple: (list of loaded raster layers, finv QgsVectorLayer)
        """
        log = self.logger.getChild('load_layers')
        #======================================================================
        # load rasters
        #======================================================================
        raster_d = dict()

        for fp in rfp_l:
            rlayer = self.load_rlay(fp)

            #add it in (keyed by the filename without extension)
            basefn = os.path.splitext(os.path.split(fp)[1])[0]
            raster_d[basefn] = rlayer

        #======================================================================
        # load finv vector layer
        #======================================================================
        fp = finv_fp
        assert os.path.exists(fp)
        basefn = os.path.splitext(os.path.split(fp)[1])[0]
        vlay_raw = QgsVectorLayer(fp, basefn, providerLib)

        # checks
        if not isinstance(vlay_raw, QgsVectorLayer):
            raise IOError

        #check if this is valid
        if not vlay_raw.isValid():
            raise Error(
                'loaded vlay \'%s\' is not valid. \n \n did you initilize?' %
                vlay_raw.name())

        #check if it has geometry (wkbType 100 indicates NoGeometry here)
        if vlay_raw.wkbType() == 100:
            raise Error('loaded vlay has NoGeometry')

        #register with the map store so the layer gets cleaned up later
        self.mstore.addMapLayer(vlay_raw)

        vlay = vlay_raw
        dp = vlay.dataProvider()

        log.info(
            'loaded vlay \'%s\' as \'%s\' %s geo  with %i feats from file: \n     %s'
            % (vlay.name(), dp.storageType(), QgsWkbTypes().displayString(
                vlay.wkbType()), dp.featureCount(), fp))

        #======================================================================
        # wrap
        #======================================================================

        return list(raster_d.values()), vlay
예제 #3
0
    def get_cf_fp(self):
        """retrieve and validate the control file path from the ui

        Returns
        -------
        str: validated filepath to the control (.txt) file

        Raises
        ------
        Error: if the path is blank, does not exist, or is not a .txt file
        """
        cf_fp = self.lineEdit_cf_fp.text()

        #check a value was provided (fixed typo: 'specficy' -> 'specify')
        if cf_fp is None or cf_fp == '':
            raise Error('need to specify a control file path')
        #check the file exists
        if not os.path.exists(cf_fp):
            raise Error('need to specify a valid control file path')
        #check the extension
        if os.path.splitext(cf_fp)[1] != '.txt':
            raise Error('unexpected extension on Control File')

        return cf_fp
예제 #4
0
파일: hazus.py 프로젝트: NRCan/CanFlood
    def _set_depth_conv(
            self,  #get conversion dictionary form hazus labels
            raw_l,  #example depths ['ft04m', 'ft03m', 'ft02m', 'ft01m', 'ft00', 'ft01', 'ft02', 'ft03'
            exposure_units=None,  #for changing units
    ):
        log = self.logger.getChild('get_depth_conv')
        assert isinstance(raw_l, list)
        if exposure_units is None: exposure_units = self.exposure_units
        #=======================================================================
        # get scale
        #=======================================================================
        if exposure_units == 'meters':
            scale = self.ft_m
        elif exposure_units == 'ft':
            scale = 1
        else:
            raise Error('unrecognized exposure_units: %s' % exposure_units)

        #=======================================================================
        # get conversions
        #=======================================================================
        d = dict()
        for raw in raw_l:
            assert raw.startswith('ft'), raw

            val = raw.replace('ft', '')

            if raw.endswith('m'):
                iscale = scale * -1  #invert scale for negatives
                val = val[:-1]
            else:
                iscale = scale

            try:
                d[raw] = round(float(val) * iscale, self.prec)
            except Exception as e:
                raise Error('failed to float %s=\'%s\' \n    %s' %
                            (raw, val, e))

        #=======================================================================
        # wrap
        #=======================================================================
        assert np.all(np.diff(pd.Series(d)) > 0), 'non-monotonic'

        log.info('finished w/ %i' % len(d))
        """
        d.keys()
        view(pd.Series(d))
        """
        self.depthc_d = d

        return
예제 #5
0
파일: djoin.py 프로젝트: NRCan/CanFlood
    def _prep_vlay(self, vlay_raw, keep_fnl, log=None):
        """Extract and slice the attribute table from a vector layer.

        Parameters
        ----------
        vlay_raw: QgsVectorLayer
            source layer (crs must match the project crs)
        keep_fnl: 'all' or list
            field names to retain ('all' keeps everything); the link field
            (self.cid) and 'fid' are always retained
        log: logger, optional

        Returns
        -------
        pd.DataFrame: sliced attribute table with a 'fid' column

        Raises
        ------
        Error: on missing requested fields, unexpected keep_fnl type, or
            non-unique link keys
        """
        if log is None: log = self.logger.getChild('_prep_vlay')

        assert vlay_raw.crs() == self.qproj.crs(
        ), 'crs mismatch: \n    %s\n    %s' % (vlay_raw.crs(),
                                               self.qproj.crs())

        #pull the attribute table, dropping any pre-existing fid fields
        df_raw = vlay_get_fdf(vlay_raw, logger=log,
                              feedback=self.feedback).drop(['ogc_fid', 'fid'],
                                                           axis=1,
                                                           errors='ignore')

        df_raw['fid'] = df_raw.index

        #======================================================================
        # drop to keeper fields
        #======================================================================
        if keep_fnl == 'all':
            log.info('keeping all fields')
            df1 = df_raw.copy()
        elif isinstance(keep_fnl, list):
            #work on a copy so the caller's list is not mutated below
            keep_fnl = list(keep_fnl)

            #check the request
            miss_l = set(keep_fnl).difference(df_raw.columns)
            if len(miss_l) > 0:
                raise Error(
                    '%i requested keeper fields not in data: \n    %s' %
                    (len(miss_l), miss_l))

            #make sure the linker is in there
            if self.cid not in keep_fnl:
                keep_fnl.append(self.cid)

            if 'fid' not in keep_fnl:
                keep_fnl.append('fid')

            #make the slice
            df1 = df_raw.loc[:, keep_fnl]

            log.info('dropped to %i columns (from %i)' %
                     (len(df1.columns), len(df_raw.columns)))

        else:
            raise Error('unexpected type on keep_fnl')

        #the link field must be unique for the downstream join
        if not df1[self.cid].is_unique:
            raise Error('non-unique vlay keys')

        return df1
예제 #6
0
    def load_data(self):
        """load, clean, and attach the multi-tab input data

        populates self.wsl_d ('dry'/'wet' frames) and self.aprot_df

        Raises
        ------
        Error: on an unexpected tab name
        """
        logger = self.logger.getChild('load_data')

        self.filepath = self.get_filepath()

        #load all tabs from the file
        d = self.loadr_real(self.filepath, multi = True)

        #=======================================================================
        # sort and attach
        #=======================================================================
        for k, v in d.items():
            logger.debug('sending \'%s\' for cleaning'%k)
            df1 = self.clean_binv_data(v)

            #cleaning for wsl
            if k in ['dry', 'wet']:
                df2 = self.wsl_clean(df1, tag=k)

                self.wsl_d[k] = df2

            #cleaning for area protection
            elif k == 'aprot':
                #np.int was removed in numpy>=1.24; the builtin int is the
                #documented replacement and behaves identically here
                self.aprot_df = df1.astype(int)

            else:
                raise Error('got unexpected tab name \'%s\''%k)

        return
예제 #7
0
    def prep_model(
            self,
            event_slice=False,  #allow the expolike data to pass MORE events than required 
    ):
        """Load and prepare all data inputs for the Risk2 model."""
        #inundation-percentage runs are not supported by this model
        if self.as_inun:
            raise Error('risk2 inundation percentage not implemented')

        #=======================================================================
        # data loaders
        #=======================================================================
        self.set_finv()
        self.set_evals()
        self.set_dmgs()

        #optional failure-likelihood data
        if self.exlikes != '':
            self.set_exlikes(event_slice=event_slice)

        #optional attribution matrices
        if self.attriMode:
            """the control file parameter name changes depending on the model"""
            self.set_attrimat()
            self.promote_attrim()

        self.logger.debug('finished  on Risk2')

        return
예제 #8
0
    def _get_val_str(
        self,  #helper to get value string for writing text on the plot
        val_str,  #cant be a kwarg.. allowing None
        impactFmtFunc=None,
    ):
        """
        generally just returns the val_str
            but also provides some special handles
        """
        #=======================================================================
        # defaults
        #=======================================================================
        if impactFmtFunc is None: impactFmtFunc = self.impactFmtFunc
        if val_str is None:
            val_str = self.val_str

        #=======================================================================
        # special keys
        #=======================================================================
        if isinstance(val_str, str):
            if val_str == '*default':
                assert isinstance(self.ead_tot, float)
                val_str = 'total annualized impacts = ' + impactFmtFunc(
                    self.ead_tot)
            elif val_str == '*no':
                val_str = None
            elif val_str.startswith('*'):
                raise Error('unrecognized val_str: %s' % val_str)

        return val_str
예제 #9
0
파일: CanFlood.py 프로젝트: NRCan/CanFlood
    def __init__(self, iface):
        """Constructor.

        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """

        self.iface = iface
        self.dialogs_d = dict()  #container for the constructed dialog children
        #=======================================================================
        # build dialog children
        #=======================================================================
        """todo: only construct these on first pass"""
        for attn, DialogClass in self.dialogPars_d.items():
            try:
                #construct each dialog, passing the session for back-reference
                self.dialogs_d[attn] = DialogClass(self.iface, session=self)
            except Exception as e:
                raise Error('failed to load \'%s\' w/ \n    %s' % (attn, e))

        # Check if plugin was started the first time in current QGIS session
        # Must be set in initGui() to survive plugin reloads
        self.first_start = None

        #directories for parameter files and icon resources (beside this module)
        self.pars_dir = os.path.join(os.path.dirname(__file__), '_pars')
        self.icon_dir = os.path.join(os.path.dirname(__file__), 'icons')

        #=======================================================================
        # logger
        #=======================================================================
        self.logger = plugLogger(self, log_nm='CanFlood')
예제 #10
0
파일: basic.py 프로젝트: NRCan/CanFlood
    def set_cf_pars(
        self,  #update the control file w/ the passed parameters
        new_pars_d,  #new paraemeters 
        # {section : ({valnm : value } OR string (for notes)})
        cf_fp=None,
        logger=None,
    ):
        """Write the passed parameter updates into the control file.

        new_pars_d maps a section name to a tuple whose entries are either
        a {option: value} dict or a bare string (stored as a no-value note)
        """
        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger = self.logger
        log = logger.getChild('set_cf_pars')

        if cf_fp is None: cf_fp = self.cf_fp
        assert isinstance(
            cf_fp, str), '%s got bad cf_fp type: %s' % (self.name, type(cf_fp))
        assert os.path.exists(cf_fp), 'bad cf_fp: \n    %s' % cf_fp

        #=======================================================================
        # read the existing control file
        #=======================================================================
        cpars = configparser.ConfigParser(allow_no_value=True)
        _ = cpars.read(cf_fp)

        #=======================================================================
        # apply the updates
        #=======================================================================
        update_cnt = 0
        for sectName, entry_t in new_pars_d.items():
            assert isinstance(
                entry_t,
                tuple), '\"%s\' has bad subtype: %s' % (sectName, type(entry_t))
            assert sectName in cpars, 'requested section \'%s\' not in the pars!' % sectName

            for entry in entry_t:
                #option:value pairs
                if isinstance(entry, dict):
                    for optName, optVal in entry.items():
                        assert isinstance(optVal, str), \
                            'failed to get a str on %s.%s: \'%s\''%(sectName, optName, type(optVal))

                        cpars.set(sectName, optName, optVal)
                        update_cnt += 1

                #bare note strings (stored without a value)
                elif isinstance(entry, str):
                    cpars.set(sectName, entry)
                    update_cnt += 1

                else:
                    raise Error('unrecognized value type: %s' % type(entry))

        #=======================================================================
        # write back to disk
        #=======================================================================
        with open(cf_fp, 'w') as configfile:
            cpars.write(configfile)

        log.info('updated control file w/ %i pars at :\n    %s' % (update_cnt, cf_fp))

        return
예제 #11
0
    def check_dfeat(self):  #standard checking fcommands
        """Validate this dfeat's model link, object hierarchy, and meta_df entry.

        raises IOError/Error when any of the expected parent/grandparent
        relations or metadata registrations are inconsistent
        """
        #this object's mind must match its model's
        if not self.mind == self.model.mind:
            raise IOError

        #=======================================================================
        # check hierarchy
        #=======================================================================
        if self.parent is None:
            raise IOError

        #grandparent via parent must be my house object
        if not self.hse_o == self.parent.parent:
            raise IOError

        #check acode (only once a run has started)
        if 'run' in self.session.state:
            hp_oop.check_match(self, self.hse_o, attn='acode')

            if not self.acode == self.parent.get_acode():
                raise Error('acode mismiatch with parent')

        #grandparent must share my model
        gp = self.parent.parent
        if not self.model == gp.model:
            raise Error(
                '\"%s\' model (%s) \n does not match their grandparents \'%s\' (%s)'
                % (self.name, self.model, gp.name, gp.model))

        #house geometry data must be loaded before checking
        if self.hse_o.geo_dxcol is None:
            raise IOError

        #NOTE(review): parent is expected to be a Dfunc — confirm upstream
        if not self.parent.__class__.__name__ == 'Dfunc':
            raise IOError

        #=======================================================================
        # check meta_df
        #=======================================================================
        #my location key must be registered in the parent's child metadata
        if not self.dfloc in self.parent.childmeta_df.index.tolist():
            raise Error(
                'could not find my dfloc \'%s\' in my parents \'%s\' index' %
                (self.dfloc, self.parent.name))

        #and my name must appear in the parent's 'name' column
        if not self.name in self.parent.childmeta_df.loc[:,
                                                         'name'].values.tolist(
                                                         ):
            raise IOError
예제 #12
0
파일: togrid.py 프로젝트: NRCan/CanFlood
    def gsamp(self, #resample results to a grid (from single asset res layer)
              avlay, #asset results layer
              gvlay=None, #new polygon grid to sample
              gid=None,
              res_fnl = ['ead'], #list of result fields to downsample
              use_raw_fn=True, #whether to convert the summary field names back to raw.
              
              logger=None,
              discard_nomatch=False,
              **jkwargs #joinbylocationsummary kwargs
              ):
        """Downsample asset-level results onto a polygon grid.

        thin wrapper around joinbylocationsummary with pre-checks on the
        requested result fields and the grid id uniqueness
        """
        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger = self.logger
        log = logger.getChild('gsamp')

        if gvlay is None: gvlay = self.gvlay
        if gid is None: gid = self.gid

        log.info('downsampling \'%s\' (%i feats) to \'%s\' (%i)'%(
            avlay.name(), avlay.dataProvider().featureCount(), gvlay.name(),
            gvlay.dataProvider().featureCount()))

        #=======================================================================
        # prechecks
        #=======================================================================
        #grid layer must carry the id field
        gvlay_fnl = [f.name() for f in gvlay.fields()]
        assert gid in gvlay_fnl, gid

        #asset layer must carry every requested result field
        avlay_fnl = [f.name() for f in avlay.fields()]
        miss_s = set(res_fnl).difference(avlay_fnl)
        if not len(miss_s) == 0:
            raise Error('\'%s\' missing requested results fields: %s'%(avlay.name(), miss_s))

        #grid ids must be unique
        gid_d = vlay_get_fdata(gvlay, fieldn=gid, logger=log)
        assert pd.Series(gid_d).is_unique, '%s has bad gid=\'%s\''%(gvlay.name(), gid)

        #=======================================================================
        # calc
        #=======================================================================
        gvlay1, nfn_l = self.joinbylocationsummary(gvlay, avlay, res_fnl, use_raw_fn=use_raw_fn,
                                                   discard_nomatch=discard_nomatch,
                                                   **jkwargs)

        #=======================================================================
        # wrap
        #=======================================================================
        #without discards the feature count should be preserved
        if not discard_nomatch:
            assert gvlay1.dataProvider().featureCount()==gvlay.dataProvider().featureCount()

        return gvlay1, nfn_l
예제 #13
0
파일: dialog.py 프로젝트: NRCan/CanFlood
    def run_vuln(self):
        """ui action: compute dike failure probabilities (vulnerability)

        collects dialog inputs, runs the Dvuln worker, applies the user's
        length-effect method, writes outputs, and updates the gui
        """
        log = self.logger.getChild('run_vuln')
        log.debug('start')
        self.set_setup()  #attach all the commons
        self.feedback.setProgress(5)
        from misc.dikes.vuln import Dvuln
        #=======================================================================
        # collect inputs
        #=======================================================================

        #=======================================================================
        # init
        #=======================================================================
        #pass all inherited attributes through to the worker
        kwargs = {
            attn: getattr(self, attn)
            for attn in self.inherit_fieldNames
        }
        wrkr = Dvuln(**kwargs)
        self.feedback.setProgress(10)

        #=======================================================================
        # load
        #=======================================================================
        #dike exposure and fragility-curve filepaths from the ui
        wrkr._setup(dexpo_fp=self.lineEdit_v_dexpo_fp.text(),
                    dcurves_fp=self.lineEdit_v_dcurves_fp.text())
        self.feedback.setProgress(20)
        #==========================================================================
        # execute
        #==========================================================================
        pf_df = wrkr.get_failP()
        self.feedback.setProgress(60)
        #=======================================================================
        # length effect
        #=======================================================================
        #collect method from the user's radio-button selection
        if self.radioButton_v_lfx_none.isChecked():
            method = None
        elif self.radioButton_v_lfx_urs.isChecked():
            method = 'URS2007'
        else:
            raise Error('unrecognized lfx method')

        if not method is None:
            wrkr.set_lenfx(method=method)  #apply length effects
        self.feedback.setProgress(80)
        #=======================================================================
        # outputs
        #=======================================================================
        pfail_fp = wrkr.output_vdfs()
        self.feedback.setProgress(95)
        #=======================================================================
        # update gui
        #=======================================================================
        #make the failure probabilities available to the join-areas tool
        self.lineEdit_ja_pfail_fp.setText(pfail_fp)

        log.info('finished Dike Vuln w/ %s' % str(pf_df.shape))
        self.feedback.upd_prog(None)
예제 #14
0
 def get_breach_area(self):
     """Placeholder: delineate the breach (flooded) area behind a dike.

     this would be very tricky to automate....

     check:
     SAGA 'fill sinks' for a simple dtm filler
     SAGA 'Lake flood' for filling a dtm up to a specified depth
     """
     raise Error('not implemented')
예제 #15
0
    def get_wd(self):
        """retrieve the Working Directory from the ui, building it if needed

        Returns
        -------
        str: working directory path (created when it did not exist)

        Raises
        ------
        Error: if no directory was specified
        """
        wd = self.lineEdit_wd.text()

        #fixed typo: 'specficy' -> 'specify'
        if wd is None or wd == '':
            raise Error('need to specify a Working Directory')
        if not os.path.exists(wd):
            #build the directory rather than failing
            os.makedirs(wd)
            self.logger.info('built new working directory at:\n    %s' % wd)

        return wd
예제 #16
0
    def wsl_clean(self, df_raw, tag='?'):
        """clean a raw wsl data tab

        drops '~'-flagged and coordinate columns, recasts column labels
        to sorted ints, converts values to float32, and replaces the user
        supplied na_value with np.nan

        Parameters
        ----------
        df_raw: pd.DataFrame, raw tab data
        tag: str, label for logging (e.g. 'dry'/'wet')

        Returns
        -------
        pd.DataFrame: cleaned frame with sorted integer columns

        Raises
        ------
        Error: if the column labels cannot be recast as ints
        """
        logger = self.logger.getChild('wsl_clean')
        df = df_raw.copy()

        #=======================================================================
        # drop columns
        #=======================================================================
        #any flagged columns
        boolcol = df.columns.astype(str).str.startswith('~')

        if np.any(boolcol):
            logger.warning('dropping %i \'%s\' columns with \'~\' flag'%( boolcol.sum(), tag))
            df1 = df.loc[:,~boolcol]
        else:
            df1 = df

        #drop any coordinate columns
        df1 = df1.drop(labels=['X','x','Y','y'], axis=1, errors='ignore')

        #===================================================================
        # headers
        #===================================================================
        #reformat the column labels as ints
        try:
            df1.columns = df1.columns.astype(int)
        except Exception as e:  #was a bare except: keep the cause chained
            raise Error('failed to recast columns as int: \n %s'%(df1.columns.tolist())) from e

        #sort the columns
        df2 = df1.reindex(columns = sorted(df1.columns))

        #reformat values
        df2 = df2.astype('float32')

        #=======================================================================
        # clean the user provided null
        #=======================================================================
        if self.na_value is not None:
            boolar = df2.values == self.na_value
            df2[boolar] = np.nan

            logger.warning('for set %i user identified values to null with \'%s\''%
                           (boolar.sum().sum(), self.na_value))

        logger.debug('cleaned to %s'%str(df2.shape))

        return df2
예제 #17
0
파일: basic.py 프로젝트: NRCan/CanFlood
    def _get_from_cpar(
            self,  #special parameter extraction recognizing object's t ype
            cpars,
            sectName,
            varName,
            logger=None):
        """each parameter should exist on teh class instance.
                we use this to set the type"""

        if logger is None: logger = self.logger
        log = logger.getChild('_get_from_cpar')
        #=======================================================================
        # get native type on class
        #=======================================================================
        assert hasattr(
            self, varName), '\'%s\' does not exist on %s' % (varName, self)

        #get class instance's native type
        ntype = type(getattr(self, varName))

        #==============================================================
        # retrive and typeset  (using native t ype)
        #==============================================================
        assert isinstance(cpars, configparser.ConfigParser)

        csect = cpars[sectName]
        pval_raw = csect[varName]  #raw value (always a string)

        #boolean
        if ntype == bool:
            pval = csect.getboolean(varName)

        #no check or type conversion
        elif getattr(self, varName) is None:
            pval = pval_raw

        #other types
        else:
            try:
                pval = ntype(pval_raw)
            except Exception as e:
                raise Error(
                    'failed to set %s.%s  with input \'%s\' (%s) to %s \n %s' %
                    (sectName, varName, pval_raw, type(pval_raw), ntype, e))

        #=======================================================================
        # blank set
        #=======================================================================
        """seems like we're setup for ''.... not sure the value in switching everything over
        if pval == '':
            pval = np.nan"""

        log.debug('retrieved \'%s.%s\'=\'%s\' w/ type: \'%s\'' %
                  (sectName, varName, pval, type(pval)))
        return pval
예제 #18
0
파일: basic.py 프로젝트: NRCan/CanFlood
    def output_df(
        self,  #dump some outputs
        df,
        out_fn,
        out_dir=None,
        overwrite=None,
        write_index=True,
        logger=None,
    ):
        """write a DataFrame to csv

        Parameters
        ----------
        df: pd.DataFrame, non-empty data to write
        out_fn: str, output filename ('.csv' appended if missing)
        out_dir: str, optional, output directory (defaults to self.out_dir)
        overwrite: bool, optional (defaults to self.overwrite)
        write_index: bool, default True, whether to write the index
        logger: optional

        Returns
        -------
        str: filepath written (also stored on self.out_fp)

        Raises
        ------
        Error: if the file already exists and overwrite is False
        """
        #======================================================================
        # defaults
        #======================================================================
        if out_dir is None: out_dir = self.out_dir
        if overwrite is None: overwrite = self.overwrite
        if logger is None: logger = self.logger
        log = logger.getChild('output_df')

        #======================================================================
        # prechecks
        #======================================================================
        assert isinstance(
            out_dir, str), 'unexpected type on out_dir: %s' % type(out_dir)
        assert os.path.exists(
            out_dir
        ), 'requested output directory does not exist: \n    %s' % out_dir
        assert isinstance(df, pd.DataFrame)
        assert len(df) > 0, 'no data'

        #extension check
        if not out_fn.endswith('.csv'):
            out_fn = out_fn + '.csv'

        #output file path
        out_fp = os.path.join(out_dir, out_fn)

        #======================================================================
        # checks
        #======================================================================
        if os.path.exists(out_fp):
            log.warning('file exists \n    %s' % out_fp)
            if not overwrite:
                raise Error('file already exists')

        #======================================================================
        # write it
        #======================================================================
        df.to_csv(out_fp, index=write_index)

        #fixed garbled log message ('wrote to %s to filezzz')
        log.info('wrote %s to file: \n    %s' % (str(df.shape), out_fp))

        self.out_fp = out_fp  #set for other methods

        return out_fp
예제 #19
0
    def output_fig(
        self,
        fig,

        #file controls
        out_dir=None,
        overwrite=None,
        fname=None,  #filename

        #figure write controls
        fmt='svg',
        transparent=True,
        dpi=150,
        logger=None,
    ):
        """write a matplotlib figure to file

        fname defaults to the figure's suptitle, falling back to self.name

        Returns
        -------
        str: filepath written

        Raises
        ------
        Error: if savefig fails
        """
        #======================================================================
        # defaults
        #======================================================================
        if out_dir is None: out_dir = self.out_dir
        if overwrite is None: overwrite = self.overwrite
        if logger is None: logger = self.logger
        log = logger.getChild('output_fig')

        #=======================================================================
        # precheck
        #=======================================================================

        assert isinstance(fig, self.matplotlib.figure.Figure)
        log.debug('on %s' % fig)
        #======================================================================
        # output
        #======================================================================
        #file setup
        if fname is None:
            try:
                fname = fig._suptitle.get_text()
            except AttributeError:  #was a bare except; only no-suptitle expected
                fname = self.name

        out_fp = os.path.join(out_dir, '%s.%s' % (fname, fmt))

        if os.path.exists(out_fp): assert overwrite, 'file exists: %s' % out_fp

        #write the file
        try:
            fig.savefig(out_fp, dpi=dpi, format=fmt, transparent=transparent)
            log.info('saved figure to file:   %s' % out_fp)
        except Exception as e:
            raise Error('failed to write figure to file w/ \n    %s' % e)

        return out_fp
예제 #20
0
파일: CanFlood.py 프로젝트: NRCan/CanFlood
    def initGui(self):  #add UI elements to Qgis
        """
        called on Qgis Load?

        builds the plugin toolbar and one QAction per dialog child,
        wiring each action's trigger to that dialog's launch method
        """
        log = self.logger.getChild('initGui')
        #=======================================================================
        # configure toolbar
        #=======================================================================
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        toolbar = self.iface.addToolBar(self.menu_name)  #build a QToolBar
        toolbar.setObjectName(self.menu_name)

        #=======================================================================
        # setup actions
        #=======================================================================
        #actions grouped by where they attach in the gui
        self.actions_d = {'toolbar': dict(), 'menu': dict()}

        cnt = 0
        for attn, wrkr in self.dialogs_d.items():
            try:
                #build the icon
                icon_fp = os.path.join(self.icon_dir, wrkr.icon_fn)
                assert os.path.exists(icon_fp), 'bad filepath: %s' % icon_fp
                icon = QIcon(icon_fp)

                #assemble the action
                action = QAction(icon, wrkr.icon_name, self.iface.mainWindow())

                action.setObjectName(wrkr.icon_name)
                action.setCheckable(False)
                action.triggered.connect(wrkr.launch)  #launch the dialog on click

                #add to the gui (toolbar button or plugin menu entry)
                if wrkr.icon_location == 'toolbar':
                    toolbar.addAction(action)
                elif wrkr.icon_location == 'menu':
                    self.iface.addPluginToMenu(self.menu_name, action)

                self.actions_d[wrkr.icon_location][attn] = action
                cnt += 1

            except Exception as e:
                raise Error('failed to build action for \'%s\' w/ \n    %s' %
                            (attn, e))

        #wrap
        self.toolbar = toolbar
        log.debug('attached %i actions' % cnt)
예제 #21
0
파일: hazus.py 프로젝트: NRCan/CanFlood
    def _agri(
        self,
        df_raw,  #raw agriculture curve table
        gname,  #group (table) name
        crve_d,  #curve metadata template to update
        sourceCn='FunctionSource',  #column holding the curve source
        assetCn='Crop',  #column holding the asset (crop) name
        logger=None,
    ):
        """Stub for converting hazus agriculture curve tables (not implemented).

        NOTE(review): everything after the raise below is unreachable
        scaffolding kept for future development
        """
        raise Error(
            'not really sure how these curves work... skipping for now')
        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger = self.logger
        log = logger.getChild(gname)
        """
        view(df_raw)
        """

        #=======================================================================
        # meta update
        #=======================================================================
        crve_d.update({
            'desc': crve_d['desc'] + ' for Agriculture',
            'scale_var': 'building replacement cost',
            'scale_units': 'monetary',
            'impact_units': 'pct',
            'impact_var': 'loss'
        })

        srceLib_d = dict()
        for srce, gdf in df_raw.groupby(sourceCn):

            crve_d['source'] = '%s, table=\'%s\' file=%s' % (srce, gname,
                                                             self.source_str)
            #log.debug('%s w/\n%s'%(srce, gdf['Occupancy'].value_counts()))
            srceLib_d[srce] = dict()

            for crop, cdf in gdf.groupby(assetCn):
                """
                view(cdf)
                """
                print(srce, crop)

        return dict()
예제 #22
0
파일: rsamp.py 프로젝트: cefect/CanFlood
    def write_res(
        self,
        vlay,
        out_dir=None,  #directory for outputs
        names_d=None,  #optional field name conversion {old name: new name}
        rname_l=None,  #raster layer names expected to appear as result columns
    ):
        """Write the sampling results on 'vlay' to a csv.

        Extracts the attribute table, applies the optional column renaming,
        verifies every expected raster name maps onto a column, then writes
        the frame via self.output_df.

        Returns
        -------
        str: filepath of the written csv (also stored on self.out_fp)

        Raises
        ------
        Error: if any name in rname_l is missing from the result columns
        """
        log = self.logger.getChild('write_res')  #was 'run': mislabeled log records
        #======================================================================
        # defaults
        #======================================================================
        if names_d is None: names_d = self.names_d
        if rname_l is None: rname_l = self.rname_l
        if out_dir is None: out_dir = self.out_dir
        res_name = vlay.name()

        #======================================================================
        # prechecks
        #======================================================================
        assert os.path.exists(out_dir), 'bad out_dir'
        #======================================================================
        # write data----------------
        #======================================================================
        #extract data
        df = vlay_get_fdf(vlay)

        #rename
        if len(names_d) > 0:
            df = df.rename(columns=names_d)
            log.info('renaming columns: %s' % names_d)

        #check the raster names all landed on the frame
        miss_l = set(rname_l).difference(df.columns.to_list())
        if len(miss_l) > 0:
            raise Error(
                'failed to map %i raster layer names onto results: \n    %s' %
                (len(miss_l), miss_l))

        out_fp = self.output_df(df,
                                '%s.csv' % res_name,
                                out_dir=out_dir,
                                write_index=False)

        self.out_fp = out_fp

        return out_fp  #was a bare 'return': surface the path to callers
예제 #23
0
 def treat_wetnull(self): #apply the wetnull_code algorithm to the wet frame
     """Fill null values in the 'wet' WSL frame per self.wetnull_code.

     For 'take_dry' (the only implemented code), nulls in the wet frame
     are replaced with the corresponding values from the dry frame.
     Updates self.wsl_d in place and returns None.
     """
     #=======================================================================
     # defaults
     #=======================================================================
     logger = self.logger.getChild('treat_wetnull')

     dfwet = self.wsl_d['wet']
     dfdry = self.wsl_d['dry']

     #=======================================================================
     # precheck: the dry frame must be fully populated (debug runs only)
     #=======================================================================
     if self.db_f:
         if np.any(pd.isnull(dfdry)):
             logger.debug('nulls per column: \n%s'%pd.isnull(dfdry).sum(axis=0))
             logger.debug('nulls per row: \n%s'%pd.isnull(dfdry).sum(axis=1))
             raise Error('got %i null values for dfdry'%pd.isnull(dfdry).sum().sum())


     #=======================================================================
     # take_dry
     #=======================================================================
     if self.wetnull_code == 'take_dry':

         #identify location of null values in the wet frame
         boolar = pd.isnull(dfwet.values)

         #replace the wet frame's nulls with the corresponding dry values
         dfwet = dfwet.where(~boolar, other=dfdry)

         #was logged backwards ('from the wet flood to the dry flood')
         logger.info('set %i values from the dry flood onto the wet flood'%boolar.sum())

     else: raise IOError


     #=======================================================================
     # reset into dict
     #=======================================================================
     self.wsl_d['wet'] = dfwet
     self.wsl_d['dry'] = dfdry

     return
예제 #24
0
    def _check_finv(self,
                    logger=None):  #check the finv and some paramter logic
        """Validate the selected finv layer and cid parameter.

        see also Model.check_finv() for data level checks

        Raises AssertionError on selection/CRS/field problems and Error
        when the cid field contains duplicate values.
        """
        if logger is None: logger = self.logger
        log = logger.getChild('_check_finv')

        #=======================================================================
        # selection checks
        #=======================================================================
        assert self.cid is not None, 'must specify a valid cid'
        assert isinstance(self.finv_vlay, QgsVectorLayer), \
            'must select a VectorLayer for the finv'

        #=======================================================================
        # data checks
        #=======================================================================
        #CRS must agree with the project
        assert self.finv_vlay.crs() == self.qproj.crs(
        ), 'finv CRS (%s) does not match projects (%s)' % (
            self.finv_vlay.crs(), self.qproj.crs())

        #cid must be one of the layer's fields
        fname_d = {field.name(): field for field in self.finv_vlay.fields()}
        assert self.cid in fname_d, 'specified cid not found on finv'

        #cid field must be an integer type
        ftype = fname_d[self.cid].typeName()
        assert 'int' in ftype.lower(), \
        'cid field \'%s\' must be integer type not \'%s\''%(
            self.cid, ftype)

        #cid values must be unique
        id_ser = hlpr.Q.vlay_get_fdata(self.finv_vlay,
                                       fieldn=self.cid,
                                       fmt='df',
                                       logger=log)
        dupes_bx = id_ser.duplicated(keep=False)
        if dupes_bx.any():
            log.debug('duplicated values \n%s' % id_ser[dupes_bx])

            raise Error(
                'passed finv cid=\'%s\' values contain %i duplicates... see logger'
                % (self.cid, dupes_bx.sum()))
예제 #25
0
파일: testAll.py 프로젝트: NRCan/CanFlood
    def get_tests(
            self,  #generate the test suites
            wFLow_l,
            **kwargs):
        """Assemble a unittest.TestSuite from a list of workflow classes.

        Each workflow in wFLow_l is executed (self._run_wflow), its pickled
        results re-loaded, and one test is added per matching test-method
        found on the flow's Test class.

        Returns
        -------
        unittest.TestSuite
        """
        log = self.logger.getChild('get_tests')
        #===========================================================================
        # assemble suite
        #===========================================================================
        suite = unittest.TestSuite()  #start the suite container

        for fWrkr in wFLow_l:

            #===================================================================
            # setup the flow
            #===================================================================
            """tests handle flows AFTER they've run
            lets us run many tests on a completed flow without having to re-run the flow each time"""
            try:
                runr = self._run_wflow(fWrkr, **kwargs)
            except Exception as e:
                raise Error('failed to execute %s w/ \n    %s' %
                            (fWrkr.__name__, e))

            runr.load_pick()

            #build a test for each matching method in the class
            for tMethodName in TestLoader().getTestCaseNames(runr.Test):
                """inits TestU
                only 1 test method per TestU for now"""
                suite.addTest(runr.Test(tMethodName, runr=runr))

            runr.mstore.removeAllMapLayers()

        #wrap
        #fixed NameError: the summary referenced 'wFlow_l' but the parameter is 'wFLow_l'
        log.info(
            'constructed test suite from %i flows w/ %i tests in %s\n \n' %
            (len(wFLow_l), suite.countTestCases(),
             datetime.datetime.now() - start))  #NOTE(review): 'start' must be a module-level timestamp -- confirm
        return suite
예제 #26
0
파일: basic.py 프로젝트: cefect/CanFlood
 def update_cf(self, #update parameters in a control file
               new_pars_d, #new parameters {section: (subval, ...)} where each
                           #subval is either a {valnm: value} dict or a bare
                           #option-name string (set with no value)
               cf_fp = None):
     """Apply 'new_pars_d' to the control file at cf_fp (default self.cf_fp).

     Reads the file with configparser, applies each update in place, then
     re-writes the whole file.
     """
     log = self.logger.getChild('update_cf')

     #get defaults
     if cf_fp is None: cf_fp = self.cf_fp

     assert os.path.exists(cf_fp), 'bad cf_fp: %s'%cf_fp

     #initialize the parser
     pars = configparser.ConfigParser(allow_no_value=True)
     _ = pars.read(cf_fp) #read it from the new location

     #loop and make updates
     for section, val_t in new_pars_d.items():
         #fixed malformed quoting in the assert message (was \"%s')
         assert isinstance(val_t, tuple), '\'%s\' has bad subtype: %s'%(section, type(val_t))
         assert section in pars, 'requested section \'%s\' not in the pars!'%section

         for subval in val_t:
             #value key pairs
             if isinstance(subval, dict):
                 for valnm, value in subval.items():
                     pars.set(section, valnm, value)

             #single values (option written with no value)
             elif isinstance(subval, str):
                 pars.set(section, subval)

             else:
                 raise Error('unrecognized value type: %s'%type(subval))


     #write the config file
     with open(cf_fp, 'w') as configfile:
         pars.write(configfile)

     #fixed typo in the log message ('contyrol')
     log.info('updated control file w/ %i pars at :\n    %s'%(
         len(new_pars_d), cf_fp))

     return
예제 #27
0
    def check_binv_data(self, df):
        """Validate 'df' against the building inventory (binv) child data.

        Checks for nulls, matching length, and an exactly matching index.

        Raises
        ------
        Error: on nulls (via fancy_null_chk) or a length mismatch
        IOError: on an index mismatch
        """
        #=======================================================================
        # defaults
        #=======================================================================
        logger = self.logger.getChild('check_binv_data')

        binv_df = self.parent.kids_d['binv'].childmeta_df

        #null check (raises on any null)
        hp_pd.fancy_null_chk(df, detect='error', dname=self.name, logger=logger)

        #length check
        if not len(df) == len(binv_df):
            raise Error('my data length (%i) does not match the binv length (%i)'%(len(df), len(binv_df)))

        #check for index match
        if not np.all(df.index == binv_df.index):
            #was a bare IOError: give the maintainer something to go on
            raise IOError('my data index does not match the binv index')
예제 #28
0
    def to_curveset(
        self,
        df_raw,  #raw rfda curve table (row 0 holds counts; col 0 holds curve names)
        bsmt_ht=None,  #for combination curves,
        nrpParkAD=215.0,  #default $/m2 for NRP uderground parking

        #metatdata default
        #NOTE(review): mutable default argument -- only read below (never
        #mutated), but a None-sentinel would be safer; confirm before changing
        metac_d={
            'desc': 'rfda format converted to CanFlood format',
            'location': 'Calgary and Edmonton, AB',
            'date': 2014,
        },
        logger=None,
    ):
        """
        converting rfda style residential + nrp curves into CanFlood

        Builds, in order: individual curves, combined basement+mainfloor
        curves (per content/structural type), fully combined BC+BS+MC+MS
        curves, combined contents+structural curves (per floor), and a
        fixed-rate NRP underground parking curve. Returns a dict of
        {curve name: single-column DataFrame}, each validated by
        self.check_crvd.

        TODO: add for displacement stlye?
        """

        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger = self.logger
        if bsmt_ht is None: bsmt_ht = self.bsmt_ht

        log = logger.getChild('to_curveset')

        #=======================================================================
        # precheck
        #=======================================================================
        assert isinstance(df_raw, pd.DataFrame)
        assert isinstance(bsmt_ht, float)

        log.debug('on %s' % str(df_raw.shape))

        #=======================================================================
        # update the defaults
        #=======================================================================
        crve_d = self.crve_d.copy()  #start with a copy
        #merge the generated 'source' stamp with the caller's metadata
        #(metac_d entries win on key collision)
        for k, v in {
                **{
                    'source':
                    'CanFlood.%s_%s_%s' % (mod_name, self.tag,
                                           datetime.datetime.today().strftime('%Y%m%d')),
                    #'bsmt_ht':bsmt_ht,
                },
                **metac_d
        }.items():
            crve_d[k] = v
        #==============================================================================
        # load
        #==============================================================================

        #drop the counts
        df = df_raw.drop(0, axis=0)

        #set the index
        df = df.set_index(0)
        df.index.name = 'cname'

        #get the curve name prefix
        df['cnp'] = df.index.str.slice(start=0, stop=2)

        #set the dcount columns
        df = df.rename(columns={1: 'dcount'})

        #re-order the columns
        boolcol = df.columns.isin(['dcount', 'cnp'])
        df = df.loc[:, ~boolcol].join(df.loc[:, boolcol])

        #==============================================================================
        # convert residential tags
        #==============================================================================
        #identify the residentials
        #assumes column 24 holds the rfda curve-type code (MC/MS/BC/BS) -- TODO confirm
        rboolidx = df.loc[:, 24].isin(['MC', 'MS', 'BC', 'BS'])

        #build new index
        df['nindex'] = df.loc[rboolidx, 'cnp'] + '_' + df.loc[rboolidx, 24]

        df.loc[~rboolidx, 'nindex'] = df[~rboolidx].index
        df['oindex'] = df.index
        df = df.set_index('nindex', drop=True)

        #ctype = df.loc[boolidx,24].to_dict() #get all the types

        #==============================================================================
        # create individuals--------------------
        #==============================================================================
        res_d = dict()  #container for CanFlood function tabs
        dd_set_d = dict()  #container for all the depth damages
        dd_set_d2 = dict()

        boolar = df.columns.isin(['dcount', 'cnp', 'oindex'])

        for cname, row in df.iterrows():

            #==========================================================================
            # set meta info
            #==========================================================================
            dcurve_d = crve_d.copy()
            dcurve_d['tag'] = cname

            #==========================================================================
            # depth damage info
            #==========================================================================
            #get just depth damage
            dd_ser = row[~boolar].dropna()

            #identify depths (evens)
            #relies on the rfda convention of alternating depth/damage columns
            bool_dep = dd_ser.index.values % 2 == 0

            #identiy damages
            bool_dmg = np.invert(bool_dep)

            #bundle depth:damage
            dd_d = dict(
                zip(dd_ser[bool_dep].tolist(), dd_ser[bool_dmg].tolist()))

            #check for validty
            if max(dd_d.values()) == 0:
                print('%s has no damages! skipping' % cname)

            #add it in
            res_d[cname] = {**dcurve_d, **dd_d}
            dd_set_d[cname] = dd_d  #used below  B+M
            dd_set_d2[cname] = dd_d  #used below S+C
            print('added %s' % dcurve_d['tag'])

        #==============================================================================
        # create combined basement+mf----------------
        #==============================================================================
        #slice to this
        boolidx = df.loc[:, 24].isin(['MC', 'MS', 'BC', 'BS'])

        assert boolidx.any(
        ), 'unable to find expected curve type keys in column 24'

        df_res = df.loc[boolidx, :].dropna(axis=1, how='all')

        df_res = df_res.rename(columns={24: 'ctype'})

        cnp_l = df_res.loc[:, 'cnp'].unique().tolist()

        #loop and collect
        res_bm_C_d = dict()  #collect just these
        res_bm_S_d = dict()  #collect just these

        for cnp in cnp_l:
            #loop on structural and contents
            for ctype in ('S', 'C'):
                #get this
                boolidx1 = np.logical_and(
                    df_res['cnp'] == cnp,  #this class
                    df_res['ctype'].str.contains(ctype),  #this ctype
                )

                #check it: expect exactly one M and one B curve per (cnp, ctype)
                if not boolidx1.sum() == 2:
                    raise IOError('unexpected count')

                #======================================================================
                # #collect by floor
                #======================================================================
                fdd_d = dict()
                for floor in ('M', 'B'):

                    boolidx2 = np.logical_and(
                        boolidx1, df_res['ctype'].str.contains(floor))

                    if not boolidx2.sum() == 1:
                        raise IOError('unexpected count')

                    #get this dict
                    cname = df_res.index[boolidx2][0]
                    fdd_d[floor] = dd_set_d.pop(cname)

                #======================================================================
                # adjust basement
                #======================================================================
                #add bsmt_ht to all the basement

                res_serf = pd.Series(fdd_d['B'])

                if bsmt_ht > max(res_serf.index):
                    raise IOError(
                        'requested basement height %.2f out of range' %
                        bsmt_ht)

                #shift basement depths down by the basement height
                res_serf.index = res_serf.index - bsmt_ht
                res_serf.index = res_serf.index.values.round(2)

                #get max value
                dmgmax = max(res_serf)

                #drop all positives (basement cant have posiitive depths)
                res_ser = res_serf[res_serf.index <= 0].sort_index(
                    ascending=True)

                #set highest value to max
                res_ser.loc[0] = dmgmax

                #======================================================================
                # assemble
                #======================================================================
                #main-floor damages stack on top of the full basement damage
                mf_ser = pd.Series(fdd_d['M']) + dmgmax

                #NOTE(review): Series.append was removed in pandas 2.0;
                #this block requires pandas<2 (or a pd.concat migration)
                res_ser = res_ser.append(
                    mf_ser, ignore_index=False).sort_index(ascending=True)

                #only take positive values
                res_ser = res_ser[res_ser > 0]
                #======================================================================
                # set meta
                #======================================================================
                tag = '%s_%s' % (cnp, ctype)

                dcurve_d = crve_d.copy()
                dcurve_d['tag'] = tag
                dcurve_d[
                    'desc'] = ' %s \nrfda converted and combined w/ bsmt_ht = %.2f, M+B ' % (
                        dcurve_d['desc'], bsmt_ht)

                #add it in
                res_d[tag] = {**dcurve_d, **res_ser.to_dict()}

                if ctype == 'S':
                    res_bm_S_d[cnp] = res_ser
                elif ctype == 'C':
                    res_bm_C_d[cnp] = res_ser
                else:
                    raise Error('bad ctype')

                print('added %s' % tag)

        #======================================================================
        # create combined mf+bsmt+S+C----------
        #======================================================================
        for cnp, Sser in res_bm_S_d.items():
            Cser = res_bm_C_d[cnp]

            assert np.array_equal(Sser.index, Cser.index), 'index mismatch'
            assert not cnp in res_d, 'tag already taken'

            #==================================================================
            # cpombine
            #==================================================================
            res_ser = Cser + Sser

            dcurve_d = crve_d.copy()
            dcurve_d['tag'] = cnp
            dcurve_d[
                'desc'] = ' %s \nrfda converted and combined w/ bsmt_ht = %.2f, BC+BS+MC+MS' % (
                    dcurve_d['desc'], bsmt_ht)

            res_d[cnp] = {**dcurve_d, **res_ser.to_dict()}

        #======================================================================
        # combine Contes + Struc
        #======================================================================
        for cnp in cnp_l:
            for floor in ('B', 'M'):
                dd_C_ser = dd_set_d2['%s_%sC' % (cnp, floor)]
                dd_S_ser = dd_set_d2['%s_%sS' % (cnp, floor)]

                res_ser = pd.Series(dd_C_ser) + pd.Series(dd_S_ser)

                tag = '%s_%s' % (cnp, floor)

                assert not tag in res_d

                dcurve_d = crve_d.copy()
                dcurve_d['tag'] = tag
                dcurve_d[
                    'desc'] = ' %s \nrfda converted and combined w/ bsmt_ht = %.2f, C+S' % (
                        dcurve_d['desc'], bsmt_ht)

                res_d[tag] = {**dcurve_d, **res_ser.to_dict()}

        #=======================================================================
        # NRP underground parking------------
        #=======================================================================
        #flat-rate curve: same damage at depth 0 and 10
        tag = 'nrpUgPark'

        dcurve_d = crve_d.copy()
        dcurve_d['tag'] = tag
        dcurve_d['desc'] = ' %s \nrfda underground parking fixed %.2f $/m2' % (
            dcurve_d['desc'], nrpParkAD)

        res_d[tag] = {
            **dcurve_d,
            **{
                0: nrpParkAD,
                10: nrpParkAD,
            }
        }

        #==============================================================================
        # convert and check
        #==============================================================================
        df_d = dict()
        for cname, d in res_d.items():
            self.check_crvd(d)
            df_d[cname] = pd.Series(d).to_frame()

        #======================================================================
        # wrap
        #======================================================================
        log.info('finished w/ %i' % len(df_d))
        return df_d
예제 #29
0
파일: hazus.py 프로젝트: NRCan/CanFlood
    def output_set(
            self,  #output a collection of librarries
            lib_d,
            logger=None,
            out_dir=None,
            plot=True,  #whether to also plot
    ):
        """Write each curve library in lib_d to disk (and optionally plot it).

        lib_d is a two-level nesting {source: {group: {tab: DataFrame}}}.
        Each source gets its own sub-directory under out_dir; each group is
        written to one .xls workbook, plus one summary figure when plot=True.

        Returns
        -------
        dict: nested metadata {source: {group: {'xls_fp':..., 'fig_fp':...}}}
        """
        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger = self.logger
        log = logger.getChild('output_set')
        if out_dir is None: out_dir = self.out_dir

        #=======================================================================
        # loop and write
        #=======================================================================
        meta_d = dict()
        for srce_raw, l1_d in lib_d.copy().items():
            #sanitized source key doubles as the directory suffix
            srce = srce_raw.replace('.', '').replace(' ', '')
            srce_dir = os.path.join(out_dir, 'HAZUS_%s' % srce)
            if not os.path.exists(srce_dir):
                os.makedirs(srce_dir)
            meta_d[srce] = dict()

            for gn1_raw, l2_d in l1_d.items():
                gn1 = gn1_raw[2:]  #strip the 2-char ordering prefix
                meta_d[srce][gn1] = dict()

                #every entry must already be a frame
                for df in l2_d.values():
                    if not isinstance(df, pd.DataFrame):
                        raise Error('bad type on %s' % gn1)

                #assemble the sanitized filename
                ofn = ('%s_%s_%s' % (self.libName, srce, gn1)
                       ).replace(' ', '').replace('.', '')

                #write the workbook
                meta_d[srce][gn1]['xls_fp'] = self.output(l2_d,
                                                          ofn=ofn + '.xls',
                                                          out_dir=srce_dir)

                #===========================================================================
                # plots
                #===========================================================================
                if plot:
                    fig = self.plotAll(
                        l2_d,
                        title='%s_%s_%s' % (self.libName, srce, gn1),
                        lib_as_df=True,
                        xlim=(0, 100),
                    )

                    meta_d[srce][gn1]['fig_fp'] = self.output_fig(fig,
                                                                  out_dir=srce_dir,
                                                                  fname=ofn)

        log.info('wrote %i sets' % (len(meta_d)))

        return meta_d