Example #1
    def get_dikes_vlay(
        self,  #promote the index on the dikes_vlay
        df=None,
        vlay=None,  #geometry to use for writing
        logger=None,
    ):

        #=======================================================================
        # defaults
        #=======================================================================

        if logger is None: logger = self.logger
        log = logger.getChild('get_dikes_vlay')

        if df is None: df = self.expo_df.copy()
        if vlay is None: vlay = self.dike_vlay

        #=======================================================================
        # update the dikes layer
        #=======================================================================
        geo_d = vlay_get_fdata(vlay, geo_obj=True, logger=log, rekey=self.sid)

        #add the index as a column so it gets into the layer
        df.index.name = None
        df[self.sid] = df.index

        return self.vlay_new_df2(df,
                                 geo_d=geo_d,
                                 logger=log,
                                 layname=vlay.name())
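
A minimal, self-contained sketch of the index-promotion step above (plain pandas; the sid name and values are illustrative):

    import pandas as pd

    # exposure results keyed by the global segment id
    df = pd.DataFrame({'crest_el': [10.2, 11.5]}, index=pd.Index([101, 102], name='sid'))

    df.index.name = None  # drop the name so it doesn't collide with the new column
    df['sid'] = df.index  # promote the index so it persists as a layer field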
Example #2
    def gsamp(self, #resample results to a grid (from single asset res layer)
              avlay, #asset results layer
              gvlay=None, #new polygon grid to sample
              gid=None,
              res_fnl = ['ead'], #list of result fields to downsample
              use_raw_fn=True, #whether to convert the summary field names back to raw.
              
              logger=None,
              discard_nomatch=False,
              **jkwargs #joinbylocationsummary kwargs
              ):
        """
        resample results 
        """
        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger=self.logger
        log=logger.getChild('gsamp')
        
        
        if gvlay is None: gvlay = self.gvlay
        if gid is None: gid=self.gid
 
        log.info('downsampling \'%s\' (%i feats) to \'%s\' (%i)'%(
            avlay.name(), avlay.dataProvider().featureCount(), gvlay.name(),
            gvlay.dataProvider().featureCount()))
        
        #=======================================================================
        # prechecks
        #=======================================================================
        fn_l = [f.name() for f in gvlay.fields()]
        assert gid in fn_l, gid
        
        fn_l = [f.name() for f in avlay.fields()]
        s = set(res_fnl).difference(fn_l)
        if len(s) > 0:
            raise Error('\'%s\' missing requested results fields: %s'%(avlay.name(), s))
        
        #check the gids
        gid_d = vlay_get_fdata(gvlay, fieldn=gid, logger=log)
        assert pd.Series(gid_d).is_unique, '%s has bad gid=\'%s\''%(gvlay.name(), gid)
        
        #=======================================================================
        # calc
        #=======================================================================
        gvlay1, nfn_l = self.joinbylocationsummary(gvlay, avlay, res_fnl, use_raw_fn=use_raw_fn,
                                                   discard_nomatch=discard_nomatch,
                                                   **jkwargs)
        
        #=======================================================================
        # wrap
        #=======================================================================
        if not discard_nomatch:
            assert gvlay1.dataProvider().featureCount()==gvlay.dataProvider().featureCount()
        
 
        return gvlay1, nfn_l
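
The prechecks above reduce to two plain-pandas idioms: a set difference for required fields and is_unique for the key. A self-contained sketch with made-up field names:

    import pandas as pd

    res_fnl = ['ead']                    # requested result fields
    fn_l = ['fid', 'ead', 'geometry']    # fields found on the layer
    s = set(res_fnl).difference(fn_l)
    assert len(s) == 0, 'missing requested results fields: %s' % s

    gid_d = {0: 'g1', 1: 'g2', 2: 'g3'}  # fid -> gid
    assert pd.Series(gid_d).is_unique    # each grid cell keyed exactly once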
Example #3
    def run(self,#join tabular results back to the finv
              vlay_raw, #finv vlay (to join results to)
              df_raw=None,
              cid=None, #linking column/field name

            #data cleaning
              relabel = 'ari', #how to relabel event fields using the ttl values
                #None: no relabeling
                #aep: use aep values (this is typically the form already)
                #ari: convert to ari values
              keep_fnl = 'all', #list of field names to keep from the vlay (or 'all' to keep all)

              layname = None,

              ): 
        """
        todo: clean this up and switch over to joinattributestable algo
 
        """
        #=======================================================================
        # defaults
        #=======================================================================

        log = self.logger.getChild('djoin')
        if cid is None: cid = self.cid
        if layname is None: layname=self.resname
        if layname is None: layname = 'djoin_%s_%s'%(self.tag, vlay_raw.name())
        if df_raw is None: df_raw=self.data_d[self.fp_attn]

        #=======================================================================
        # get data
        #=======================================================================
        lkp_df = self._prep_table(df_raw, relabel, log=log)
        vlay_df = self._prep_vlay(vlay_raw, keep_fnl, log=log)

        
        #=======================================================================
        # join data
        #=======================================================================
        res_df = self.fancy_join(vlay_df, lkp_df, logger=log)

        #=======================================================================
        # generate the new layer--------
        #=======================================================================
        geo_d = vlay_get_fdata(vlay_raw, geo_obj=True, logger=log)

        res_vlay = self.vlay_new_df2(res_df, geo_d=geo_d, crs = vlay_raw.crs(),
                                    layname=layname, logger=log)

        
        log.info('finished on \'%s\''%res_vlay.name())
        
        return res_vlay
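
fancy_join() is not shown here, but conceptually it is a keyed left join of the lookup results onto the layer table. A pandas stand-in (column names are made up; this is not the actual fancy_join implementation):

    import pandas as pd

    vlay_df = pd.DataFrame({'xid': [1, 2, 3], 'zone': ['a', 'b', 'c']})
    lkp_df = pd.DataFrame({'ead': [0.5, 1.2]}, index=pd.Index([1, 3], name='xid'))

    res_df = vlay_df.join(lkp_df, on='xid')  # unmatched features get NaN results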
Example #4
    def vectorize(
        self,  #map results back onto the finv geometry 
        res_df_raw,
        layName=None,
    ):

        log = self.logger.getChild('vectorize')
        if layName is None: layName = 'exlikes_%s' % self.tag
        res_df = res_df_raw.copy()
        #======================================================================
        # extract data from finv
        #======================================================================
        vlay = self.fc_vlay

        #get geometry
        geo_d = vlay_get_fdata(vlay, geo_obj=True)

        #get key conversion
        fid_cid_d = vlay_get_fdata(vlay, fieldn=self.cid, logger=log)

        #convert geo
        cid_geo_d = {fid_cid_d[k]: v for k, v in geo_d.items()}

        #======================================================================
        # build the layer
        #======================================================================
        assert res_df.index.name == self.cid, 'bad index on res_df'

        res_df[self.cid] = res_df.index  #copy it over

        res_vlay = vlay_new_df(res_df,
                               vlay.crs(),
                               geo_d=cid_geo_d,
                               layname=layName,
                               logger=log)

        return res_vlay
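
The fid-to-cid rekeying is a pure dict transform; a minimal sketch with stand-in values (strings in place of QgsGeometry objects):

    geo_d = {0: 'POINT (0 0)', 1: 'POINT (1 1)'}  # fid -> geometry
    fid_cid_d = {0: 101, 1: 102}                  # fid -> cid, read off the finv layer

    cid_geo_d = {fid_cid_d[k]: v for k, v in geo_d.items()}
    assert set(cid_geo_d) == {101, 102}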
Example #5
    def to_finv(
            self,  #convert RFDA inventories
            rinv_vlay,
            drop_colns=['ogc_fid', 'fid'],  #optional columns to drop from df
            bsmt_ht=None):

        if bsmt_ht is None:
            bsmt_ht = self.bsmt_ht

        log = self.logger.getChild('to_finv')

        cid = self.cid

        assert isinstance(rinv_vlay, QgsVectorLayer)
        assert isinstance(bsmt_ht, float)

        dp = rinv_vlay.dataProvider()
        assert dp.featureCount() > 0, 'no features'

        log.info('on %s w/ %i feats' % (rinv_vlay.name(), dp.featureCount()))

        #======================================================================
        # get df
        #======================================================================
        df_raw = vlay_get_fdf(rinv_vlay, logger=log)
        df = df_raw.drop(drop_colns, axis=1, errors='ignore')

        assert len(df.columns) >= 26, \
            'expects at least 26 columns. got %i' % len(df.columns)

        log.info('loaded w/ %s' % str(df.shape))

        #convert the position-keyed mapping to loaded column labels
        d = self.legacy_ind_d
        lab_d = {df.columns[k]: v for k, v in d.items()}

        back_lab_d = dict(zip(lab_d.values(), lab_d.keys()))

        #relabel to standardize
        df = df.rename(columns=lab_d)

        log.info('converted columns:\n    %s' % lab_d)

        #id non-res
        nboolidx = df['struct_type'].str.startswith('S')

        log.info('%i (of %i) flagged as NRP' % (nboolidx.sum(), len(nboolidx)))
        """
        view(df)
        """
        #======================================================================
        # build f0-----------
        #======================================================================
        res_df = pd.DataFrame(index=df.index)
        res_df[self.cid] = df['id1'].astype(int)

        res_df.loc[:, 'f0_elv'] = df['gel'] + df['ff_height']
        res_df.loc[:, 'f0_elv'] = res_df['f0_elv'].round(self.prec)
        res_df['f0_scale'] = df['area'].round(self.prec)
        res_df['f0_cap'] = np.nan

        #resi
        res_df['f0_tag'] = df['class'] + df['struct_type']  #suffix added for main/basement below

        #NRP (overwriting any NRPs)
        res_df.loc[nboolidx, 'f0_tag'] = df.loc[nboolidx, 'class']

        #======================================================================
        # check
        #======================================================================
        if not res_df[cid].is_unique:
            boolidx = res_df[cid].duplicated(keep=False)

            raise Error(
                'invalid indexer. %s column has %i (of %i) duplicated values: \n%s'
                % (back_lab_d['id1'], boolidx.sum(), len(boolidx),
                   res_df.loc[boolidx, cid].sort_values()))

        assert not res_df['f0_tag'].isna().any(), \
            'got some nulls on %s and %s' % (back_lab_d['class'], back_lab_d['struct_type'])

        assert not res_df['f0_elv'].isna().any(), \
            'got some nulls on %s and %s' % (back_lab_d['gel'], back_lab_d['ff_height'])

        assert not res_df['f0_scale'].isna().any(), \
            'got some nulls on %s' % back_lab_d['area']

        #======================================================================
        # build f1 (basements or structural)-----------
        #======================================================================
        #convert/clean basements
        boolidx = df['bsmt_f'].replace(truefalse_d).astype(bool)

        log.info('adding nested curves for %i (of %i) basements' %
                 (boolidx.sum(), len(boolidx)))

        #resi basements
        res_df.loc[boolidx, 'f1_tag'] = res_df.loc[boolidx, 'f0_tag'] + '_B'
        res_df.loc[boolidx, 'f1_scale'] = res_df.loc[boolidx, 'f0_scale']
        res_df.loc[boolidx, 'f1_elv'] = res_df.loc[boolidx, 'f0_elv'] - bsmt_ht
        res_df['f1_cap'] = np.nan

        #re-tag main floor
        res_df.loc[:, 'f0_tag'] = res_df['f0_tag'] + '_M'

        #NRP
        res_df.loc[nboolidx, 'f0_tag'] = df.loc[nboolidx, 'class']  #re-tag
        res_df.loc[nboolidx, 'f1_tag'] = df.loc[nboolidx, 'struct_type']
        res_df.loc[nboolidx, 'f1_elv'] = res_df.loc[nboolidx, 'f0_elv']
        res_df.loc[nboolidx, 'f1_scale'] = res_df.loc[nboolidx, 'f0_scale']

        #=======================================================================
        # nrp basements-----------
        #=======================================================================
        NBboolidx = np.logical_and(
            nboolidx,  #NRPs 
            boolidx,  #basement=Y
        )
        if NBboolidx.any():
            log.info('building %i (of %i) NRP basements' %
                     (NBboolidx.sum(), len(boolidx)))

            nb = NBboolidx
            res_df.loc[nb, 'f2_tag'] = 'nrpUgPark'
            res_df.loc[nb, 'f2_scale'] = res_df.loc[nb, 'f0_scale']
            res_df.loc[nb, 'f2_elv'] = res_df.loc[nb, 'f0_elv'] - bsmt_ht
            res_df.loc[nb, 'f2_cap'] = np.nan

        #======================================================================
        # add in everything else
        #======================================================================
        res_df = res_df.join(df)
        """
        view(res_df)
        """

        #======================================================================
        # generate vlay
        #======================================================================
        geo_d = vlay_get_fdata(rinv_vlay, geo_obj=True, logger=log)
        finv_vlay = vlay_new_df(res_df,
                                rinv_vlay.crs(),
                                geo_d=geo_d,
                                logger=log,
                                layname='%s_finv' % rinv_vlay.name())

        fcnt = finv_vlay.dataProvider().featureCount()
        assert fcnt == dp.featureCount()

        log.info('finished w/ \'%s\' w/ %i feats' % (finv_vlay.name(), fcnt))

        return finv_vlay
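
The positional relabeling hinges on legacy_ind_d mapping column positions to standard names; a self-contained sketch (the mapping and column names are illustrative):

    import pandas as pd

    df = pd.DataFrame(columns=['col_a', 'col_b', 'col_c'])
    legacy_ind_d = {0: 'id1', 2: 'class'}  # position -> standard label

    lab_d = {df.columns[k]: v for k, v in legacy_ind_d.items()}  # {'col_a': 'id1', 'col_c': 'class'}
    back_lab_d = dict(zip(lab_d.values(), lab_d.keys()))         # inverted, for error messages

    df = df.rename(columns=lab_d)
    assert list(df.columns) == ['id1', 'col_b', 'class']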
Example #6
    def join_pfails(
            self,  #join the pfail data onto the ifz polys
            eifz_d=None,  #influence polygons {eTag: poly Vlay}
            pf_df=None,
            pf_min=0.0,  #threshold below which to ignore
    ):

        #=======================================================================
        # defaults
        #=======================================================================
        log = self.logger.getChild('jp')
        if eifz_d is None: eifz_d = self.eifz_d
        if pf_df is None: pf_df = self.pfail_df

        log.info('on %i events w/ pfail %s' % (len(eifz_d), str(pf_df.shape)))
        #=======================================================================
        # precheck
        #=======================================================================
        miss_l = set(self.etag_l).difference(pf_df.columns)
        assert len(miss_l) == 0, 'event mismatch: %s' % miss_l

        #check library keys
        miss_l = set(self.etag_l).difference(eifz_d)
        assert len(miss_l) == 0, 'event mismatch: %s' % miss_l

        #=======================================================================
        # for eTag, ed in eifz_d.items():
        #     miss_l = set(['ifz_vlay']).difference(ed.keys())
        #     assert len(miss_l)==0, '%s keys mismatch: %s'%(eTag, miss_l)
        #=======================================================================

        #=======================================================================
        # loop on events----
        #=======================================================================
        res_d = dict()
        for eTag, vlay_raw in eifz_d.items():
            log = self.logger.getChild('jp.%s' % eTag)
            #===================================================================
            # pull the data----
            #===================================================================
            """
            ed.keys()
            """
            #===================================================================
            # #vlay
            #===================================================================
            #vlay_raw = ed['ifz_vlay']
            self._check_ifz(vlay_raw)
            vdf = vlay_get_fdf(vlay_raw, logger=log)
            geoR_d = vlay_get_fdata(vlay_raw,
                                    geo_obj=True,
                                    logger=log,
                                    rekey=self.ifidN)

            log.debug("on \'%s\' w/ %i feats" % (vlay_raw.name(), len(geoR_d)))
            #===================================================================
            # #keys
            #===================================================================
            """get from dike sinstead
            sid_ifz_d = ed['sid_ifz_d']"""
            sid_ifz_d = pf_df[self.ifidN].to_dict()

            #check keys
            miss_l = set(sid_ifz_d.values()).symmetric_difference(
                vdf[self.ifidN])
            assert len(miss_l) == 0, '%s got key mismatch: %s' % (eTag, miss_l)

            #===================================================================
            # pfail
            #===================================================================
            #clean to just our event
            l = set(self.etag_l).difference([eTag])  #other event data
            idf = pf_df.drop(l, axis=1).rename(columns={eTag: self.pfn})

            idf.loc[:, self.pfn] = idf[self.pfn].round(self.prec)  #force rounding (again)
            """
            view(idf)
            """
            #idf = idf.join(pd.Series(sid_ifz_d, name=self.ifidN))
            idf['eTag'] = eTag  #nice to have this on there

            #apply threshold
            boolidx = idf[self.pfn] <= pf_min

            if boolidx.all():
                log.warning('all (of %i) %s below threshold... skipping' %
                            (len(boolidx), self.pfn))
                continue
            elif boolidx.any():
                log.info('got %i (of %i) %s below threshold (%.2f)' %
                         (boolidx.sum(), len(boolidx), self.pfn, pf_min))
                idf = idf.loc[~boolidx, :]

            #make sure all these keys are there
            miss_l = set(idf.index.values).difference(sid_ifz_d.keys())
            assert len(miss_l) == 0, 'missing keys in sid_ifz_d: %s' % miss_l

            #clean out keys
            sid_ifz_d2 = {k: v for k, v in sid_ifz_d.items() if k in idf.index.values}
            assert len(sid_ifz_d2) == len(idf), 'failed to get the expected matches'
            #===================================================================
            # build new layer-----
            #===================================================================
            """keying everything by sid... one feature per segment"""

            #duplicate the geometry on our keys
            geo_d = {
                sk: QgsGeometry(geoR_d[ik])
                for sk, ik in sid_ifz_d2.items()
            }

            res_d[eTag] = self.vlay_new_df2(idf,
                                            geo_d=geo_d,
                                            logger=log,
                                            index=True,
                                            layname='%s_%s_ifz' %
                                            (self.tag, eTag))

            log.debug('df %s' % str(idf.shape))

        #=======================================================================
        # wrap
        #=======================================================================
        log = self.logger.getChild('jp')
        log.info('finished building %i layers' % len(res_d))

        self.ipf_vlay_d = res_d
        return self.ipf_vlay_d
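
Several segments can share one influence zone, so each segment gets its own copy of the zone geometry (the real code wraps the lookup in QgsGeometry() to force that copy). A sketch with stand-in values, strings in place of geometries:

    sid_ifz_d2 = {1001: 'A', 1002: 'B', 1003: 'A'}  # segment id -> influence zone id
    geoR_d = {'A': 'poly_A', 'B': 'poly_B'}         # zone id -> geometry

    geo_d = {sk: geoR_d[ik] for sk, ik in sid_ifz_d2.items()}
    assert len(geo_d) == 3  # one geometry entry per segment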
Example #7
    def to_finv(
        self,  #clean a raw vlay and add some finv columns
        in_vlay,
        drop_colns=['ogc_fid', 'fid'],  #optional columns to drop from df
        new_data={},
        newLayname=None,
        logger=None,
    ):
        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger = self.logger
        log = logger.getChild('to_finv')
        if newLayname is None: newLayname = 'finv_%s' % in_vlay.name()

        #=======================================================================
        # precheck
        #=======================================================================
        assert isinstance(in_vlay, QgsVectorLayer)

        dp = in_vlay.dataProvider()

        log.info('on %s w/ %i feats and %i new columns' %
                 (in_vlay.name(), dp.featureCount(), len(new_data)))

        self.feedback.upd_prog(20)
        #=======================================================================
        # extract data
        #=======================================================================
        df_raw = vlay_get_fdf(in_vlay, logger=log)
        geo_d = vlay_get_fdata(in_vlay, geo_obj=True, logger=log)

        self.feedback.upd_prog(50)

        #=======================================================================
        # clean
        #=======================================================================
        #drop specified columns
        df0 = df_raw.drop(drop_colns, axis=1, errors='ignore')

        #convert empty strings to null
        df1 = df0.replace(to_replace='', value=np.nan)
        log.info('replaced %i (of %i) null values' %
                 (df1.isna().sum().sum(), df1.size))

        #drop empty fields
        df2 = df1.dropna(axis=1, how='all')
        log.info('dropped %i empty columns' %
                 (len(df1.columns) - len(df2.columns)))

        self.feedback.upd_prog(60)

        #=======================================================================
        # add fields
        #=======================================================================
        #build the new data
        log.info('adding field data:\n    %s' % new_data)

        #join the two
        res_df = df2.join(pd.DataFrame(index=df_raw.index, data=new_data))

        self.feedback.upd_prog(70)

        #=======================================================================
        # check data
        #=======================================================================
        """" no? not for this intermediate function?
        self.check_finv()
        
        """
        #=======================================================================
        # reconstruct layer
        #=======================================================================
        finv_vlay = self.vlay_new_df2(res_df,
                                      geo_d=geo_d,
                                      crs=in_vlay.crs(),
                                      logger=log,
                                      layname=newLayname)

        #=======================================================================
        # wrap
        #=======================================================================
        fcnt = finv_vlay.dataProvider().featureCount()
        assert fcnt == dp.featureCount()

        log.info('finished w/ \'%s\' w/ %i feats' % (finv_vlay.name(), fcnt))

        self.feedback.upd_prog(99)
        return finv_vlay
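
The two cleaning steps (empty strings to null, then dropping all-null columns) chain naturally in pandas; a self-contained sketch:

    import numpy as np
    import pandas as pd

    df0 = pd.DataFrame({'a': ['', 'x'], 'b': ['', '']})
    df1 = df0.replace(to_replace='', value=np.nan)  # empty strings -> null
    df2 = df1.dropna(axis=1, how='all')             # 'b' is all-null and gets dropped

    assert list(df2.columns) == ['a']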
Example #8
    def prep_dike(
            self,  #do some pre-calcs on the dike layer
            vlay_raw,
            dikeID=None,  #dike identifier field
            segID=None,  #segment identifier field
            cbfn=None,  #crest buffer fieldname
            logger=None):
        """
        not sure it makes sense to have this separate from get_dike_expo anymore
        """

        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger = self.logger
        log = logger.getChild('prep_dike')

        if dikeID is None: dikeID = self.dikeID
        if segID is None: segID = self.segID
        if cbfn is None: cbfn = self.cbfn

        mstore = QgsMapLayerStore()  #build a new map store

        #=======================================================================
        # precheck
        #=======================================================================
        fnl = [f.name() for f in vlay_raw.fields()]
        #jcolns = [self.sid, 'f0_dtag', self.cbfn, self.segln]
        miss_l = set([dikeID, segID, 'f0_dtag', cbfn,
                      self.ifidN]).difference(fnl)
        assert len(miss_l) == 0, \
            'missing expected columns on dike layer: %s' % miss_l
        assert 'length' not in [s.lower() for s in fnl], \
            '\'length\' field not allowed on dikes layer'
        """try forcing
        assert 'int' in df[segID].dtype.name, 'bad dtype on dike layer %s'%segID
        assert 'int' in df[dikeID].dtype.name, 'bad dtype on dike layer %s'%dikeID"""

        #geometry
        assert 'Line' in QgsWkbTypes().displayString(
            vlay_raw.wkbType()), 'bad vector type on dike'

        #=======================================================================
        # add geometry data
        #=======================================================================
        d = {'CALC_METHOD': 0, 'INPUT': vlay_raw, 'OUTPUT': 'TEMPORARY_OUTPUT'}

        vlay = processing.run('qgis:exportaddgeometrycolumns',
                              d,
                              feedback=self.feedback)['OUTPUT']
        mstore.addMapLayer(vlay)
        """
        view(vlay)
        """
        #rename the field
        vlay = vlay_rename_fields(vlay, {'length': self.segln})
        mstore.addMapLayer(vlay)
        #=======================================================================
        # pull data
        #=======================================================================
        df = vlay_get_fdf(vlay, logger=log)

        #=======================================================================
        # build global segment ids
        #=======================================================================
        #type forcing
        for coln in [dikeID, segID]:
            try:
                df[coln] = df[coln].astype(int)
            except Exception as e:
                raise Error('failed to type set dike column \'%s\' w/ \n%s' %
                            (coln, e))

        s1 = df[dikeID].astype(str).str.pad(width=3, side='left', fillchar='0')
        s2 = df[segID].astype(str).str.pad(width=2, side='left', fillchar='0')

        df[self.sid] = s1.str.cat(others=s2).astype(int)

        assert df[self.sid].is_unique, \
            'failed to get unique global segment ids... check your dikeID and segID'
        # bundle back into vectorlayer
        geo_d = vlay_get_fdata(vlay, geo_obj=True, logger=log)
        res_vlay = self.vlay_new_df2(df,
                                     geo_d=geo_d,
                                     logger=log,
                                     layname='%s_dike_%s' %
                                     (self.tag, vlay_raw.name()))

        #=======================================================================
        # wrap
        #=======================================================================
        dp = res_vlay.dataProvider()
        log.info('loaded dike layer \'%s\'  w/ %i segments' %
                 (vlay.name(), dp.featureCount()))

        self.dike_vlay = res_vlay
        self.dike_df = df
        """attching this again in case th user passes new values"""
        self.dikeID, self.segID, self.cbfn = dikeID, segID, cbfn  #done during init
        self.sid_vals = df[self.sid].unique().tolist()
        mstore.removeAllMapLayers()

        return self.dike_vlay
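
The global segment id is a zero-padded concatenation of dikeID and segID, so it stays unique as long as dikeID fits in three digits and segID in two. A self-contained sketch:

    import pandas as pd

    df = pd.DataFrame({'dikeID': [1, 1, 12], 'segID': [1, 2, 1]})

    s1 = df['dikeID'].astype(str).str.pad(width=3, side='left', fillchar='0')  # '001', '001', '012'
    s2 = df['segID'].astype(str).str.pad(width=2, side='left', fillchar='0')   # '01', '02', '01'

    df['sid'] = s1.str.cat(others=s2).astype(int)  # 101, 102, 1201
    assert df['sid'].is_unique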
Example #9
    def get_dike_expo(
        self,  #get exposure set for dikes
        noFailr_d,
        dike_vlay=None,
        dtm_rlay=None,

        #dike layer parameters
        sid=None,

        #cross profile (transect) parameters
        simp_dike=0,  #value to simplify dike cl by
        dist_dike=40,  #distance along dike to draw perpendicular profiles
        dist_trans=100,  #length (from dike cl) of transect 
        tside='Left',  #side of dike line to draw transect
        dens_int=None,  #density of sample points along transect 
        nullSamp=None,  #value for bad samples
        write_tr=False,  #whether to output the unsampled transect layer
        calc_dist=True,  #whether to calculate distance between transects

        #wsl sampling
        #wsl_stat = 'Max', #for transect wsl zvals, stat to use for summary
        logger=None,
    ):

        #=======================================================================
        # defaults
        #=======================================================================
        if logger is None: logger = self.logger
        log = logger.getChild('get_dike_expo')
        if dike_vlay is None: dike_vlay = self.dike_vlay
        if dtm_rlay is None: dtm_rlay = self.dtm_rlay
        if sid is None: sid = self.sid
        if nullSamp is None: nullSamp = self.nullSamp
        mstore = QgsMapLayerStore()  #build a new map store
        #=======================================================================
        # prechecks
        #=======================================================================
        assert isinstance(dike_vlay, QgsVectorLayer)
        assert sid in [f.name() for f in dike_vlay.fields()], \
            'failed to get sid \'%s\'on dikes vlay fields'%(sid)

        #crs
        for layer in [dike_vlay, dtm_rlay]:
            assert layer.crs().authid() == self.qproj.crs().authid(), \
                '\'%s\' crs (%s) does not match projects: %s'%(
                    layer.name(), layer.crs().authid(), self.qproj.crs().authid())

        #tside
        tside_d = {'Left': 0, 'Right': 1, 'Both': 2}
        assert tside in tside_d, 'bad tside: \'%s\'' % tside
        assert not tside == 'Both', 'Both not supported'

        #=======================================================================
        # crossProfiles---
        #=======================================================================
        #=======================================================================
        # simplify
        #=======================================================================
        """because transects draws at each vertex, we wanna reduce the number.
        each vertex will still be on the original line
        
        NO! better to use the raw alignment... .
        even a small simplification can move the sampling off the DTM's dike crest"""
        if simp_dike > 0:
            d = {
                'INPUT': dike_vlay,
                'METHOD': 0,
                'OUTPUT': 'TEMPORARY_OUTPUT',
                'TOLERANCE': simp_dike
            }
            algo_nm = 'native:simplifygeometries'
            dvlay = processing.run(algo_nm, d,
                                   feedback=self.feedback)['OUTPUT']
            mstore.addMapLayer(dvlay)
        else:
            dvlay = dike_vlay
        #=======================================================================
        # densify
        #=======================================================================
        #make sure we get at least the number of transects requested
        d = {
            'INPUT': dvlay,
            'INTERVAL': dist_dike,
            'OUTPUT': 'TEMPORARY_OUTPUT'
        }

        algo_nm = 'native:densifygeometriesgivenaninterval'
        dvlay = processing.run(algo_nm, d, feedback=self.feedback)['OUTPUT']
        dvlay.setName('%s_prepd' % dike_vlay.name())
        #mstore.addMapLayer(dvlay) #need to keep this alive for the intersect calc below
        """
        self.vlay_write(dvlay)
        """
        #=======================================================================
        # transects
        #=======================================================================
        """draws perpindicular lines at vertex.
        keeps all the fields and adds some new ones"""
        d = {
            'ANGLE': 90,
            'INPUT': dvlay,
            'LENGTH': dist_trans,
            'OUTPUT': 'TEMPORARY_OUTPUT',
            'SIDE': tside_d[tside]
        }

        algo_nm = "native:transect"
        tr_vlay = processing.run(algo_nm, d, feedback=self.feedback)['OUTPUT']
        mstore.addMapLayer(tr_vlay)

        #see if indexer is unique
        tr_fid = 'TR_ID'
        ifn_d = vlay_get_fdata(tr_vlay, fieldn=tr_fid, logger=log)
        assert len(set(ifn_d.values())) == len(ifn_d)

        #=======================================================================
        # #clean it up
        #=======================================================================
        #remove unwanted fields
        """
        the raw transects have a 'fid' based on the dike fid (which is now non-unique)
        TR_ID is a new feature id (for the transects)
        """
        tr_colns = [sid, self.dikeID, self.segID, tr_fid, 'TR_SEGMENT']
        tr_vlay = self.deletecolumn(tr_vlay, tr_colns, logger=log, invert=True)

        #tr_vlay  = vlay_rename_fields(tr_vlay, {tr_fid:'fid_tr'})

        #=======================================================================
        # calc distance----
        #=======================================================================
        if calc_dist:
            """
            optional to join in distance along dike field for transects
            
            view(tr_vpts_vlay)
            view(tr_vlay)
            """

            #===================================================================
            # #pull out the vertices
            #===================================================================
            d = {'INPUT': dvlay, 'OUTPUT': 'TEMPORARY_OUTPUT'}

            algo_nm = 'native:extractvertices'
            tr_vpts_vlay = processing.run(algo_nm, d,
                                          feedback=self.feedback)['OUTPUT']
            mstore.addMapLayer(tr_vpts_vlay)

            #===================================================================
            # #join in values
            #===================================================================
            """
            linking up ID fields between vertex points (which have the distances) and the transects
            
            vpts: vertex_part_index
            tr: TR_SEGMENT

            """
            tr_df = vlay_get_fdf(tr_vlay, logger=log)
            vpts_df = vlay_get_fdf(tr_vpts_vlay, logger=log).drop(
                ['vertex_part', 'vertex_part_index', 'angle', 'fid'], axis=1)

            #add shifted index
            vpts_df['TR_SEGMENT'] = vpts_df['vertex_index'] + 1

            vpts_df.loc[:, 'distance'] = vpts_df['distance'].round(self.prec)

            #check
            assert len(vpts_df) == len(tr_df)

            #loop in 'sid' blocks
            """TR_SEGMENT is indexed per-segment"""
            indxr = 'TR_SEGMENT'
            tr_dfR = None
            log.info('joining vertex data (%s) to transect data (%s) in %i \'%s\' blocks'
                     % (str(vpts_df.shape), str(tr_df.shape), len(tr_df[sid].unique()), sid))

            for sidVal, tr_sdf in tr_df.copy().groupby(sid):
                vpts_sdf = vpts_df.groupby(sid).get_group(sidVal) \
                    .loc[:, ('distance', indxr)]
                df = tr_sdf.join(vpts_sdf.set_index(indxr), on=indxr)

                #append results
                if tr_dfR is None:
                    tr_dfR = df
                else:
                    tr_dfR = pd.concat([tr_dfR, df])  #DataFrame.append was removed in pandas 2.x

            #===================================================================
            # clean
            #===================================================================
            tr_dfR = tr_dfR.rename(columns={'distance': self.sdistn})
            tr_colns.append(self.sdistn)
            #===================================================================
            # check
            #===================================================================
            log.debug('finished w/ %s' % str(tr_dfR.shape))
            assert len(tr_df) == len(tr_dfR)

            #check index
            tr_dfR = tr_dfR.sort_index(axis=0)
            assert np.array_equal(tr_dfR.index, tr_df.index)

            #===================================================================
            # #recreate layer
            #===================================================================
            mstore.addMapLayer(tr_vlay)  #add in old layer
            geo_d = vlay_get_fdata(tr_vlay, geo_obj=True, logger=log)
            tr_vlay = self.vlay_new_df2(tr_dfR,
                                        geo_d=geo_d,
                                        logger=log,
                                        layname='%s_%s_tr_dist' %
                                        (self.tag, dike_vlay.name()))

            log.debug('finished joining in distances')
            """
            self.vlay_write(tr_vlay)
            """

        #=======================================================================
        # densify
        #=======================================================================
        #add additional vertices to improve the resolution of the wsl sample
        if dens_int is None: dens_int = min(dist_dike, dist_trans / 2)
        d = {
            'INPUT': tr_vlay,
            'INTERVAL': dens_int,
            'OUTPUT': 'TEMPORARY_OUTPUT'
        }
        algo_nm = 'native:densifygeometriesgivenaninterval'
        tr_vlay = processing.run(algo_nm, d, feedback=self.feedback)['OUTPUT']
        mstore.addMapLayer(tr_vlay)

        tr_vlay = self.fixgeometries(tr_vlay, logger=log)
        mstore.addMapLayer(tr_vlay)
        #=======================================================================
        # #clean it up
        #=======================================================================
        #remove unwanted fields
        """
        the raw transects have a 'fid' based on the dike fid (which is now non-unique)
        TR_ID is a new feature id (for the transects)
        """
        tr_vlay = self.deletecolumn(tr_vlay, tr_colns, logger=log, invert=True)
        mstore.addMapLayer(tr_vlay)

        tr_vlay.setName('%s_%s_transects' % (self.tag, dike_vlay.name()))

        log.info('got %i transects' % tr_vlay.dataProvider().featureCount())
        #=======================================================================
        # crest el----
        #=======================================================================
        #===================================================================
        # point on dike crest
        #===================================================================
        """gets a point for the vertex at the START of the line.
        should work fine for right/left.. but not for 'BOTH'
        """
        """this is cleaner for handling transects on either side... 
        but can result in MULTIPLE intersects for some geometries
        
        d = { 'INPUT' : tr_vlay, 'INPUT_FIELDS' : [], 'INTERSECT' : dvlay, 
             'INTERSECT_FIELDS' : ['fid'], 'INTERSECT_FIELDS_PREFIX' : 'dike_',
              'OUTPUT' : 'TEMPORARY_OUTPUT' }

        algo_nm = 'native:lineintersections'"""

        #get the head/tail point of the transect
        d = {
            'INPUT': tr_vlay,
            'OUTPUT': 'TEMPORARY_OUTPUT',
            'VERTICES': {
                'Left': '0',
                'Right': '-1'
            }[tside]
        }

        algo_nm = 'qgis:extractspecificvertices'
        cPts_vlay = processing.run(algo_nm, d,
                                   feedback=self.feedback)['OUTPUT']
        mstore.addMapLayer(cPts_vlay)
        """
        view(cPts_vlay)
        """
        #count check
        assert tr_vlay.dataProvider().featureCount() == \
            cPts_vlay.dataProvider().featureCount()

        #===================================================================
        # crest sample
        #===================================================================
        assert cPts_vlay.crs().authid() == dtm_rlay.crs().authid(), 'CRS mismatch!'
        d = {
            'COLUMN_PREFIX': 'dtm',
            'INPUT': cPts_vlay,
            'OUTPUT': 'TEMPORARY_OUTPUT',
            'RASTERCOPY': dtm_rlay
        }
        algo_nm = 'qgis:rastersampling'
        cPts_vlay = processing.run(algo_nm, d,
                                   feedback=self.feedback)['OUTPUT']
        mstore.addMapLayer(cPts_vlay)

        #=======================================================================
        # clean up
        #=======================================================================
        cPts_vlay = vlay_rename_fields(cPts_vlay, {'dtm1': self.celn})

        tr_colns.append(self.celn)
        cPts_vlay = self.deletecolumn(cPts_vlay,
                                      tr_colns,
                                      logger=log,
                                      invert=True,
                                      layname='%s_cPts' % (tr_vlay.name()))
        #mstore.addMapLayer(cPts_vlay)

        #=======================================================================
        # join back
        #=======================================================================
        """easier to keep all the data on the transects
        
        self.vlay_write(cPts_vlay)
        view(tr_vlay)
        """

        d = {
            'DISCARD_NONMATCHING': False,
            'FIELD': tr_fid,
            'FIELDS_TO_COPY': [self.celn],
            'FIELD_2': tr_fid,
            'INPUT': tr_vlay,
            'INPUT_2': cPts_vlay,
            'METHOD': 1,
            'OUTPUT': 'TEMPORARY_OUTPUT',
            'PREFIX': ''
        }
        algo_nm = 'native:joinattributestable'
        tr_vlay = processing.run(algo_nm, d, feedback=self.feedback)['OUTPUT']
        #=======================================================================
        # output
        #=======================================================================
        tr_vlay.setName('%s_%s_transects' % (self.tag, dike_vlay.name()))
        if write_tr:
            self.vlay_write(tr_vlay, logger=log)
        self.tr_vlay = tr_vlay  #set for loading by the dialog
        mstore.removeAllMapLayers()  #clear the store

        log.info('joined crest elevations')
        #=======================================================================
        # get wsls----
        #=======================================================================
        res_d = dict()
        dxcol = None

        #comColns = [self.sdistn, self.celn, self.segID, self.dikeID] #common columns

        geo_d = vlay_get_fdata(cPts_vlay,
                               rekey=tr_fid,
                               geo_obj=True,
                               logger=log)

        log.info('building %i cross profile sets' % len(noFailr_d))
        for eTag, wsl_rlay in noFailr_d.items():

            #===================================================================
            # drape---
            #===================================================================
            d = {
                'BAND': 1,
                'INPUT': tr_vlay,
                'NODATA': nullSamp,
                'OUTPUT': 'TEMPORARY_OUTPUT',
                'RASTER': wsl_rlay,
                'SCALE': 1,
            }
            algo_nm = 'native:setzfromraster'
            tri_vlay = processing.run(algo_nm, d,
                                      feedback=self.feedback)['OUTPUT']
            mstore.addMapLayer(tri_vlay)
            #===================================================================
            # extract z
            #===================================================================
            ofnl = [f.name() for f in tri_vlay.fields()]
            """because of the nullvalue handling... we should only be looking for a maximum here"""
            d = {
                'COLUMN_PREFIX': 'z_',
                'INPUT': tri_vlay,
                'OUTPUT': 'TEMPORARY_OUTPUT',
                'SUMMARIES': [stat_pars_d['Maximum']],
            }
            algo_nm = 'native:extractzvalues'
            tri_vlay = processing.run(algo_nm, d,
                                      feedback=self.feedback)['OUTPUT']
            mstore.addMapLayer(tri_vlay)

            #get new fieldName
            wslField = list(
                set([f.name() for f in tri_vlay.fields()]).difference(ofnl))[0]

            #===================================================================
            # collect----
            #===================================================================
            """
            view(tri_vlay)
            """
            df = vlay_get_fdf(tri_vlay,
                              logger=log).rename(columns={wslField: self.wsln})

            #clear out bad samples
            boolidx = df[self.wsln] == nullSamp
            df.loc[boolidx, self.wsln] = np.nan
            log.debug('\'%s\' dropped %i (of %i) bad wsl samples' %
                      (eTag, boolidx.sum(), len(boolidx)))

            #calc freeboard
            df[self.fbn] = df[self.celn] - df[self.wsln]

            df.loc[:, (self.fbn, self.celn, self.wsln)] = \
                df.loc[:, (self.fbn, self.celn, self.wsln)].round(self.prec)

            #===================================================================
            # #re-assemble layer
            #===================================================================
            #building from points (should be keyed by 'TR_ID')
            res_d[eTag] = self.vlay_new_df2(df,
                                            geo_d=geo_d,
                                            logger=log,
                                            gkey=tr_fid,
                                            layname='%s_%s_expo' %
                                            (dike_vlay.name(), eTag))

            mstore.removeAllMapLayers()  #clear the store

            #===================================================================
            # add to master data
            #===================================================================
            dxi = pd.concat([df.loc[:, (self.fbn, self.wsln)].T],
                            keys=[eTag],
                            names=['eTag']).T

            if dxcol is None:
                #first event
                dxcol = dxi
            else:
                dxcol = dxcol.join(dxi)

        #=======================================================================
        # clean up data
        #=======================================================================
        #join back in common columns
        """pulling from last loaded transect
        
        see get_fb_smry() for additional fields that we add to the summary results
            (used by the vuln model)
        """
        boolcol = df.columns.isin(dxcol.columns.levels[1])
        dxcol = dxcol.join(
            pd.concat([df.loc[:, ~boolcol].T], keys=['common'],
                      names=['eTag']).T)

        #typeset
        for coln, dtype in df.dtypes.to_dict().items():
            dxcol.loc[:, idx[:, coln]] = dxcol.loc[:, idx[:, coln]].astype(dtype)

        #=======================================================================
        # wrap----
        #=======================================================================
        log.info('finished building exposure on %i events' % len(res_d))
        self.expo_vlay_d = res_d
        self.expo_dxcol = dxcol

        return self.expo_dxcol, self.expo_vlay_d
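
The per-event results are stacked into a column MultiIndex by transposing, concatenating under an event key, and transposing back; a minimal sketch of that move (field names are illustrative):

    import pandas as pd

    df = pd.DataFrame({'freeboard': [1.2, 0.4], 'wsl': [9.0, 10.1]})

    # add an outer 'eTag' level to the columns for this event
    dxi = pd.concat([df[['freeboard', 'wsl']].T], keys=['e0100'], names=['eTag']).T
    assert list(dxi.columns) == [('e0100', 'freeboard'), ('e0100', 'wsl')]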