Code example #1
def tri_grid_point_cloud(input_path, variable, cell_size, output_path):
    cell_size = float(cell_size)
    # default the averaged variable to elevation when none is given
    if not variable:
        variable = 'z'

    df = None
    if input_path.lower().endswith('.las'):
        # convert the LAS file to text using the external las2txt tool
        las_asc = input_path + ".asc"
        os.system('las2txt.exe "%s" "%s"' % (input_path, las_asc))
        df = pd_load_asc_grid(las_asc)
        os.remove(las_asc)
    elif input_path.lower().endswith('.asc'):
        df = pd_load_asc_grid(input_path)
    else:
        df = pd_load_dataframe(input_path)

    vs = VoxelSpace(df, [cell_size, cell_size])

    vs.calculate_mean(variable)

    if output_path.lower().endswith('.00t'):
        # write an intermediate Vulcan grid, triangulate it into a 00t
        # surface with the external trigrid tool, then remove the grid
        output_grid = output_path + '.00g'
        vs.save_vulcan_grid(output_grid)
        os.system('trigrid "%s" "%s"' % (output_grid, output_path))
        os.remove(output_grid)
    elif output_path.lower().endswith('.00g'):
        vs.save_vulcan_grid(output_path)
    else:
        pd_save_dataframe(
            pd.DataFrame(vs.dump_xyz(), columns=['x', 'y', variable]),
            output_path)
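A minimal usage sketch, assuming the project's helper modules (pd_load_asc_grid, VoxelSpace, pd_save_dataframe) are importable and the external las2txt.exe and trigrid tools are on the PATH; the file names are hypothetical:

# average the z values of a LAS point cloud onto a 5 m grid and
# triangulate the result into a Vulcan 00t surface
tri_grid_point_cloud('survey.las', 'z', 5, 'survey.00t')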
Code example #2
def main(input_points, mode, cell_size, convert_to_triangles, output, display):
    df = pd_load_dataframe(input_points)
    mesh = vtk_df_to_mesh(df)
    if not cell_size:
        cell_size = 10
    mesh = mesh.elevation()

    if mode == 'outline':
        grid = mesh.outline(True)
    elif mode == 'delaunay_2d':
        grid = mesh.delaunay_2d()
    elif mode == 'delaunay_3d':
        grid = mesh.delaunay_3d()
    elif mode == 'grid':
        grid = grid_points_2d(mesh, float(cell_size))
        if int(convert_to_triangles):
            grid = grid.delaunay_2d()
    else:
        grid = grid_points_rbf(mesh, float(cell_size), mode)

    # matches the vtk family of extensions: vtk, vtp, vtm, vtu, ...
    if re.search(r'vt.$', output, re.IGNORECASE):
        pv_save(grid, output)
    elif output:
        df = vtk_mesh_to_df(grid)
        pd_save_dataframe(df, output)

    if int(display):
        vtk_plot_meshes([mesh, grid])
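Hypothetical calls; the fall-through branch suggests that any mode other than outline, delaunay_2d, delaunay_3d or grid is passed to grid_points_rbf as an RBF kernel name:

# triangulate scattered points and save them as a vtk surface
main('points.csv', 'delaunay_2d', 10, 0, 'surface.vtk', 0)
# resample onto a 25 m grid, convert cells to triangles, then display
main('points.csv', 'grid', 25, 1, 'surface.csv', 1)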
Code example #3
def db_join_support(target_db, target_hid, target_from, target_to, source_db,
                    source_hid, source_from, source_to, variables, output):
    v_lut = [{}, {}]
    v_lut[0]['hid'] = target_hid or 'hid'
    v_lut[1]['hid'] = source_hid or 'hid'
    v_lut[0]['from'] = target_from or 'from'
    v_lut[1]['from'] = source_from or 'from'
    v_lut[0]['to'] = target_to or 'to'
    v_lut[1]['to'] = source_to or 'to'

    dfs = [pd_load_dataframe(target_db), pd_load_dataframe(source_db)]

    dfs[0]['tmp_target_from'] = dfs[0][v_lut[0]['from']]
    odf = pd_join_interval(dfs, v_lut)
    odf.reset_index(drop=1, inplace=True)
    # pd_join_interval modifies the input array, which is bad behavior,
    # but the datasets may be huge so it is best to just clean up after
    dfs[0].reset_index(drop=1, inplace=True)

    variables = commalist().parse(variables)

    ttf = 'tmp_target_from'
    vl_a = [[ttf], [v_lut[0]['hid']]] + [[_[0] + '=' + _[0], _[1]]
                                         for _ in variables]

    odf = pd_breakdown(odf, vl_a)
    odf = pd.merge(dfs[0], odf, 'outer', [v_lut[0]['hid'], ttf])
    odf.drop(columns=ttf, inplace=True)

    if output:
        #odf.reset_index(drop=True, inplace=True)
        pd_save_dataframe(odf, output)
    else:
        print(odf.to_string())
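A hypothetical call, assuming commalist().parse accepts semicolon-separated "variable,aggregation" pairs (the per-entry [name, aggregation] layout is implied by the list comprehension above); the file and column names are illustrative:

# carry FE and AL2O3 from the source intervals onto the target
# intervals, averaging values where intervals overlap
db_join_support('target.csv', 'hid', 'from', 'to',
                'source.csv', 'hid', 'from', 'to',
                'FE,mean;AL2O3,mean', 'joined.csv')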
Code example #4
File: db_append.py  Project: pemn/db_append
def db_append(input_path, output_path):
    odf = None
    for i_path in input_path.split(';'):
        print(i_path)
        idf = pd_load_dataframe(i_path)
        idf['filename'] = os.path.basename(i_path)
        if odf is None:
            odf = idf
        else:
            # DataFrame.append no longer exists in pandas 2.x; concatenate
            odf = pd.concat([odf, idf])

    pd_save_dataframe(odf, output_path)
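Usage is straightforward: the input is a semicolon-separated list of files, and each output row records its origin in the filename column. A hypothetical call:

db_append('jan.csv;feb.csv;mar.csv', 'all.csv')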
Code example #5
def db_create_from_to(input_path, v_hid, v_depth, output, keep_null_values):

    df = pd_load_dataframe(input_path)

    pd_create_from_to(df, v_hid, v_depth, True)

    if not int(keep_null_values):
        df.fillna(-99, inplace=True)

    if output:
        pd_save_dataframe(df, output)
    else:
        print(df.to_string(index=False))
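A hypothetical call; with keep_null_values set to 0, any remaining nulls are replaced by the -99 placeholder:

db_create_from_to('survey.csv', 'hid', 'depth', 'intervals.csv', 0)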
Code example #6
def db_custom_20210513(input_path, support, output_path):
    las = None
    print(input_path)
    if input_path.lower().endswith('las'):
        # the default 4000 bytes are not enough, -1 = full file
        f, c = lasio.open_file(input_path, autodetect_encoding_chars=-1)
        las = lasio.read(f)
        df = las.df()
        df.reset_index(inplace=True)
    else:
        df = pd_load_dataframe(input_path)

    #print("creating support")
    # support => divisor (support / 0.01):
    #   10 = 1000
    #    1 = 100
    #  0.1 = 10
    # 0.01 = 1
    df['DEPT_RL'] = df.eval("DEPT * 100 // (%s / 0.01)" % support,
                            engine='python')

    #print("breakdown using support")
    vl = [['DEPT_RL'], ['DEPT=DEPT', 'max']]
    vpre = {'DEPT_RL', 'DEPT'}
    for v in ['furo', 'holeid', 'hid', 'nomrev']:
        for c in [str.lower, str.upper, str.capitalize]:
            if c(v) in df:
                vl.insert(0, [c(v) + '=' + c(v)])
                vpre.add(c(v))
                break
    for v in df.columns:
        if v not in vpre:
            vl.append([v + '=' + v, 'mean'])
    #print(vl)
    df = pd_breakdown(df, vl)
    #print(df)
    #python bm_breakdown.py %work_xlsx% "" "filename;DEPT_RL;DEPT=DEPT,max;CADE=CADE,mean;BIT GRC1=BIT GRC1,mean;DD3L=DD3L,mean;DD3B=DD3B,mean;DENL=DENL,mean;DENB=DENB,mean;GRDE=GRDE,mean;CCO1=CCO1,mean;CO1C=CO1C,mean;DD3G=DD3G,mean;GC1G=GC1G,mean;DD3C=DD3C,mean;GTMP=GTMP,mean;GRDO=GRDO,mean;DNBO=DNBO,mean;DNLO=DNLO,mean;CCLF=CCLF,mean;VAL_EXP CADMED=VAL_EXP CADMED,mean;CODREV=CODREV,mean;DIAM=DIAM,mean;WLEV=WLEV,mean" 0 %work_xlsx%

    #print("removing temporary columns")
    df.reset_index(inplace=True)
    df.drop(columns='DEPT_RL', inplace=True)

    if las is None or not output_path.lower().endswith('las'):
        pd_save_dataframe(df, output_path)
    else:
        las_set_data(las, df)
        las.write(output_path)
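The eval expression is the core of the regularization: DEPT * 100 // (support / 0.01) is floor(DEPT / support) in disguise, so DEPT_RL is the index of the support-sized bin each depth falls into (the comment table above lists the divisor support / 0.01 for common supports). A worked example, assuming a 0.1 m support:

support = 0.1
dept = 12.34
dept_rl = dept * 100 // (support / 0.01)  # effectively 1234 // 10
print(dept_rl)  # 123.0 -> every depth in [12.3, 12.4) shares this bin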
Code example #7
def db_join_interval(left_db, left_hid, left_from, left_to, right_db,
                     right_hid, right_from, right_to, output):
    v_lut = [{}, {}]
    v_lut[0]['hid'] = left_hid or 'DHID'
    v_lut[1]['hid'] = right_hid or 'DHID'
    v_lut[0]['from'] = left_from
    v_lut[1]['from'] = right_from
    v_lut[0]['to'] = left_to
    v_lut[1]['to'] = right_to

    dfs = [pd_load_dataframe(left_db), pd_load_dataframe(right_db)]

    odf = pd_join_interval(dfs, v_lut)

    if output:
        odf.reset_index(drop=True, inplace=True)
        pd_save_dataframe(odf, output)
    else:
        print(odf.to_string())
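A hypothetical call joining two interval tables on hole id and overlapping from/to ranges; the file and column names are illustrative:

db_join_interval('assay.csv', 'DHID', 'FROM', 'TO',
                 'litho.csv', 'DHID', 'FROM', 'TO', 'joined.csv')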
Code example #8
File: db_join.py  Project: pemn/db_join_support
def db_join(input_path, condition, primary_key, output_path, lookup_mode):
    if len(primary_key) == 0:
        primary_key = None
    elif "," in primary_key:
        primary_key = primary_key.split(',')

    header = dict()
    tables = []
    for i_path in input_path.split(';'):
        idf = pd_load_dataframe(i_path, condition)
        for v in idf.columns:
            if v not in header:
                header[v] = len(header)
        tables.append(idf)

    odf = tables[0]

    if int(lookup_mode):
        for i in range(1, len(tables)):
            for j in odf.index:
                if np.isnan(odf.loc[j, primary_key]):
                    continue
                flag = None
                for k in tables[i].index:
                    flag = k
                    if odf.loc[j, primary_key] < tables[i].loc[k, primary_key]:
                        break
                if flag is not None:
                    for cn in tables[i].columns:
                        if cn != primary_key:
                            odf.loc[j, cn] = tables[i].loc[flag, cn]
    else:
        for i in range(1, len(tables)):
            # {left, right, outer, inner, cross}, default inner
            odf = pd.merge(odf, tables[i], 'outer', primary_key)

    pd_save_dataframe(odf, output_path)
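The lookup branch is effectively a nearest-key lookup: for each row it scans the other table in order and copies the first row whose key exceeds the current value, falling back to the last row when none does. Assuming a single numeric primary_key and both frames sorted on it, pandas' built-in merge_asof covers much the same ground far faster; this is a sketch, not a drop-in replacement, since the edge cases differ slightly:

# direction='forward' takes the first row whose key is >= the left
# value, where the loop above requires strictly greater
odf = pd.merge_asof(tables[0], tables[1], on=primary_key,
                    direction='forward')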
Code example #9
File: bm_breakdown.py  Project: pemn/db_join_support
def main(*args):
    pd_save_dataframe(bm_breakdown(args[0], args[1], args[2], args[3]),
                      args[4])
Code example #10
def pd_save_gdal(df, output_path, layer_attribute='layer', driver_name=None):
    if driver_name is None:
        driver_name = detect_ogr_driver(output_path)

    if driver_name not in gdal_formats:
        if layer_attribute and layer_attribute != 'layer':
            df['layer'] = df[layer_attribute]
        return pd_save_dataframe(df, output_path)

    try:
        from osgeo import ogr
    except ImportError:
        # GDAL/OGR is not available; fall back to the generic saver
        return pd_save_dataframe(df, output_path)
    print("save using ogr driver", driver_name)

    import osgeo.osr as osr

    # use OGR specific exceptions
    ogr.UseExceptions()

    # Create the output
    dvr = ogr.GetDriverByName(driver_name)
    ods = dvr.CreateDataSource(output_path)
    poly = None
    lyr = ods.CreateLayer('')
    if lyr.TestCapability('CreateField'):
        if lyr.GetLayerDefn().GetFieldIndex('Layer') == -1:
            lyr.CreateField(ogr.FieldDefn('Layer', ogr.OFTString))
        for f in df.columns:
            if len(f) > 1:
                t = ogr.OFTString
                if df[f].dtype != object:
                    t = ogr.OFTReal
                lyr.CreateField(ogr.FieldDefn(f, t))
    # start from the bottom of the dataframe to simplify polygon creation
    for row in df.index[::-1]:
        layer = None
        if layer_attribute in df:
            layer = df.loc[row, layer_attribute]
        if not layer or (isinstance(layer, float) and np.isnan(layer)):
            layer = os.path.splitext(os.path.basename(output_path))[0]

        n, x, y, z = df.loc[row, ['n', 'x', 'y', 'z']].astype(float)
        if poly is None:
            ptype = ''
            if 'type' in df:
                ptype = str.upper(df.loc[row, 'type'])
            print(ptype)
            if ptype.find('POINT') >= 0:
                poly = ogr.Geometry(ogr.wkbPointZM)
            elif ptype == 'LINEARRING' or ptype.find('POLY') >= 0:
                poly = ogr.Geometry(ogr.wkbLinearRing)
            else:
                poly = ogr.Geometry(ogr.wkbLineStringZM)

        poly.SetPoint(int(n), x, y, z)

        if n == 0.0:
            feature = ogr.Feature(lyr.GetLayerDefn())
            ffDefn = feature.GetDefnRef()
            for i in range(ffDefn.GetFieldCount()):
                f = ffDefn.GetFieldDefn(i).GetName()
                if f in df:
                    feature.SetField(f, str(df.loc[row, f]))
                elif f.lower() in df:
                    feature.SetField(f, df.loc[row, f.lower()])
            feature.SetField('Layer', layer)
            feature.SetGeometry(poly)
            lyr.CreateFeature(feature)
            poly = None
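A hypothetical input illustrating the schema this function expects: one row per vertex, with n numbering the vertices within a feature (a feature is flushed when the bottom-up scan reaches n == 0), x/y/z coordinates, and an optional layer column. Whether the '.shp' path below actually selects an OGR driver depends on detect_ogr_driver:

df = pd.DataFrame(
    [[0, 10.0, 20.0, 0.0, 'toe'],
     [1, 15.0, 20.0, 0.0, 'toe'],
     [2, 15.0, 25.0, 0.0, 'toe']],
    columns=['n', 'x', 'y', 'z', 'layer'])
pd_save_gdal(df, 'lines.shp')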
Code example #11
def gis_convert_epsg(input_path, x, y, z, convert_clock_to_decimal,
                     convert_lookup, srs_column, srs_lookup,
                     custom_systems_enable, custom_systems_zip, srs_input,
                     srs_output, output_path):
    print("# gis_convert_epsg")
    if len(x) == 0:
        x = 'x'
    if len(y) == 0:
        y = 'y'
    if len(z) == 0:
        z = 'z'

    df = pd_load_dataframe(input_path)

    if int(convert_clock_to_decimal):
        for row in df.index:
            for col in [x, y]:
                vc = df.loc[row, col]
                vd = clock_to_decimal(vc)
                print(row, vc, "=>", vd)
                df.loc[row, col] = vd

    if int(convert_lookup):
        df_lookup = pd_load_dataframe(srs_lookup)
        df_lookup.set_index(df_lookup.columns[0], inplace=True)
        for raw_srs in df[srs_column].unique():
            srs_input_row = None
            # check this row has a specific srs which is on the lookup table
            if raw_srs in df_lookup.index:
                srs_input_row = df_lookup.at[raw_srs, df_lookup.columns[0]]
            else:
                srs_input_row = sanitize_srs(raw_srs)
                if srs_input_row is None and srs_input:
                    # rows that do not have a valid srs, default to the global epsg
                    srs_input_row = srs_input
            print(raw_srs, srs_input_row)
            if srs_input_row is not None:
                df.update(
                    gis_project_df(df.loc[df[srs_column] == raw_srs].copy(),
                                   srs_input_row, srs_output, x, y, z))

    else:
        # global conversion
        if int(custom_systems_enable):
            from zipfile import ZipFile
            zf = ZipFile(custom_systems_zip)
            for f in (srs_input, srs_output):
                if f in zf.namelist():
                    print(f)
                    zf.extract(f)
            zf.close()

        df = gis_project_df(df, srs_input, srs_output, x, y, z)

    if output_path:
        pd_save_dataframe(df, output_path)
        if output_path.lower().endswith('shp'):
            gis_create_prj(output_path, srs_output)
    else:
        print(df)

    print("# gis_convert_epsg finished")