Code example #1
def db_join_support(target_db, target_hid, target_from, target_to, source_db,
                    source_hid, source_from, source_to, variables, output):
    v_lut = [{}, {}]
    v_lut[0]['hid'] = target_hid or 'hid'
    v_lut[1]['hid'] = source_hid or 'hid'
    v_lut[0]['from'] = target_from or 'from'
    v_lut[1]['from'] = source_from or 'from'
    v_lut[0]['to'] = target_to or 'to'
    v_lut[1]['to'] = source_to or 'to'

    dfs = [pd_load_dataframe(target_db), pd_load_dataframe(source_db)]

    dfs[0]['tmp_target_from'] = dfs[0][v_lut[0]['from']]
    odf = pd_join_interval(dfs, v_lut)
    odf.reset_index(drop=True, inplace=True)
    # pd_join_interval modifies the input frames in place, which is bad
    # behavior, but the datasets may be huge so it's cheaper to clean up after
    dfs[0].reset_index(drop=True, inplace=True)

    variables = commalist().parse(variables)

    ttf = 'tmp_target_from'
    vl_a = [[ttf], [v_lut[0]['hid']]] + [[_[0] + '=' + _[0], _[1]]
                                         for _ in variables]

    odf = pd_breakdown(odf, vl_a)
    odf = pd.merge(dfs[0], odf, 'outer', [v_lut[0]['hid'], ttf])
    odf.drop(columns=ttf, inplace=True)  # positional axis was removed in pandas 2.0

    if output:
        #odf.reset_index(drop=True, inplace=True)
        pd_save_dataframe(odf, output)
    else:
        print(odf.to_string())
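The heavy lifting here is pd_join_interval, which overlap-joins two from/to interval tables per hole id. A minimal sketch of that idea in plain pandas, with hypothetical 'hid'/'from'/'to' column names; the project's actual implementation may differ:

import pandas as pd

def interval_join_sketch(target, source, hid='hid'):
    # naive O(n*m) overlap join: rows match when they share a hole id and
    # their depth intervals intersect
    rows = []
    for _, t in target.iterrows():
        for _, s in source.iterrows():
            if t[hid] == s[hid] and t['from'] < s['to'] and s['from'] < t['to']:
                rows.append({**s.to_dict(), **t.to_dict()})
    return pd.DataFrame(rows)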
Code example #2
def tri_grid_point_cloud(input_path, variable, cell_size, output_path):
    cell_size = float(cell_size)
    if not variable:
        # default to the z coordinate when no variable is given
        variable = 'z'

    df = None
    if input_path.lower().endswith('.las'):
        las_asc = input_path + ".asc"
        os.system('las2txt.exe "%s" "%s"' % (input_path, las_asc))
        df = pd_load_asc_grid(las_asc)
        os.remove(las_asc)
    elif input_path.lower().endswith('.asc'):
        df = pd_load_asc_grid(input_path)
    else:
        df = pd_load_dataframe(input_path)

    vs = VoxelSpace(df, [cell_size, cell_size])

    vs.calculate_mean(variable)

    if output_path.lower().endswith('.00t'):
        output_grid = output_path + '.00g'
        vs.save_vulcan_grid(output_grid)
        os.system('trigrid "%s" "%s"' % (output_grid, output_path))
        os.remove(output_grid)
    elif output_path.lower().endswith('.00g'):
        vs.save_vulcan_grid(output_path)
    else:
        pd_save_dataframe(
            pd.DataFrame(vs.dump_xyz(), columns=['x', 'y', variable]),
            output_path)
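A usage sketch with hypothetical file names; a csv input avoids the las2txt.exe dependency of the .las branch:

# average a 'z' variable into 5 m square cells and save the result as csv
tri_grid_point_cloud('points.csv', 'z', '5', 'grid.csv')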
Code example #3
def main(input_points, mode, cell_size, convert_to_triangles, output, display):
    df = pd_load_dataframe(input_points)
    mesh = vtk_df_to_mesh(df)
    if not cell_size:
        cell_size = 10
    mesh = mesh.elevation()

    if mode == 'outline':
        grid = mesh.outline(True)
    elif mode == 'delaunay_2d':
        grid = mesh.delaunay_2d()
    elif mode == 'delaunay_3d':
        grid = mesh.delaunay_3d()
    elif mode == 'grid':
        grid = grid_points_2d(mesh, float(cell_size))
        if int(convert_to_triangles):
            grid = grid.delaunay_2d()
    else:
        grid = grid_points_rbf(mesh, float(cell_size), mode)

    # vtk family of extensions: vtk, vtp, vtu, vtm, ...
    if re.search(r'vt.$', output, re.IGNORECASE):
        pv_save(grid, output)
    elif output:
        df = vtk_mesh_to_df(grid)
        pd_save_dataframe(df, output)

    if int(display):
        vtk_plot_meshes([mesh, grid])
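A usage sketch with hypothetical arguments; any mode besides outline, delaunay_2d, delaunay_3d and grid falls through to grid_points_rbf as an interpolation mode:

# triangulate a point file and save the surface in a vtk format file
main('points.csv', 'delaunay_2d', '', '0', 'surface.vtk', '0')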
Code example #4
def db_downhole_chart(input_path, condition, v_hid, v_from, v_to, v_lito,
                      variables, scd, output_path, output_window, page_charts):
    lito_rgb = VulcanScd(scd, 'DRILL_COLOUR', v_lito)
    if int(page_charts) < 1:
        page_charts = 1
    else:
        page_charts = int(page_charts)

    variables = list(filter(len, variables.split(';')))

    idf = pd_load_dataframe(input_path, condition, None,
                            [v_hid, v_from, v_to, v_lito] + variables)

    idf.set_index(v_hid, inplace=True)

    hid_count = len(idf.index.unique())

    page_cols = min(page_charts, hid_count)

    pdf = PdfPages(output_path)
    pagec = 0

    # 11.69 x 8.27 inches is A4 landscape; the default figsize is doubled below
    fig = None
    lito_legend = [
        mpatches.Patch(color=lito_rgb[_], label=_)
        for _ in idf[v_lito].unique()
    ]
    v_minmax = [(idf[_].min(), idf[_].max()) for _ in [v_to] + variables]
    for hid in idf.index.unique():
        if pagec % page_cols == 0:
            if pagec:
                pdf.savefig()
            if pagec < hid_count:
                fig, gs = plt.subplots(len(variables) + 1,
                                       page_cols,
                                       sharey=True,
                                       figsize=np.multiply(
                                           plt.rcParams["figure.figsize"], 2))
                fig.legend(handles=lito_legend,
                           labels=[_.get_label() for _ in lito_legend])

        print(hid)
        plot_downhole(idf.loc[hid], hid, gs, pagec % page_cols, v_from, v_to,
                      v_lito, variables, lito_rgb, v_minmax)

        pagec += 1

    if pagec:
        # save the last page, which may be only partially filled
        pdf.savefig()

    pdf.close()

    if int(output_window):
        plt.show(block=True)
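A usage sketch, assuming hypothetical file and field names; the scd file supplies the lithology colors and page_charts sets how many holes share a pdf page:

# chart AU and FE down each hole, 4 holes per pdf page
db_downhole_chart('assay.csv', '', 'hid', 'from', 'to', 'lito',
                  'AU;FE', 'colors.scd', 'charts.pdf', '0', '4')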
Code example #5
def db_join_interval(left_db, left_hid, left_from, left_to, right_db,
                     right_hid, right_from, right_to, output):
    v_lut = [{}, {}]
    v_lut[0]['hid'] = left_hid or 'DHID'
    v_lut[1]['hid'] = right_hid or 'DHID'
    v_lut[0]['from'] = left_from
    v_lut[1]['from'] = right_from
    v_lut[0]['to'] = left_to
    v_lut[1]['to'] = right_to

    dfs = [pd_load_dataframe(left_db), pd_load_dataframe(right_db)]

    odf = pd_join_interval(dfs, v_lut)

    if output:
        odf.reset_index(drop=True, inplace=True)
        pd_save_dataframe(odf, output)
    else:
        print(odf.to_string())
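Compared with example #1 this variant returns the raw interval join without the breakdown and merge-back steps. A usage sketch with hypothetical files:

db_join_interval('lito.csv', 'DHID', 'FROM', 'TO',
                 'assay.csv', 'DHID', 'FROM', 'TO', 'joined.csv')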
Code example #6
File: db_append.py Project: pemn/db_append
def db_append(input_path, output_path):
    dfs = []
    for i_path in input_path.split(';'):
        print(i_path)
        idf = pd_load_dataframe(i_path)
        # tag each row with its source file
        idf['filename'] = os.path.basename(i_path)
        dfs.append(idf)
    # DataFrame.append was removed in pandas 2.0; concatenate instead
    odf = pd.concat(dfs, ignore_index=True)

    pd_save_dataframe(odf, output_path)
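A usage sketch with hypothetical files; the filename column lets each row be traced back to its source:

# stack two tables into one output, keeping track of row origin
db_append('2021.csv;2022.csv', 'all_years.csv')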
Code example #7
File: bm_breakdown.py Project: pemn/db_join_support
def bm_breakdown(input_path, condition, vl_s, keep_null=False):
    '''
    File entry point for the breakdown process
    Input: path to input file, condition string, variable list string
    Output: dataframe with result
    '''
    vl_a = commalist().parse(vl_s)
    print("# bm_breakdown", input_path, file=sys.stderr)
    if input_path.lower().endswith('.isis'):
        idf = pd_load_dataframe(input_path, condition,
                                table_field(vl_a[0][0], True), None, keep_null)
        vl_a = table_field(vl_a)
    else:
        var_set = set()
        for row in vl_a:
            # extract all unique variables from the breakdown mask,
            # skipping the operation column
            var_set.update(
                [row[j].split(_LABEL)[0] for j in range(len(row)) if j != 1])

        idf = pd_load_dataframe(input_path, condition, None, var_set, keep_null)
    return pd_breakdown(idf, vl_a)
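The variable list string is parsed by the project's commalist helper; a sketch of the assumed row shape, where the second cell of each row is the aggregation operation:

# assumed parse of 'lito;AU=AU,mean;FE=FE,max' into rows of [mask, operation]:
# vl_a = [['lito'], ['AU=AU', 'mean'], ['FE=FE', 'max']]
bm_breakdown('model.csv', '', 'lito;AU=AU,mean;FE=FE,max')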
Code example #8
def db_create_from_to(input_path, v_hid, v_depth, output, keep_null_values):

    df = pd_load_dataframe(input_path)

    pd_create_from_to(df, v_hid, v_depth, True)

    if not int(keep_null_values):
        df.fillna(-99, inplace=True)

    if output:
        pd_save_dataframe(df, output)
    else:
        print(df.to_string(index=False))
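A usage sketch with hypothetical files, assuming pd_create_from_to converts hole id + depth samples into from/to intervals in place:

# build from/to intervals and replace nulls with the -99 sentinel
db_create_from_to('survey.csv', 'hid', 'depth', 'intervals.csv', '0')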
Code example #9
def pd_load_gdal(input_path, table_name=None, driver_name=None):
    if driver_name is None:
        driver_name = detect_ogr_driver(input_path)
    if driver_name not in gdal_formats:
        return pd_load_dataframe(input_path, '', table_name)

    # fall back to the generic loader when GDAL/OGR is not available
    try:
        from osgeo import ogr
    except ImportError:
        return pd_load_dataframe(input_path, '', table_name)
    print("load using ogr driver", driver_name)
    dvr = ogr.GetDriverByName(driver_name)

    ids = dvr.Open(input_path)

    if ids is None:
        raise Exception("Invalid input file or format")

    print("LayerCount", ids.GetLayerCount())
    rows = []
    for l in range(ids.GetLayerCount()):
        lyr = ids.GetLayer(l)
        lyrDefn = lyr.GetLayerDefn()
        layer = lyr.GetName()
        print("layer", layer)

        for feature in lyr:
            extract_geometry_points(rows, feature, feature.GetGeometryRef(),
                                    layer)
    df = pd.DataFrame.from_records(rows)
    # the layer name is already passed per feature above; overwriting
    # df['layer'] with lyr.GetName() would keep only the last layer's name
    return df
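extract_geometry_points is defined elsewhere in the project; a plausible sketch, under the assumption that it flattens each ogr geometry into one row per vertex (hypothetical implementation):

def extract_geometry_points(rows, feature, geom, layer):
    # recurse into multipart geometries, then store one row per vertex
    if geom.GetGeometryCount():
        for i in range(geom.GetGeometryCount()):
            extract_geometry_points(rows, feature, geom.GetGeometryRef(i), layer)
    else:
        for i in range(geom.GetPointCount()):
            x, y, z = geom.GetPoint(i)
            rows.append({'x': x, 'y': y, 'z': z, 'layer': layer})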
Code example #10
def db_custom_20210513(input_path, support, output_path):
    las = None
    print(input_path)
    if input_path.lower().endswith('las'):
        # the default 4000 bytes are not enough for encoding detection;
        # -1 means scan the full file
        las = lasio.read(input_path, autodetect_encoding_chars=-1)
        df = las.df()
        df.reset_index(inplace=True)
    else:
        df = pd_load_dataframe(input_path)

    #print("criando suporte")
    #   10 = 1000
    #    1 = 100
    #  0.1 = 10
    # 0.01 = 1
    df['DEPT_RL'] = df.eval("DEPT * 100 // (%s / 0.01)" % support,
                            engine='python')

    #print("breakdown usando suporte")
    vl = [['DEPT_RL'], ['DEPT=DEPT', 'max']]
    vpre = {'DEPT_RL', 'DEPT'}
    for v in ['furo', 'holeid', 'hid', 'nomrev']:
        for c in [str.lower, str.upper, str.capitalize]:
            if c(v) in df:
                vl.insert(0, [c(v) + '=' + c(v)])
                vpre.add(c(v))
                break
    for v in df.columns:
        if v not in vpre:
            vl.append([v + '=' + v, 'mean'])
    #print(vl)
    df = pd_breakdown(df, vl)
    #print(df)
    #python bm_breakdown.py %work_xlsx% "" "filename;DEPT_RL;DEPT=DEPT,max;CADE=CADE,mean;BIT GRC1=BIT GRC1,mean;DD3L=DD3L,mean;DD3B=DD3B,mean;DENL=DENL,mean;DENB=DENB,mean;GRDE=GRDE,mean;CCO1=CCO1,mean;CO1C=CO1C,mean;DD3G=DD3G,mean;GC1G=GC1G,mean;DD3C=DD3C,mean;GTMP=GTMP,mean;GRDO=GRDO,mean;DNBO=DNBO,mean;DNLO=DNLO,mean;CCLF=CCLF,mean;VAL_EXP CADMED=VAL_EXP CADMED,mean;CODREV=CODREV,mean;DIAM=DIAM,mean;WLEV=WLEV,mean" 0 %work_xlsx%

    #print("removendo colunas temporarias")
    df.reset_index(inplace=True)
    df.drop('DEPT_RL', 1, inplace=True)

    if las is None or not output_path.lower().endswith('las'):
        pd_save_dataframe(df, output_path)
    else:
        las_set_data(las, df)
        las.write(output_path)
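A worked example of the support arithmetic; with support = 0.1 the divisor is 10, so depths are grouped into 0.1 m buckets:

support = 0.1
DEPT = 1.234
# DEPT * 100 // (support / 0.01) -> 123.4 // 10 -> 12.0 (bucket index)
print(DEPT * 100 // (support / 0.01))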
Code example #11
File: db_join.py Project: pemn/db_join_support
def db_join(input_path, condition, primary_key, output_path, lookup_mode):
    if len(primary_key) == 0:
        primary_key = None
    elif "," in primary_key:
        primary_key = primary_key.split(',')

    header = dict()
    tables = []
    for i_path in input_path.split(';'):
        idf = pd_load_dataframe(i_path, condition)
        for v in idf.columns:
            if v not in header:
                header[v] = len(header)
        tables.append(idf)

    odf = tables[0]

    if int(lookup_mode):
        # interval lookup: each row takes values from the first row of the
        # next table whose key exceeds its own (or the last row if none does)
        for i in range(1, len(tables)):
            for j in odf.index:
                # pd.isna handles non-numeric keys, unlike np.isnan
                if pd.isna(odf.loc[j, primary_key]):
                    continue
                flag = None
                for k in tables[i].index:
                    flag = k
                    if odf.loc[j, primary_key] < tables[i].loc[k, primary_key]:
                        break
                if flag is not None:
                    for cn in tables[i].columns:
                        if cn != primary_key:
                            odf.loc[j, cn] = tables[i].loc[flag, cn]
    else:
        for i in range(1, len(tables)):
            # {left, right, outer, inner, cross}, default inner
            odf = pd.merge(odf, tables[i], 'outer', primary_key)

    pd_save_dataframe(odf, output_path)
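A usage sketch with hypothetical files; lookup_mode '0' gives a plain outer merge on the key, '1' the nearest-key row copy shown above:

db_join('collar.csv;survey.csv', '', 'hid', 'joined.csv', '0')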
Code example #12
def gis_convert_epsg(input_path, x, y, z, convert_clock_to_decimal,
                     convert_lookup, srs_column, srs_lookup,
                     custom_systems_enable, custom_systems_zip, srs_input,
                     srs_output, output_path):
    print("# gis_convert_epsg")
    if len(x) == 0:
        x = 'x'
    if len(y) == 0:
        y = 'y'
    if len(z) == 0:
        z = 'z'

    df = pd_load_dataframe(input_path)

    if int(convert_clock_to_decimal):
        for row in df.index:
            for col in [x, y]:
                vc = df.loc[row, col]
                vd = clock_to_decimal(vc)
                print(row, vc, "=>", vd)
                df.loc[row, col] = vd

    if int(convert_lookup):
        df_lookup = pd_load_dataframe(srs_lookup)
        df_lookup.set_index(df_lookup.columns[0], inplace=True)
        for raw_srs in df[srs_column].unique():
            srs_input_row = None
            # check if this srs value has a specific entry in the lookup table
            if raw_srs in df_lookup.index:
                srs_input_row = df_lookup.at[raw_srs, df_lookup.columns[0]]
            else:
                srs_input_row = sanitize_srs(raw_srs)
                if srs_input_row is None and srs_input:
                    # rows without a valid srs default to the global input epsg
                    srs_input_row = srs_input
            print(raw_srs, srs_input_row)
            if srs_input_row is not None:
                df.update(
                    gis_project_df(df.loc[df[srs_column] == raw_srs].copy(),
                                   srs_input_row, srs_output, x, y, z))

    else:
        # global conversion
        if int(custom_systems_enable):
            from zipfile import ZipFile
            zf = ZipFile(custom_systems_zip)
            for f in (srs_input, srs_output):
                if f in zf.namelist():
                    print(f)
                    zf.extract(f)
            zf.close()

        df = gis_project_df(df, srs_input, srs_output, x, y, z)

    if output_path:
        pd_save_dataframe(df, output_path)
        if output_path.lower().endswith('shp'):
            gis_create_prj(output_path, srs_output)
    else:
        print(df)

    print("# gis_convert_epsg finished")