def _import_roff_v2(self, pfile, name):
    """Import ROFF format, version 2 (improved version)"""

    # This routine first scans the file for all keywords, then grabs the
    # relevant data by reading only the relevant portions of the input file.

    fhandle, _pclose = _get_fhandle(pfile)

    kwords = utils.scan_keywords(fhandle, fformat="roff")

    for kwd in kwords:
        logger.info(kwd)

    # byteswap:
    byteswap = _rkwquery(fhandle, kwords, "filedata!byteswaptest", -1)

    ncol = _rkwquery(fhandle, kwords, "dimensions!nX", byteswap)
    nrow = _rkwquery(fhandle, kwords, "dimensions!nY", byteswap)
    nlay = _rkwquery(fhandle, kwords, "dimensions!nZ", byteswap)
    logger.info("Dimensions in ROFF file %s %s %s", ncol, nrow, nlay)

    # get the actual parameter:
    vals = _rarraykwquery(fhandle, kwords, "parameter!name!" + name, byteswap,
                          ncol, nrow, nlay)

    self._values = vals
    self._name = name

    return 0
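
# Minimal illustration (not part of the importer) of how the ROFF keyword path
# queried above is composed; "PORO" is just a placeholder parameter name.
def _demo_roff_keyword_path(name="PORO"):
    return "parameter!name!" + name  # -> "parameter!name!PORO"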
def _scan_ecl_keywords(pfile, maxkeys=100000, dataframe=False):

    # In case pfile is not a file name but a swig pointer to a file handle,
    # the file must not be closed

    ultramax = int(1000000 / 9)  # cf *swig_bnd_char_1m in cxtgeo.i
    if maxkeys > ultramax:
        raise ValueError("maxkeys value is too large, must be < {}".format(ultramax))

    rectypes = _cxtgeo.new_intarray(maxkeys)
    reclens = _cxtgeo.new_longarray(maxkeys)
    recstarts = _cxtgeo.new_longarray(maxkeys)

    fhandle, pclose = _get_fhandle(pfile)

    nkeys, keywords = _cxtgeo.grd3d_scan_eclbinary(
        fhandle, rectypes, reclens, recstarts, maxkeys, XTGDEBUG
    )

    _close_fhandle(fhandle, pclose)

    keywords = keywords.replace(" ", "")
    keywords = keywords.split("|")

    # record types translation (cf: grd3d_scan_eclbinary.c in cxtgeo)
    rct = {
        "1": "INTE",
        "2": "REAL",
        "3": "DOUB",
        "4": "CHAR",
        "5": "LOGI",
        "6": "MESS",
        "-1": "????",
    }

    rc = []
    rl = []
    rs = []
    for i in range(nkeys):
        rc.append(rct[str(_cxtgeo.intarray_getitem(rectypes, i))])
        rl.append(_cxtgeo.longarray_getitem(reclens, i))
        rs.append(_cxtgeo.longarray_getitem(recstarts, i))

    _cxtgeo.delete_intarray(rectypes)
    _cxtgeo.delete_longarray(reclens)
    _cxtgeo.delete_longarray(recstarts)

    result = list(zip(keywords, rc, rl, rs))

    if dataframe:
        cols = ["KEYWORD", "TYPE", "NITEMS", "BYTESTART"]
        df = pd.DataFrame.from_records(result, columns=cols)
        return df

    return result
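
# Self-contained sketch of the record list this scanner returns and of the
# optional DataFrame layout; the tuples below are made-up placeholders, not
# real scan output.
def _demo_scan_result_layout():
    import pandas as pd

    fake_result = [
        # (KEYWORD, TYPE, NITEMS, BYTESTART) -- same layout as the zip() above
        ("INTEHEAD", "INTE", 95, 24),
        ("PORO", "REAL", 35817, 5512),
    ]
    cols = ["KEYWORD", "TYPE", "NITEMS", "BYTESTART"]
    return pd.DataFrame.from_records(fake_result, columns=cols)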
def import_bgrdecl_prop(self, pfile, name="unknown", grid=None):
    """Import property from binary files with GRDECL layout"""

    fhandle, pclose = _get_fhandle(pfile)

    # scan file for properties; these have a binary format similar to e.g. EGRID
    logger.info("Make kwlist by scanning")
    kwlist = utils.scan_keywords(fhandle,
                                 fformat="xecl",
                                 maxkeys=1000,
                                 dataframe=False,
                                 dates=False)
    bpos = {}
    bpos[name] = -1

    for kwitem in kwlist:
        kwname, kwtype, kwlen, kwbyte = kwitem
        logger.info("KWITEM: %s", kwitem)
        if name == kwname:
            bpos[name] = kwbyte
            break

    if bpos[name] == -1:
        raise xtgeo.KeywordNotFoundError(
            "Cannot find property name {} in file {}".format(name, pfile))
    self._ncol = grid.ncol
    self._nrow = grid.nrow
    self._nlay = grid.nlay

    values = eclbin_record(fhandle, kwname, kwlen, kwtype, kwbyte)
    if kwtype == "INTE":
        self._isdiscrete = True
        # make the code list
        uniq = np.unique(values).tolist()
        codes = dict(zip(uniq, uniq))
        codes = {key: str(val) for key, val in codes.items()}  # val: strings
        self.codes = codes

    else:
        self._isdiscrete = False
        values = values.astype(np.float64)  # cast REAL (float32) to float64
        self.codes = {}

    # property arrays from binary GRDECL cover all cells, but they are in
    # Fortran order, so a conversion is needed

    actnum = grid.get_actnum().values
    allvalues = values.reshape(self.dimensions, order="F")
    allvalues = np.asanyarray(allvalues, order="C")
    allvalues = ma.masked_where(actnum < 1, allvalues)
    self.values = allvalues
    self._name = name

    _close_fhandle(fhandle, pclose)
    return 0
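
# Self-contained sketch of how the discrete code table is built above from the
# unique integer values of e.g. a facies array; the values are made up.
def _demo_build_codes():
    import numpy as np

    values = np.array([1, 1, 2, 3, 3, 3])
    uniq = np.unique(values).tolist()
    return {key: str(val) for key, val in zip(uniq, uniq)}  # {1: '1', 2: '2', 3: '3'}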
def scan_dates(pfile, fformat="unrst", maxdates=1000, dataframe=False):
    """Quick scan dates in a simulation restart file.

    Cf. grid_properties.py description
    """
    logger.info("Format supported as default is %s", fformat)

    seq = _cxtgeo.new_intarray(maxdates)
    day = _cxtgeo.new_intarray(maxdates)
    mon = _cxtgeo.new_intarray(maxdates)
    yer = _cxtgeo.new_intarray(maxdates)

    fhandle, pclose = _get_fhandle(pfile)

    nstat = _cxtgeo.grd3d_ecl_tsteps(fhandle, seq, day, mon, yer, maxdates, XTGDEBUG)

    _close_fhandle(fhandle, pclose)

    sq = []
    da = []
    for i in range(nstat):
        sq.append(_cxtgeo.intarray_getitem(seq, i))
        dday = _cxtgeo.intarray_getitem(day, i)
        dmon = _cxtgeo.intarray_getitem(mon, i)
        dyer = _cxtgeo.intarray_getitem(yer, i)
        date = "{0:4}{1:02}{2:02}".format(dyer, dmon, dday)
        da.append(int(date))

    for item in [seq, day, mon, yer]:
        _cxtgeo.delete_intarray(item)

    zdates = list(zip(sq, da))  # list for PY3

    if dataframe:
        cols = ["SEQNUM", "DATE"]
        df = pd.DataFrame.from_records(zdates, columns=cols)
        return df

    return zdates
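
# Tiny, self-contained illustration of how each (SEQNUM, DATE) pair above is
# composed: day/month/year integers are packed into a single YYYYMMDD integer.
# The numbers are made up for the example.
def _demo_pack_date(seqnum=0, dyer=2003, dmon=1, dday=5):
    date = "{0:4}{1:02}{2:02}".format(dyer, dmon, dday)
    return (seqnum, int(date))  # -> (0, 20030105)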
def import_ecl_output(props,
                      pfile,
                      names=None,
                      dates=None,
                      grid=None,
                      namestyle=0):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements

    logger.debug("'namestyle' is %s (not in use)", namestyle)

    if not grid:
        raise ValueError("Grid Geometry object is missing")

    if not names:
        raise ValueError("Name list is empty (None)")

    fhandle, pclose = _get_fhandle(pfile)

    # scan valid keywords
    kwlist = utils.scan_keywords(fhandle)

    usenames = list()

    if names == "all":
        nact = grid.nactive
        ntot = grid.ntotal

        for kw in kwlist:
            kwname, _tmp1, nlen, _bs1 = kw
            if nlen in (nact, ntot):
                usenames.append(kwname)
    else:
        usenames = list(names)

    logger.info("NAMES are %s", usenames)

    lookfornames = list(set(usenames))

    possiblekw = []
    for name in lookfornames:
        namefound = False
        for kwitem in kwlist:
            possiblekw.append(kwitem[0])
            if name == kwitem[0]:
                namefound = True
        if not namefound:
            if name == "SOIL":
                pass  # will check for SWAT and SGAS later
            else:
                raise ValueError(
                    "Keyword {} not found. Possible list: {}".format(
                        name, possiblekw))

    # check valid dates, and remove invalid entries (allowing the user to be
    # a bit sloppy with DATES)

    validdates = [None]
    if dates:
        dlist = utils.scan_dates(fhandle)

        validdates = []
        alldates = []
        for date in dates:
            for ditem in dlist:
                alldates.append(str(ditem[1]))
                if str(date) == str(ditem[1]):
                    validdates.append(date)

        if not validdates:
            msg = "No valid dates given (dates: {} vs {})".format(
                dates, alldates)
            xtg.error(msg)
            raise ValueError(msg)

        if len(dates) > len(validdates):
            invaliddates = list(set(dates).difference(validdates))
            msg = ("In file {}: Some dates not found: {}, but will continue "
                   "with dates: {}".format(pfile, invaliddates, validdates))
            xtg.warn(msg)
            # raise DateNotFoundError(msg)

    use2names = list(usenames)  # to make copy

    logger.info("Use names: %s", use2names)
    logger.info("Valid dates: %s", validdates)

    # now import each property
    firstproperty = True

    for date in validdates:
        # xprop = dict()
        # soil_ok = False

        for name in use2names:

            if date is None:
                propname = name
                etype = 1
            else:
                propname = name + "_" + str(date)
                etype = 5

            prop = GridProperty()

            # use a private GridProperty function here, for convenience
            # (since we already have an open file handle)
            ier = _gridprop_import.import_eclbinary(prop,
                                                    fhandle,
                                                    name=name,
                                                    date=date,
                                                    grid=grid,
                                                    etype=etype)
            if ier != 0:
                raise ValueError(
                    "Something went wrong, IER = {} while "
                    "name={}, date={}, etype={}, propname={}".format(
                        ier, name, date, etype, propname))

            if firstproperty:
                ncol = prop.ncol
                nrow = prop.nrow
                nlay = prop.nlay
                firstproperty = False

            logger.info("Appended property %s", propname)
            props._names.append(propname)
            props._props.append(prop)

    props._ncol = ncol
    props._nrow = nrow
    props._nlay = nlay

    if validdates[0] != 0:
        props._dates = validdates

    _close_fhandle(fhandle, pclose)
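
# Small illustration of the property naming convention used above: restart
# properties (etype 5) get the date appended, while INIT properties (etype 1)
# keep the bare name. The name and date are arbitrary examples.
def _demo_propname(name="PRESSURE", date=20030105):
    return name if date is None else name + "_" + str(date)  # "PRESSURE_20030105"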
def from_file(self,
              pfile,
              fformat=None,
              name="unknown",
              grid=None,
              date=None,
              _roffapiv=1):  # _roffapiv for devel.
    """Import grid property from file, and makes an instance of this."""

    # pylint: disable=too-many-branches, too-many-statements

    self._filesrc = pfile

    # pfile may already be an open file handle rather than a file name;
    # check for this, and skip the file tests if so
    pfile_is_not_fhandle = True
    _fhandle, pclose = _get_fhandle(pfile)
    if not pclose:
        pfile_is_not_fhandle = False

    if pfile_is_not_fhandle:
        if os.path.isfile(pfile):
            logger.debug("File %s exists OK", pfile)
        else:
            raise IOError("No such file: {}".format(pfile))

        # work on file extension
        _froot, fext = os.path.splitext(pfile)
        if fformat is None or fformat == "guess":
            if not fext:
                raise ValueError("File extension missing. STOP")

            fformat = fext.lower().replace(".", "")

        logger.debug("File name to be used is %s", pfile)
        logger.debug("File format is %s", fformat)

    ier = 0
    if fformat == "roff":
        logger.info("Importing ROFF...")
        ier = import_roff(self, pfile, name, grid=grid, _roffapiv=_roffapiv)

    elif fformat.lower() == "init":
        ier = import_eclbinary(self,
                               pfile,
                               name=name,
                               etype=1,
                               date=None,
                               grid=grid)

    elif fformat.lower() == "unrst":
        if date is None:
            raise ValueError("Restart file, but no date is given")

        if isinstance(date, str):
            if "-" in date:
                date = int(date.replace("-", ""))
            elif date == "first":
                date = 0
            elif date == "last":
                date = 9
            else:
                date = int(date)

        if not isinstance(date, int):
            raise RuntimeError("Date is not int format")

        ier = import_eclbinary(self,
                               pfile,
                               name=name,
                               etype=5,
                               date=date,
                               grid=grid)

    elif fformat.lower() == "grdecl":
        ier = import_grdecl_prop(self, pfile, name=name, grid=grid)

    elif fformat.lower() == "bgrdecl":
        ier = import_bgrdecl_prop(self, pfile, name=name, grid=grid)
    else:
        logger.warning("Invalid file format %s", fformat)
        raise SystemExit("Invalid file format {}".format(fformat))

    # if grid, then append this gridprop to the current grid object
    if ier == 0:
        if grid:
            grid.append_prop(self)
    elif ier == 22:
        raise xtgeo.DateNotFoundError(
            "Date {} not found when importing {}".format(date, name))
    elif ier == 23:
        raise xtgeo.KeywordNotFoundError(
            "Keyword {} not found for date {} when importing".format(
                name, date))
    elif ier == 24:
        raise xtgeo.KeywordFoundNoDateError(
            "Keyword {} found but not for date "
            "{} when importing".format(name, date))
    elif ier == 25:
        raise xtgeo.KeywordNotFoundError(
            "Keyword {} not found when importing".format(name))
    else:
        raise RuntimeError("Something went wrong, code {}".format(ier))

    return self
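
# Hedged usage sketch for the public from_file() entry point above; the file
# name, property name and date are hypothetical, and a real call needs an
# existing xtgeo Grid plus a matching restart file on disk.
def _demo_from_file(grd):
    import xtgeo

    return xtgeo.GridProperty().from_file(
        "CASE.UNRST",        # hypothetical Eclipse restart file
        fformat="unrst",
        name="PRESSURE",
        date="2003-01-05",   # "first", "last" or YYYYMMDD are also accepted above
        grid=grd,            # on success the property is appended to grd
    )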
def _import_eclbinary(self, pfile, name=None, etype=1, date=None, grid=None):
    """Import, private to this routine.

    Raises:
        DateNotFoundError: If restart do not contain requested date.
        KeywordFoundNoDateError: If keyword is found but not at given date.
        KeywordNotFoundError: If Keyword is not found.
        RuntimeError: Mismatch in grid vs property, etc.

    """
    # This function requires simplification!
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements

    fhandle, pclose = _get_fhandle(pfile)

    nentry = 0

    datefound = True
    if etype == 5:
        datefound = False
        logger.info("Look for date %s", date)

        # scan for date and find SEQNUM entry number
        dtlist = utils.scan_dates(fhandle)
        if date == 0:
            date = dtlist[0][1]
        elif date == 9:
            date = dtlist[-1][1]

        logger.info("Redefined date is %s", date)

        for ientry, dtentry in enumerate(dtlist):
            if str(dtentry[1]) == str(date):
                datefound = True
                nentry = ientry
                break

        if not datefound:
            msg = "In {}: Date {} not found, nentry={}".format(
                pfile, date, nentry)
            xtg.warn(msg)
            raise xtgeo.DateNotFoundError(msg)

    # scan file for property
    logger.info("Make kwlist")
    kwlist = utils.scan_keywords(fhandle,
                                 fformat="xecl",
                                 maxkeys=100000,
                                 dataframe=False,
                                 dates=True)

    # the first INTEHEAD record is needed to verify the grid dimensions:
    for kwitem in kwlist:
        if kwitem[0] == "INTEHEAD":
            kwname, kwtype, kwlen, kwbyte, kwdate = kwitem
            break

    # read INTEHEAD record:
    intehead = eclbin_record(fhandle, kwname, kwlen, kwtype, kwbyte)
    ncol, nrow, nlay = intehead[8:11].tolist()

    self._ncol = ncol
    self._nrow = nrow
    self._nlay = nlay

    logger.info("Grid dimensions in INIT or RESTART file: %s %s %s", ncol,
                nrow, nlay)

    logger.info("Grid dimensions from GRID file: %s %s %s", grid.ncol,
                grid.nrow, grid.nlay)

    if grid.ncol != ncol or grid.nrow != nrow or grid.nlay != nlay:
        msg = "In {}: Errors in dimensions prop: {} {} {} vs grid: {} {} {} ".format(
            pfile, ncol, nrow, nlay, grid.ncol, grid.ncol, grid.nlay)
        raise RuntimeError(msg)

    # Restarts (etype == 5):
    # there are cases where a keyword does not exist for all dates, e.g. 'RV'.
    # The trick is to check for dates also...

    kwfound = False
    datefoundhere = False
    usedate = "0"
    restart = False

    if etype == 5:
        usedate = str(date)
        restart = True

    for kwitem in kwlist:
        kwname, kwtype, kwlen, kwbyte, kwdate = kwitem
        logger.debug("Keyword %s -  date: %s usedate: %s", kwname, kwdate,
                     usedate)
        if name == kwname:
            kwfound = True

        if name == kwname and usedate == str(kwdate):
            logger.info("Keyword %s ok at date %s", name, usedate)
            kwname, kwtype, kwlen, kwbyte, kwdate = kwitem
            datefoundhere = True
            break

    if restart:
        if datefound and not kwfound:
            msg = "For {}: Date <{}> is found, but not keyword <{}>".format(
                pfile, date, name)
            xtg.warn(msg)
            raise xtgeo.KeywordNotFoundError(msg)

        if not datefoundhere and kwfound:
            msg = "For {}: The keyword <{}> exists but not for " "date <{}>".format(
                pfile, name, date)
            xtg.warn(msg)
            raise xtgeo.KeywordFoundNoDateError(msg)
    else:
        if not kwfound:
            msg = "For {}: The keyword <{}> is not found".format(pfile, name)
            xtg.warn(msg)
            raise xtgeo.KeywordNotFoundError(msg)

    # read record:
    values = eclbin_record(fhandle, kwname, kwlen, kwtype, kwbyte)

    if kwtype == "INTE":
        self._isdiscrete = True
        use_undef = xtgeo.UNDEF_INT

        # make the code list
        uniq = np.unique(values).tolist()
        codes = dict(zip(uniq, uniq))
        codes = {key: str(val) for key, val in codes.items()}  # val: strings
        self.codes = codes

    else:
        self._isdiscrete = False
        values = values.astype(np.float64)  # cast REAL (float32) to float64
        use_undef = xtgeo.UNDEF
        self.codes = {}

    # Arrays from Eclipse INIT or UNRST usually hold values for active cells
    # only; use the ACTNUM index array for vectorized numpy remapping
    actnum = grid.get_actnum().values
    allvalues = np.zeros((ncol * nrow * nlay), dtype=values.dtype) + use_undef

    msg = "\n"
    msg = msg + "grid.actnum_indices.shape[0] = {}\n".format(
        grid.actnum_indices.shape[0])
    msg = msg + "values.shape[0] = {}\n".format(values.shape[0])
    msg = msg + "ncol nrow nlay {} {} {}, nrow*nrow*nlay = {}\n".format(
        ncol, nrow, nlay, ncol * nrow * nlay)

    logger.info(msg)

    if grid.actnum_indices.shape[0] == values.shape[0]:
        allvalues[grid.get_actnum_indices(order="F")] = values
    elif values.shape[0] == ncol * nrow * nlay:  # often case for PORV array
        allvalues = values.copy()
    else:
        msg = ("BUG somehow... Is the file corrupt? If not contact "
               "the library developer(s)!\n" + msg)
        raise SystemExit(msg)

    allvalues = allvalues.reshape((ncol, nrow, nlay), order="F")
    allvalues = np.asanyarray(allvalues, order="C")
    allvalues = ma.masked_where(actnum < 1, allvalues)

    _close_fhandle(fhandle, pclose)

    self._values = allvalues

    if etype == 1:
        self._name = name
    else:
        self._name = name + "_" + str(date)
        self._date = date

    return 0
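
# Self-contained numpy sketch of the active-cell remapping performed above:
# values stored for active cells only are scattered into a full-size array via
# the ACTNUM indices, reshaped in Fortran order and masked. Toy 2x2x1 grid with
# made-up numbers.
def _demo_actnum_remap():
    import numpy as np
    import numpy.ma as ma

    ncol, nrow, nlay = 2, 2, 1
    actnum = np.array([1, 0, 1, 1])                # flattened ACTNUM (F-order)
    active_values = np.array([10.0, 20.0, 30.0])   # one value per active cell

    allvalues = np.full(ncol * nrow * nlay, 1e33)  # UNDEF-like fill value
    allvalues[np.flatnonzero(actnum)] = active_values

    allvalues = allvalues.reshape((ncol, nrow, nlay), order="F")
    allvalues = np.asanyarray(allvalues, order="C")
    actnum3d = actnum.reshape((ncol, nrow, nlay), order="F")
    return ma.masked_where(actnum3d < 1, allvalues)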
def import_ecl_egrid(self, gfile):
    """Import, private to this routine.

    """
    fhandle, pclose = _get_fhandle(gfile)

    # scan file for property
    logger.info("Make kwlist by scanning")
    kwlist = utils.scan_keywords(fhandle,
                                 fformat="xecl",
                                 maxkeys=1000,
                                 dataframe=False,
                                 dates=False)
    bpos = {}
    for name in ("COORD", "ZCORN", "ACTNUM", "MAPAXES"):
        bpos[name] = -1  # initially

    for kwitem in kwlist:
        kwname, kwtype, kwlen, kwbyte = kwitem
        if kwname == "GRIDHEAD":
            # read GRIDHEAD record:
            gridhead = eclbin_record(fhandle, "GRIDHEAD", kwlen, kwtype,
                                     kwbyte)
            ncol, nrow, nlay = gridhead[1:4].tolist()
            logger.info("%s %s %s", ncol, nrow, nlay)
        elif kwname in ("COORD", "ZCORN", "ACTNUM"):
            bpos[kwname] = kwbyte
        elif kwname == "MAPAXES":  # not always present
            bpos[kwname] = kwbyte

    self._ncol = ncol
    self._nrow = nrow
    self._nlay = nlay

    logger.info("Grid dimensions in EGRID file: %s %s %s", ncol, nrow, nlay)

    # allocate dimensions:
    ntot = self._ncol * self._nrow * self._nlay
    ncoord = (self._ncol + 1) * (self._nrow + 1) * 2 * 3
    nzcorn = self._ncol * self._nrow * (self._nlay + 1) * 4

    self._p_coord_v = _cxtgeo.new_doublearray(ncoord)
    self._p_zcorn_v = _cxtgeo.new_doublearray(nzcorn)
    self._p_actnum_v = _cxtgeo.new_intarray(ntot)

    nact = _cxtgeo.grd3d_imp_ecl_egrid(
        fhandle,
        self._ncol,
        self._nrow,
        self._nlay,
        bpos["MAPAXES"],
        bpos["COORD"],
        bpos["ZCORN"],
        bpos["ACTNUM"],
        self._p_coord_v,
        self._p_zcorn_v,
        self._p_actnum_v,
        XTGDEBUG,
    )

    self._nactive = nact

    _close_fhandle(fhandle, pclose)
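
# Self-contained check of the corner-point array sizes allocated above (and in
# the binary GRDECL importer below); the grid dimensions are arbitrary example
# numbers.
def _demo_cornerpoint_sizes(ncol=40, nrow=64, nlay=14):
    ntot = ncol * nrow * nlay                  # one ACTNUM entry per cell
    ncoord = (ncol + 1) * (nrow + 1) * 2 * 3   # top and base (x, y, z) per pillar
    nzcorn = ncol * nrow * (nlay + 1) * 4      # 4 corner depths per cell column per horizon
    return ntot, ncoord, nzcorn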
def import_ecl_bgrdecl(self, gfile):
    """Import binary files with GRDECL layout"""

    fhandle, pclose = _get_fhandle(gfile)

    # scan file for properties; these have a binary format similar to e.g. EGRID
    logger.info("Make kwlist by scanning")
    kwlist = utils.scan_keywords(fhandle,
                                 fformat="xecl",
                                 maxkeys=1000,
                                 dataframe=False,
                                 dates=False)
    bpos = {}
    needkwlist = ["SPECGRID", "COORD", "ZCORN", "ACTNUM"]
    optkwlist = ["MAPAXES"]
    for name in needkwlist + optkwlist:
        bpos[name] = -1  # initially

    for kwitem in kwlist:
        kwname, kwtype, kwlen, kwbyte = kwitem
        if kwname == "SPECGRID":
            # read grid geometry record:
            specgrid = eclbin_record(fhandle, "SPECGRID", kwlen, kwtype,
                                     kwbyte)
            ncol, nrow, nlay = specgrid[0:3].tolist()
            logger.info("%s %s %s", ncol, nrow, nlay)
        elif kwname in needkwlist:
            bpos[kwname] = kwbyte
        elif kwname == "MAPAXES":  # not always present
            bpos[kwname] = kwbyte

    self._ncol = ncol
    self._nrow = nrow
    self._nlay = nlay

    logger.info("Grid dimensions in binary GRDECL file: %s %s %s", ncol, nrow,
                nlay)

    # allocate dimensions:
    ntot = self._ncol * self._nrow * self._nlay
    ncoord = (self._ncol + 1) * (self._nrow + 1) * 2 * 3
    nzcorn = self._ncol * self._nrow * (self._nlay + 1) * 4

    self._p_coord_v = _cxtgeo.new_doublearray(ncoord)
    self._p_zcorn_v = _cxtgeo.new_doublearray(nzcorn)
    self._p_actnum_v = _cxtgeo.new_intarray(ntot)

    nact = _cxtgeo.grd3d_imp_ecl_egrid(
        fhandle,
        self._ncol,
        self._nrow,
        self._nlay,
        bpos["MAPAXES"],
        bpos["COORD"],
        bpos["ZCORN"],
        bpos["ACTNUM"],
        self._p_coord_v,
        self._p_zcorn_v,
        self._p_actnum_v,
        XTGDEBUG,
    )

    self._nactive = nact

    _close_fhandle(fhandle, pclose)