Example #1
    def _open_file_checks(self, bdf_filename, basename=False):
        # type: (str, bool) -> None
        """
        Verifies that the BDF about to be opened:
           1.  Exists
           2.  Is Unique
           3.  Isn't an OP2
           4.  Is a File

        Parameters
        ----------
        bdf_filename : str
            the bdf filename to open
        basename : bool; default=False
            only take the basename of the bdf
        """
        if basename:
            bdf_filename_inc = os.path.join(self.include_dir,
                                            os.path.basename(bdf_filename))
        else:
            bdf_filename_inc = os.path.join(self.include_dir, bdf_filename)

        if not os.path.exists(_filename(bdf_filename_inc)):
            msg = 'No such bdf_filename: %r\n' % bdf_filename_inc
            msg += 'cwd: %r\n' % os.getcwd()
            msg += 'include_dir: %r\n' % self.include_dir
            msg += print_bad_path(bdf_filename_inc)
            print(msg)
            raise IOError(msg)
        elif bdf_filename_inc.endswith('.op2'):
            msg = 'Invalid filetype: bdf_filename=%r' % bdf_filename_inc
            print(msg)
            raise IOError(msg)
        bdf_filename = bdf_filename_inc

        if bdf_filename in self.active_filenames:
            msg = 'bdf_filename=%s is already active.\nactive_filenames=%s' \
                % (bdf_filename, self.active_filenames)
            print(msg)
            raise RuntimeError(msg)
        elif os.path.isdir(_filename(bdf_filename)):
            current_filename = self.active_filename if len(
                self.active_filenames) > 0 else 'None'
            msg = 'Found a directory: bdf_filename=%r\ncurrent_file=%s' % (
                bdf_filename_inc, current_filename)
            print(msg)
            raise IOError(msg)
        elif not os.path.isfile(_filename(bdf_filename)):
            msg = 'Not a file: bdf_filename=%r' % bdf_filename
            print(msg)
            raise IOError(msg)
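A minimal standalone sketch of the same four checks, assuming only the standard
library (the pyNastran helpers _filename and print_bad_path are intentionally
omitted):

import os

def open_file_checks_sketch(bdf_filename, active_filenames=()):
    """raises if the path is missing, is an OP2, is already active, or isn't a file"""
    if not os.path.exists(bdf_filename):
        raise IOError('No such bdf_filename: %r\ncwd: %r' % (
            bdf_filename, os.getcwd()))
    if bdf_filename.endswith('.op2'):
        raise IOError('Invalid filetype: bdf_filename=%r' % bdf_filename)
    if bdf_filename in active_filenames:
        raise RuntimeError('bdf_filename=%s is already active.\n'
                           'active_filenames=%s' % (bdf_filename, active_filenames))
    if os.path.isdir(bdf_filename):
        raise IOError('Found a directory: bdf_filename=%r' % bdf_filename)
    if not os.path.isfile(bdf_filename):
        raise IOError('Not a file: bdf_filename=%r' % bdf_filename)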
Example #2
    def _open_file(self,
                   bdf_filename,
                   basename=False,
                   check=True,
                   encoding=None):
        """
        Opens a new bdf_filename with the proper encoding and include directory

        Parameters
        ----------
        bdf_filename : str
            the filename to open
        basename : bool; default=False
            should the basename of bdf_filename be appended to the include directory
        check : bool; default=True
            you can disable the checks
        encoding : str; default=None
            the unicode encoding; None -> self.encoding
        """
        if encoding is None:
            encoding = self.encoding
        if basename:
            bdf_filename_inc = os.path.join(self.include_dir,
                                            os.path.basename(bdf_filename))
        else:
            bdf_filename_inc = os.path.join(self.include_dir, bdf_filename)

        self._validate_open_file(bdf_filename, bdf_filename_inc, check)

        self.log.debug('opening %r' % bdf_filename_inc)
        self.active_filenames.append(bdf_filename_inc)

        #print('ENCODING - _open_file=%r' % self.encoding)
        #self._check_pynastran_header(lines)
        bdf_file = open(_filename(bdf_filename_inc), 'r', encoding=encoding)
        return bdf_file
Example #3
def load_csv(out_filename, encoding='latin1'):
    """
    The GUI CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError(
            'extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(
            file_obj, ext, force_float=False)

        try:
            #A = loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj,
                             dtype=dtype,
                             comments='#',
                             delimiter=delimiter)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)
    return A, fmt_dict, names
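A hedged usage sketch: the `# name(%fmt)` header convention below is assumed to
match what _load_format_header parses, and the exact fmt_dict values are an
assumption:

csv_text = (
    '# eid(%i), cp(%f)\n'
    '1, 0.1\n'
    '2, 0.2\n'
)
with open('results.csv', 'w') as csv_file:  # 'results.csv' is a made-up name
    csv_file.write(csv_text)

A, fmt_dict, names = load_csv('results.csv')
# names -> ['eid', 'cp']; with a dict dtype, A maps each name to a column array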
Example #4
    def _open_file(self, bdf_filename, basename=False, check=True):
        """
        Opens a new bdf_filename with the proper encoding and include directory

        Parameters
        ----------
        bdf_filename : str
            the filename to open
        basename : bool; default=False
            should the basename of bdf_filename be appended to the include directory
        check : bool; default=True
            you can disable the checks
        """
        if basename:
            bdf_filename_inc = os.path.join(self.include_dir,
                                            os.path.basename(bdf_filename))
        else:
            bdf_filename_inc = os.path.join(self.include_dir, bdf_filename)

        self._validate_open_file(bdf_filename, bdf_filename_inc, check)

        self.log.debug('opening %r' % bdf_filename_inc)
        self.active_filenames.append(bdf_filename_inc)

        #print('ENCODING - _open_file=%r' % self.encoding)
        bdf_file = codec_open(_filename(bdf_filename_inc),
                              'r',
                              encoding=self.encoding)
        return bdf_file
Example #5
    def read_cart3d(self, infilename, result_names=None):
        """extracts the points, elements, and Cp"""
        self.infilename = infilename
        self.log.info("---starting reading cart3d file...%r---" % self.infilename)

        if is_binary_file(infilename):
            with open(infilename, 'rb') as self.infile:
                npoints, nelements, nresults = self._read_header_binary()
                self.points = self._read_points_binary(npoints)
                self.elements = self._read_elements_binary(nelements)
                self.regions = self._read_regions_binary(nelements)
                # TODO: loads
        else:
            with codec_open(_filename(infilename), 'r', encoding=self._encoding) as self.infile:
                npoints, nelements, nresults = self._read_header_ascii()
                self.points = self._read_points_ascii(npoints)
                self.elements = self._read_elements_ascii(nelements)
                self.regions = self._read_regions_ascii(nelements)
                self._read_results_ascii(0, self.infile, nresults, result_names=result_names)

        self.log.debug("npoints=%s nelements=%s" % (self.npoints, self.nelements))
        self.log.info("---finished reading cart3d file...%r---" % self.infilename)
        assert self.npoints > 0, 'npoints=%s' % self.npoints
        assert self.nelements > 0, 'nelements=%s' % self.nelements
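Hedged usage sketch; the reader class name and its construction are assumptions
about the surrounding pyNastran code:

# model = Cart3D(log=None, debug=False)  # assumed owner of read_cart3d
# model.read_cart3d('model.tri')         # 'model.tri' is a made-up filename
# points, elements = model.points, model.elements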
Example #6
def load_deflection_csv(out_filename, encoding='latin1'):
    """
    The GUI deflection CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError(
            'extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(
            file_obj, ext, force_float=False)

        try:
            #A = np.loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj, comments='#', delimiter=delimiter)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)

    names_without_index = names[1:]
    fmt_dict_without_index = {
        key: fmt_dict[key]
        for key in names_without_index
    }

    nnames_without_index = len(names_without_index)
    nexpected_results = 1 + 3 * nnames_without_index

    try:
        _nrows, ncols = A.shape
    except ValueError:
        msg = ('A should be (nnodes, 1+ndeflection_results); '
               'A.shape=%s nexpected_results=%s names=%s' %
               (str(A.shape), nexpected_results, names))
        raise ValueError(msg)

    if ncols != nexpected_results:
        msg = 'A.shape=%s ncols=%s nexpected_results=%s names=%s nnames_without_index=%s' % (
            str(A.shape), ncols, nexpected_results, names,
            nnames_without_index)
        raise ValueError(msg)

    B = {}
    for i, name in enumerate(names_without_index):
        B[name] = A[:, 1 + 3 * i:1 + 3 * i + 3]

    assert len(B) == len(fmt_dict_without_index), (
        'B.keys()=%s fmt_dict.keys()=%s' % (
            list(B.keys()), list(fmt_dict_without_index.keys())))
    assert len(B) == len(names_without_index), (
        'B.keys()=%s names=%s' % (list(B.keys()), names_without_index))
    return B, fmt_dict_without_index, names_without_index
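A hedged usage sketch: one index column plus three columns (x, y, z) per named
result, matching nexpected_results = 1 + 3*n; the header convention is assumed
to match _load_format_header:

csv_text = (
    '# nid(%i), dxyz\n'
    '1, 0.0, 0.0, 0.1\n'
    '2, 0.0, 0.1, 0.2\n'
)
with open('deflection.csv', 'w') as csv_file:  # made-up filename
    csv_file.write(csv_text)

B, fmt_dict, names = load_deflection_csv('deflection.csv')
# names -> ['dxyz']; B['dxyz'] is an (nnodes, 3) array of x/y/z deflections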
Example #7
    def _validate_open_file(self, bdf_filename, bdf_filename_inc, check=True):
        """
        checks that the file doesn't have obvious errors
         - hasn't been used
         - not a directory
         - is a file

        Parameters
        ----------
        bdf_filename : str
           the current bdf filename
        bdf_filename_inc : str
           the next bdf filename
        check : bool; default=True
            you can disable the checks

        Raises
        ------
        RuntimeError : file is active
        IOError : Invalid file type
        """
        if check:
            if not os.path.exists(_filename(bdf_filename_inc)):
                msg = 'No such bdf_filename: %r\n' % bdf_filename_inc
                msg += 'cwd: %r\n' % os.getcwd()
                msg += 'include_dir: %r\n' % self.include_dir
                msg += print_bad_path(bdf_filename_inc)
                raise IOError(msg)
            elif bdf_filename_inc.endswith('.op2'):
                raise IOError('Invalid filetype: bdf_filename=%r' %
                              bdf_filename_inc)

            bdf_filename = bdf_filename_inc
            if bdf_filename in self.active_filenames:
                msg = 'bdf_filename=%s is already active.\nactive_filenames=%s' \
                    % (bdf_filename, self.active_filenames)
                raise RuntimeError(msg)
            elif os.path.isdir(_filename(bdf_filename)):
                current_fname = self.active_filename if len(
                    self.active_filenames) > 0 else 'None'
                raise IOError(
                    'Found a directory: bdf_filename=%r\ncurrent_file=%s' %
                    (bdf_filename_inc, current_fname))
            elif not os.path.isfile(_filename(bdf_filename)):
                raise IOError('Not a file: bdf_filename=%r' % bdf_filename)
Example #8
    def _dump_file(self, bdf_dump_filename, lines, i):
        # type: (str, List[str], int) -> None
        """
        Writes a BDF up to some failed line index

        Parameters
        ----------
        bdf_dump_filename : str
            the bdf filename to dump
        lines : List[str]
            the entire list of lines
        i : int
            the last index to write
        """
        with open(_filename(bdf_dump_filename), 'w',
                  encoding=self.encoding) as crash_file:
            for line in lines[:i]:
                crash_file.write(line)
Example #9
def load_deflection_csv(out_filename, encoding='latin1'):
    """
    The GUI deflection CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError(
            'extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with codec_open(_filename(out_filename), 'r',
                    encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(
            file_obj, ext, force_float=False)
        nnames = len(names)

        try:
            #A = np.loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj, delimiter=delimiter)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)

        try:
            nrows, ncols = A.shape
        except ValueError:
            msg = 'A should be (nnodes, 3*nnames); A.shape=%s nnames*3=%s names=%s' % (
                str(A.shape), nnames * 3, names)
            raise ValueError(msg)

        if ncols != (nnames * 3):
            msg = 'A.shape=%s ncols=%s nnames*3=%s names=%s' % (str(
                A.shape), ncols, nnames * 3, names)
            raise RuntimeError(msg)
    B = {}
    for i, name in enumerate(names):
        B[name] = A[:, 3 * i:3 * i + 3]
    return B, fmt_dict, names
Example #10
    def read_cart3d(self, infilename, result_names=None):
        """extracts the points, elements, and Cp"""
        self.infilename = infilename
        self.log.info("---reading cart3d...%r---" % self.infilename)

        if is_binary_file(infilename):
            with open(infilename, 'rb') as self.infile:
                try:
                    npoints, nelements, nresults = self._read_header_binary()
                    self.points = self._read_points_binary(npoints)
                    self.elements = self._read_elements_binary(nelements)
                    self.regions = self._read_regions_binary(nelements)
                    # TODO: loads
                except Exception:
                    msg = 'failed reading %r' % infilename
                    self.log.error(msg)
                    raise

        else:
            with codec_open(_filename(infilename),
                            'r',
                            encoding=self._encoding) as self.infile:
                try:
                    npoints, nelements, nresults = self._read_header_ascii()
                    self.points = self._read_points_ascii(npoints)
                    self.elements = self._read_elements_ascii(nelements)
                    self.regions = self._read_regions_ascii(nelements)
                    self._read_results_ascii(0,
                                             self.infile,
                                             nresults,
                                             result_names=result_names)
                except Exception:
                    msg = 'failed reading %r' % infilename
                    self.log.error(msg)
                    raise

        self.log.debug("npoints=%s nelements=%s" %
                       (self.npoints, self.nelements))
        assert self.npoints > 0, 'npoints=%s' % self.npoints
        assert self.nelements > 0, 'nelements=%s' % self.nelements
Example #11
def load_deflection_csv(out_filename, encoding='latin1'):
    """
    The GUI deflection CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError('extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with codec_open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(file_obj, ext, force_float=False)
        nnames = len(names)

        try:
            #A = np.loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj, delimiter=delimiter)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)

        try:
            nrows, ncols = A.shape
        except ValueError:
            msg = 'A should be (nnodes, 3*nnames); A.shape=%s nnames*3=%s names=%s' % (
                str(A.shape), nnames*3, names)
            raise ValueError(msg)

        if ncols != (nnames * 3):
            msg = 'A.shape=%s ncols=%s nnames*3=%s names=%s' % (
                str(A.shape), ncols, nnames*3, names)
            raise RuntimeError(msg)
    B = {}
    for i, name in enumerate(names):
        B[name] = A[:, 3*i:3*i+3]
    return B, fmt_dict, names
Example #12
def _show_bad_file(self, bdf_filename, encoding, nlines_previous=10):
    # type: (Union[str, StringIO], str, int) -> None
    """
    Prints the nlines_previous lines (10 by default) before the UnicodeDecodeError occurred.

    Parameters
    ----------
    bdf_filename : str
        the filename to print the lines of
    encoding : str
        the file encoding
    nlines_previous : int; default=10
        the number of lines to show
    """
    lines = []  # type: List[str]
    print('ENCODING - show_bad_file=%r' % encoding)

    with codec_open(_filename(bdf_filename), 'r',
                    encoding=encoding) as bdf_file:
        iline = 0
        nblank = 0
        while True:
            try:
                line = bdf_file.readline().rstrip()
            except UnicodeDecodeError:
                iline0 = max([iline - nlines_previous, 0])
                self.log.error('filename=%s' % bdf_filename)
                for iline1, line in enumerate(lines[iline0:iline]):
                    self.log.error('lines[%i]=%r' % (iline0 + iline1, line))
                msg = "\n%s encoding error on line=%s of %s; not '%s'" % (
                    encoding, iline, bdf_filename, encoding)
                raise RuntimeError(msg)
            if line:
                nblank = 0
            else:
                nblank += 1
            if nblank == 20:
                raise RuntimeError('20 blank lines')
            iline += 1
            lines.append(line)
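A standalone sketch of the failure this helper reports: a byte that is valid
latin1 but not valid utf-8 trips UnicodeDecodeError on readline():

with open('bad_encoding.bdf', 'wb') as bdf_file:  # made-up filename
    bdf_file.write(b'CEND\n')
    bdf_file.write(b'$ stray latin1 byte: \xe9\n')  # 0xE9 is not valid utf-8 here

with open('bad_encoding.bdf', 'r', encoding='utf-8') as bdf_file:
    try:
        while bdf_file.readline():
            pass
    except UnicodeDecodeError as error:
        print('decode failed: %s' % error)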
Example #13
def load_csv(out_filename, encoding='latin1'):
    """
    The GUI CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError('extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with codec_open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        names, fmt_dict, dtype, delimiter = _load_format_header(file_obj, ext, force_float=False)
        try:
            #A = loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj, dtype=dtype, delimiter=delimiter)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (
                ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)
    return A, fmt_dict, names
Example #14
def load_user_geom(fname, encoding='latin1'):
    """
    Loads a file of the form:

    # all supported cards
    #  - GRID
    #  - BAR
    #  - TRI
    #  - QUAD
    #
    # doesn't support:
    #  - solid elements
    #  - element properties
    #  - custom colors
    #  - coordinate systems
    #  - materials
    #  - loads
    #  - results

    #    id  x    y    z
    GRID, 1, 0.2, 0.3, 0.3
    GRID, 2, 1.2, 0.3, 0.3
    GRID, 3, 2.2, 0.3, 0.3
    GRID, 4, 5.2, 0.3, 0.3
    grid, 5, 5.2, 1.3, 2.3  # case insensitive

    #    ID, nodes
    BAR,  1, 1, 2

    #   eid, n1,n2,n3,n4
    TRI,  2, 1, 2, 3
    # this is a comment

    #   eid, n1,n2,n3,n4
    QUAD, 3, 1, 5, 3, 4
    QUAD, 4, 1, 2, 3, 4  # this is after a blank line
    """
    with codec_open(_filename(fname), 'r', encoding=encoding) as user_geom:
        lines = user_geom.readlines()

    grid_ids = []
    xyz = []
    bars = []
    tris = []
    quads = []
    #lines2 = []
    for line in lines:
        line2 = line.strip().split('#')[0].upper()
        if line2:
            sline = line2.split(',')
            if line2.startswith('GRID'):
                assert len(sline) == 5, sline
                grid_ids.append(sline[1])
                xyz.append(sline[2:])
            elif line2.startswith('BAR'):
                assert len(sline) == 4, sline
                bars.append(sline[1:])
            elif line2.startswith('TRI'):
                assert len(sline) == 5, sline
                tris.append(sline[1:])
            elif line2.startswith('QUAD'):
                assert len(sline) == 6, sline
                quads.append(sline[1:])
            else:
                print(sline)

    grid_ids = np.array(grid_ids, dtype='int32')
    xyz = np.array(xyz, dtype='float32')
    tris = np.array(tris, dtype='int32')
    quads = np.array(quads, dtype='int32')
    bars = np.array(bars, dtype='int32')
    return grid_ids, xyz, bars, tris, quads
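A hedged usage sketch using the mini-format documented in the docstring above:

user_geom_text = (
    '#    id  x    y    z\n'
    'GRID, 1, 0.0, 0.0, 0.0\n'
    'GRID, 2, 1.0, 0.0, 0.0\n'
    'GRID, 3, 1.0, 1.0, 0.0\n'
    'TRI,  1, 1, 2, 3\n'
)
with open('user_geom.csv', 'w') as geom_file:  # made-up filename
    geom_file.write(user_geom_text)

grid_ids, xyz, bars, tris, quads = load_user_geom('user_geom.csv')
# xyz -> (3, 3) float32; tris -> [[1, 1, 2, 3]] as [eid, n1, n2, n3]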
Example #15
def load_csv(out_filename, encoding='latin1'):
    """
    The GUI CSV loading function.

    Considers:
      - extension in determining how to load a file (e.g. commas or not)
      - header line of file for information regarding data types
    """
    ext = os.path.splitext(out_filename)[1].lower()
    if ext not in ['.csv', '.dat', '.txt']:
        raise NotImplementedError('extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

    with codec_open(_filename(out_filename), 'r', encoding=encoding) as file_obj:
        header_line = file_obj.readline().strip()
        if not header_line.startswith('#'):
            msg = 'Expected file of the form:\n'
            if ext in ['.dat', '.txt']:
                msg += '# var1 var2\n'
                msg += '1 2\n'
                msg += '3 4\n'
                msg += '\nor:\n'
                msg += '# var1(%i) var2(%f)\n'
                msg += '1 2.1\n'
                msg += '3 4.2\n'
            elif ext == '.csv':
                msg += '# var1, var2\n'
                msg += '1, 2\n'
                msg += '3, 4\n'
                msg += '\nor:\n'
                msg += '# var1(%i), var2(%f)\n'
                msg += '1, 2.1\n'
                msg += '3, 4.2\n'
            else:
                msg = 'extension=%r is not supported (use .dat, .txt, or .csv)' % ext
                raise NotImplementedError(msg)
            raise SyntaxError(msg)

        header_line = header_line.lstrip('# \t').strip()
        if ext in ['.dat', '.txt']:
            headers = header_line.split()
        elif ext == '.csv':
            headers = header_line.split(',')
        else:
            msg = 'extension=%r is not supported (use .dat, .txt, or .csv)' % ext
            raise NotImplementedError(msg)
        headers = [header.strip() for header in headers if header.strip()]

        fmt_dict = {}
        names = []
        dtype_fmts = []
        for iheader, header in enumerate(headers):
            # TODO: works for making a \n, but screws up the sidebar
            #       and scale
            header2 = header.strip()#.replace('\\n', '\n')
            dtype_fmt = 'float'

            str_fmt = '%.3f'
            if header2.endswith(')') and '%' in header2:
                header2_temp, fmt_temp = header2[:-1].rsplit('(', 1)
                header2_temp = header2_temp.strip()
                fmt = fmt_temp.strip()
                #('S1', 'i4', 'f4')
                if '%' in fmt:
                    #fmt_temp = fmt_temp.replace('%', '%%')
                    if 'i' in fmt:
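                        # exercise the integer format string; a malformed fmt raises here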
                        fmt % 5
                        dtype_fmt = 'int32'
                        str_fmt = '%i'

                    elif 'g' in fmt or 'e' in fmt or 'f' in fmt or 's' in fmt:
                        dtype_fmt = 'float32'
                        str_fmt = fmt
                    else:
                        raise TypeError('iheader=%s header=%r fmt=%r' % (iheader, header2, fmt))
                else:
                    # default to float32
                    dtype_fmt = 'float32'
            else:
                dtype_fmt = 'float32'
                header2_temp = header2

            names.append(header2_temp)
            dtype_fmts.append(dtype_fmt)
            fmt_dict[header2_temp] = str_fmt

        if ext in ['.dat', '.txt']:
            delimiter = None
        elif ext == '.csv':
            delimiter = ','
        else:
            raise NotImplementedError('extension=%r is not supported (use .dat, .txt, or .csv)' % ext)

        dtype = {
            'names': tuple(names),
            'formats': tuple(dtype_fmts),
        }
        try:
            #A = loadtxt(file_obj, dtype=dtype, delimiter=delimiter)
            A = loadtxt_nice(file_obj, dtype=dtype, delimiter=delimiter)
        except Exception:
            traceback.print_exc(file=sys.stdout)
            msg = 'extension=%r nheaders=%s delimiter=%r dtype=%s' % (ext, len(names), delimiter, dtype)
            raise RuntimeError(msg)

    return A, fmt_dict, names
Example #16
def loadtxt_nice(filename, delimiter=None, skiprows=0, comment='#', dtype=np.float64,
                 converters=None, usecols=None, unpack=False,
                 ndmin=0,):
    """
    Reimplementation of numpy's loadtxt that doesn't complain about
    trailing commas (or other delimiter) that vary from one line to
    the other.  It also provides better error messages.

    Parameters
    ----------
    filename : varies
        str : the filename to load
        file : the file object to load
        cStringIO/StringIO : a file-like object
    delimiter : str; default=None (any whitespace)
        the field splitter (e.g. comma or tab)
    skiprows : int; default=0
        the number of rows to skip
    comment : str, default='#'
        the comment line
    dtype : numpy.dtype; default=None (float)
        allows for alternate casting
        int32, float32, ...
        dtype = {
            names : ('A', 'B', 'C'),
            formats : ('int32', 'float32', 'float64'),
        }
    usecols : sequence; default=None
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  When used with a structured
        data-type, arrays are returned for each field.  Default is False.

    converters : dict; default=None
        not supported
        crashes if not None
        A dictionary mapping column number to a function that will convert
        that column to a float.  E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``.  Converters can also be used to
        provide a default value for missing data (but see also `genfromtxt`):
        ``converters = {3: lambda s: float(s.strip() or 0)}``.  Default: None.

    ndmin : int, optional
        crashes if not 0
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

    Returns
    -------
    data : (nrows, ncols) ndarray
        the data object
    """
    if converters is not None:
        raise NotImplementedError('converters=%r must be None' % converters)
    #if ndmin is not [0, 2]: ## TODO: remove 2
        #raise NotImplementedError('ndmin=%r must be 0' % ndmin)

    if delimiter is None:
        ending_characters = '\n\r \t'
    else:
        ending_characters = '\n\r \t' + delimiter

    data = []
    if isinstance(filename, StringIO):
        lines = filename.getvalue().split('\n')[skiprows:]
        filename = None
    elif is_file_obj(filename):
        lines = filename.readlines()[skiprows:]
        filename = filename.name
    else:
        with codec_open(_filename(filename), 'r') as file_obj:
            if skiprows:
                lines = file_obj.readlines()[skiprows:]
            else:
                lines = file_obj.readlines()

    if usecols:
        for usecol in usecols:
            assert isinstance(usecol, int), 'usecol=%s usecols=%s' % (usecol, usecols)
        assert len(np.unique(usecols)) == len(usecols), 'usecols=%s must be unique' % str(usecols)
        for line in lines:
            if line.startswith(comment):
                continue
            sline = line.strip(delimiter).split(delimiter)
            data.append([sline[i] for i in usecols])
    else:
        for line in lines:
            if line.startswith(comment):
                continue
            sline = line.strip(delimiter).split(delimiter)
            data.append(sline)
    del lines

    #print(data)
    allowed_dtypes = ['float32', 'float64', 'float128', np.float64, 'int32', 'int64', 'int128']
    if dtype in allowed_dtypes:
        X = np.array(data, dtype=dtype)
    elif isinstance(dtype, dict):
        a = np.array(data, dtype=object)

        X = {}
        names = dtype['names']
        nnames = len(names)
        assert len(set(names)) == nnames, 'non-unique headers in %s' % str(names)
        for icol, name, dtypei in zip(count(), dtype['names'], dtype['formats']):
            assert dtypei in allowed_dtypes, 'dtype=%r allowed_dtypes=[%s]' % (
                dtypei, ', '.join(str(at) for at in allowed_dtypes))
            try:
                X[name] = np.asarray(a[:, icol], dtype=dtypei)
            except IndexError:
                # the number of columns in A is not consistent
                ncols = [len(datai) for datai in data]
                ucols = np.unique(ncols)
                msg = 'The number of columns is not consistent; expected=%s; actual=%s' % (nnames, ucols)
                raise IndexError(msg)
            except ValueError:
                print(a)
                # we only allow floats
                msg = ''
                is_failed = False
                if dtypei in ['float32', 'float64', 'float128', np.float64]:
                    for irow, val in zip(count(), a[:, icol]):
                        try:
                            float(val)
                        except ValueError:
                            msg += 'for name=%r, row=%s -> val=%r (expected float)\n' % (name, irow, val)
                            is_failed = True
                elif dtypei in ['int32', 'int64', 'int128']:
                    for irow, val in zip(count(), a[:, icol]):
                        try:
                            int(val)
                        except ValueError:
                            msg += 'for name=%r, row=%s -> val=%r (expected int)\n' % (name, irow, val)
                            is_failed = True
                else:
                    raise NotImplementedError(dtype)
                if is_failed:
                    raise RuntimeError(msg)

        #print('A =', A)
    else:
        raise NotImplementedError('dtype_else=%s' % dtype)
        #return np.array(data, dtype=dtype)

    #if usecols is not None:
        #raise NotImplementedError('usecols=%s must be None' % str(usecols))
    #if unpack is not False:
        #raise NotImplementedError('unpack=%r must be False' % unpack)

    if not isinstance(dtype, dict):
        # Tweak the size and shape of the arrays - remove extraneous dimensions
        if X.ndim > ndmin:
            X = np.squeeze(X)
        # and ensure we have the minimum number of dimensions asked for
        # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
        if X.ndim < ndmin:
            if ndmin == 1:
                X = np.atleast_1d(X)
            elif ndmin == 2:
                X = np.atleast_2d(X).T

    if unpack:
        #print(X)
        if isinstance(dtype, dict):
            if ndmin == 0:
                # For structured arrays, return an array for each field.
                return (np.squeeze(X[name]) for name in dtype['names'])
            else:
                raise RuntimeError('I think this can never happen...type(dtype)=dict; ndmin=%s' % ndmin)
        else:
            #print('X = ', X)
            #raise NotImplementedError('unpack=%s dtypes=%s' % (unpack, dtype))
            #if ndmin == 0: # and A.shape[0] == 1
                #out = (np.squeeze(X[:, i]) for i in range(X.shape[1]))
                #return out
            #else:
                #return (X[:, i] for i in range(X.shape[1]))
            #return (X[:, i] for i in range(X.shape[1]))
            return X.T
    else:
        return X
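A hedged usage sketch (assuming the StringIO this module compares against is
io.StringIO): trailing delimiters are tolerated, and a dict dtype returns one
array per named column:

from io import StringIO

file_obj = StringIO('1, 2.5,\n2, 3.5,')  # note the trailing commas
dtype = {
    'names': ('eid', 'cp'),
    'formats': ('int32', 'float32'),
}
X = loadtxt_nice(file_obj, delimiter=',', dtype=dtype)
# X['eid'] -> array([1, 2], dtype=int32)
# X['cp']  -> array([2.5, 3.5], dtype=float32)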
Example #17
def load_user_geom(fname, encoding='latin1'):
    """
    Loads a file of the form:

    # all supported cards
    #  - GRID
    #  - BAR
    #  - TRI
    #  - QUAD
    #
    # doesn't support:
    #  - solid elements
    #  - element properties
    #  - custom colors
    #  - coordinate systems
    #  - materials
    #  - loads
    #  - results

    #    id  x    y    z
    GRID, 1, 0.2, 0.3, 0.3
    GRID, 2, 1.2, 0.3, 0.3
    GRID, 3, 2.2, 0.3, 0.3
    GRID, 4, 5.2, 0.3, 0.3
    grid, 5, 5.2, 1.3, 2.3  # case insensitive

    #    ID, nodes
    BAR,  1, 1, 2

    #   eid, n1,n2,n3,n4
    TRI,  2, 1, 2, 3
    # this is a comment

    #   eid, n1,n2,n3,n4
    QUAD, 3, 1, 5, 3, 4
    QUAD, 4, 1, 2, 3, 4  # this is after a blank line
    """
    if fname.lower().endswith('.stl'):
        stl_filename = fname
        stl = read_stl(stl_filename)
        nnodes = stl.nodes.shape[0]
        ntris = stl.elements.shape[0]
        grid_ids = np.arange(1, nnodes + 1, dtype='int32')
        xyz = stl.nodes
        eids = np.arange(1, ntris + 1, dtype='int32')
        tris = np.vstack([eids, stl.elements.T + 1]).T
        #tris = stl.elements + 1
        #print(tris)
        quads = np.array([], dtype='int32')
        bars = np.array([], dtype='int32')
        return grid_ids, xyz, bars, tris, quads

    with open(_filename(fname), 'r', encoding=encoding) as user_geom:
        lines = user_geom.readlines()

    grid_ids = []
    xyz = []
    bars = []
    tris = []
    quads = []
    #lines2 = []
    for line in lines:
        line2 = line.strip().split('#')[0].upper()
        if line2:
            sline = line2.split(',')
            if line2.startswith('GRID'):
                assert len(sline) == 5, sline
                grid_ids.append(sline[1])
                xyz.append(sline[2:])
            elif line2.startswith('BAR'):
                assert len(sline) == 4, sline
                bars.append(sline[1:])
            elif line2.startswith('TRI'):
                assert len(sline) == 5, sline
                tris.append(sline[1:])
            elif line2.startswith('QUAD'):
                assert len(sline) == 6, sline
                quads.append(sline[1:])
            else:
                print(sline)

    grid_ids = np.array(grid_ids, dtype='int32')
    xyz = np.array(xyz, dtype='float32')
    tris = np.array(tris, dtype='int32')
    quads = np.array(quads, dtype='int32')
    bars = np.array(bars, dtype='int32')
    return grid_ids, xyz, bars, tris, quads
Example #18
def loadtxt_nice(
    filename,
    delimiter=None,
    skiprows=0,
    comment='#',
    dtype=np.float64,
    converters=None,
    usecols=None,
    unpack=False,
    ndmin=0,
):
    """
    Reimplementation of numpy's loadtxt that doesn't complain about
    trailing commas (or other delimiter) that vary from one line to
    the other.  It also provides better error messages.

    Parameters
    ----------
    filename : varies
        str : the filename to load
        file : the file object to load
        cStringIO/StringIO : a file-like object
    delimiter : str; default=None (any whitespace)
        the field splitter (e.g. comma or tab)
    skiprows : int; default=0
        the number of rows to skip
    comment : str, default='#'
        the comment line
    dtype : numpy.dtype; default=None (float)
        allows for alternate casting
        int32, float32, ...
        dtype = {
            names : ('A', 'B', 'C'),
            formats : ('int32', 'float32', 'float64'),
        }
    usecols : sequence; default=None
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  When used with a structured
        data-type, arrays are returned for each field.  Default is False.

    converters : dict; default=None
        not supported
        crashes if not None
        A dictionary mapping column number to a function that will convert
        that column to a float.  E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``.  Converters can also be used to
        provide a default value for missing data (but see also `genfromtxt`):
        ``converters = {3: lambda s: float(s.strip() or 0)}``.  Default: None.

    ndmin : int, optional
        crashes if not 0
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

    Returns
    -------
    data : (nrows, ncols) ndarray
        the data object
    """
    if converters is not None:
        raise NotImplementedError('converters=%r must be None' % converters)
    #if ndmin is not [0, 2]: ## TODO: remove 2
        #raise NotImplementedError('ndmin=%r must be 0' % ndmin)

    #if delimiter is None:
        #ending_characters = '\n\r \t'
    #else:
        #ending_characters = '\n\r \t' + delimiter

    data = []
    if isinstance(filename, StringIO):
        lines = filename.getvalue().split('\n')[skiprows:]
        filename = None
    elif is_file_obj(filename):
        lines = filename.readlines()[skiprows:]
        filename = filename.name
    else:
        with codec_open(_filename(filename), 'r') as file_obj:
            if skiprows:
                lines = file_obj.readlines()[skiprows:]
            else:
                lines = file_obj.readlines()

    if usecols:
        for usecol in usecols:
            assert isinstance(usecol,
                              int), 'usecol=%s usecols=%s' % (usecol, usecols)
        assert len(np.unique(usecols)) == len(usecols), 'usecols=%s must be unique' % str(usecols)
        for line in lines:
            if line.startswith(comment):
                continue
            sline = line.strip(delimiter).split(delimiter)
            data.append([sline[i] for i in usecols])
    else:
        for line in lines:
            if line.startswith(comment):
                continue
            sline = line.strip(delimiter).split(delimiter)
            data.append(sline)
    del lines

    #print(data)
    allowed_dtypes = [
        'float32', 'float64', 'float128', np.float64, 'int32', 'int64',
        'int128'
    ]
    if dtype in allowed_dtypes:
        X = np.array(data, dtype=dtype)
    elif isinstance(dtype, dict):
        a = np.array(data, dtype=object)

        X = {}
        names = dtype['names']
        nnames = len(names)
        assert len(
            set(names)) == nnames, 'non-unique headers in %s' % str(names)
        for icol, name, dtypei in zip(count(), dtype['names'],
                                      dtype['formats']):
            if dtypei not in allowed_dtypes:
                raise RuntimeError('dtype=%r allowed_dtypes=[%s]' % (
                    dtypei, ', '.join(str(at) for at in allowed_dtypes)))
            try:
                X[name] = np.asarray(a[:, icol], dtype=dtypei)
            except IndexError:
                # the number of columns in A is not consistent
                ncols = [len(datai) for datai in data]
                ucols = np.unique(ncols)
                msg = 'The number of columns is not consistent; expected=%s; actual=%s' % (
                    nnames, ucols)
                raise IndexError(msg)
            except ValueError:
                print(a)
                # we only allow floats
                msg = ''
                is_failed = False
                if dtypei in ['float32', 'float64', 'float128', np.float64]:
                    for irow, val in zip(count(), a[:, icol]):
                        try:
                            float(val)
                        except ValueError:
                            msg += 'for name=%r, row=%s -> val=%r (expected float)\n' % (
                                name, irow, val)
                            is_failed = True
                elif dtypei in ['int32', 'int64', 'int128']:
                    for irow, val in zip(count(), a[:, icol]):
                        try:
                            int(val)
                        except ValueError:
                            msg += 'for name=%r, row=%s -> val=%r (expected int)\n' % (
                                name, irow, val)
                            is_failed = True
                else:
                    raise NotImplementedError(dtype)
                if is_failed:
                    raise RuntimeError(msg)

        #print('A =', A)
    else:
        raise NotImplementedError('dtype_else=%s' % dtype)
        #return np.array(data, dtype=dtype)

    #if usecols is not None:
        #raise NotImplementedError('usecols=%s must be None' % str(usecols))
    #if unpack is not False:
        #raise NotImplementedError('unpack=%r must be False' % unpack)

    if not isinstance(dtype, dict):
        # Tweak the size and shape of the arrays - remove extraneous dimensions
        if X.ndim > ndmin:
            X = np.squeeze(X)
        # and ensure we have the minimum number of dimensions asked for
        # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
        if X.ndim < ndmin:
            if ndmin == 1:
                X = np.atleast_1d(X)
            elif ndmin == 2:
                X = np.atleast_2d(X).T

    if unpack:
        #print(X)
        if isinstance(dtype, dict):
            if ndmin == 0:
                # For structured arrays, return an array for each field.
                return (np.squeeze(X[name]) for name in dtype['names'])
            else:
                msg = 'I think this can never happen...type(dtype)=dict; ndmin=%s' % ndmin
                raise RuntimeError(msg)
        else:
            #print('X = ', X)
            #raise NotImplementedError('unpack=%s dtypes=%s' % (unpack, dtype))
            #if ndmin == 0: # and A.shape[0] == 1
                #out = (np.squeeze(X[:, i]) for i in range(X.shape[1]))
                #return out
            #else:
                #return (X[:, i] for i in range(X.shape[1]))
            #return (X[:, i] for i in range(X.shape[1]))
            return X.T
    else:
        return X
Example #19
def loadtxt_nice(filename, delimiter=None, skiprows=0, comments='#', dtype=np.float64,
                 converters=None, usecols=None, unpack=False,
                 ndmin=0,):
    """
    Reimplementation of numpy's loadtxt that doesn't complain about
    trailing commas (or other delimiter) that vary from one line to
    the other.  It also provides better error messages when failing to
    load files.

    Parameters
    ----------
    filename : varies
        str : the filename to load
        file : the file object to load
        cStringIO/StringIO : a file-like object
    delimiter : str; default=None (any whitespace)
        the field splitter (e.g. comma or tab)
    skiprows : int; default=0
        the number of rows to skip
    comments : str, default='#'
        the comment line
    dtype : numpy.dtype; default=None (float)
        allows for alternate casting
        int32, float32, ...
        dtype = {
            names : ('A', 'B', 'C'),
            formats : ('int32', 'float32', 'float64'),
        }
    usecols : sequence; default=None
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  When used with a structured
        data-type, arrays are returned for each field.  Default is False.

    converters : dict; default=None
        not supported
        crashes if not None
        A dictionary mapping column number to a function that will convert
        that column to a float.  E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``.  Converters can also be used to
        provide a default value for missing data (but see also `genfromtxt`):
        ``converters = {3: lambda s: float(s.strip() or 0)}``.  Default: None.

    ndmin : int, optional
        crashes if not 0
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

    Returns
    -------
    data : (nrows, ncols) ndarray
        the data object
    """
    complex_dtypes = [
        'complex64', 'complex128', np.complex128, np.complex64,
    ]
    if dtype in complex_dtypes:
        return np.loadtxt(
            filename, dtype=dtype, comments=comments, delimiter=delimiter,
            converters=converters,
            skiprows=skiprows, usecols=usecols,
            unpack=unpack, ndmin=ndmin,
            encoding='bytes')

    if converters is not None:
        raise NotImplementedError('converters=%r must be None' % converters)

    #if ndmin is not [0, 2]: ## TODO: remove 2
        #raise NotImplementedError('ndmin=%r must be 0' % ndmin)

    #if delimiter is None:
        #ending_characters = '\n\r \t'
    #else:
        #ending_characters = '\n\r \t' + delimiter

    data = []
    if isinstance(filename, StringIO):
        lines = filename.getvalue().split('\n')[skiprows:]
        filename = None
    elif isinstance(filename, str):
        with open(_filename(filename), 'r') as file_obj:
            if skiprows:
                lines = file_obj.readlines()[skiprows:]
            else:
                lines = file_obj.readlines()
    elif is_file_obj(filename):
        lines = filename.readlines()[skiprows:]
        filename = filename.name
    else:  # pragma: no cover
        raise TypeError('filename=%s is not a file-like object; type=%s' % (filename, type(filename)))

    if usecols:
        for usecol in usecols:
            assert isinstance(usecol, int), 'usecol=%s usecols=%s' % (usecol, usecols)
        assert len(np.unique(usecols)) == len(usecols), 'usecols=%s must be unique' % str(usecols)
        for line in lines:
            if line.startswith(comments):
                continue
            sline = line.strip(delimiter).split(delimiter)
            data.append([sline[i] for i in usecols])
    else:
        for line in lines:
            if line.startswith(comments):
                continue
            sline = line.strip(delimiter).split(delimiter)
            data.append(sline)
    del lines

    allowed_float_dtypes = [
        np.float64, np.float32, 'float32', 'float64', 'f4',
        np.int32, np.int64, 'int32', 'int64', 'i4',
        #'float128', np.float128,
        #'int128', np.int128,
    ]
    #if dtype not in allowed_float_dtypes:  # pragma: no cover
        #'dtype=%r allowed_float_dtypes=[%s]' % (
            #dtype, ', '.join(allowed_float_dtypes))
        #raise RuntimeError(msg)

    if dtype in allowed_float_dtypes:
        X = np.array(data, dtype=dtype)
    #elif dtype in complex_dtypes:
        #return np.loadtxt(fname, dtype=dtype, comments=comments, delimiter=delimiter,
                          #converters=converters,
                          #skiprows=skiprows, usecols=usecols,
                          #unpack=unpack, ndmin=ndmin,
                          #encoding='bytes')
        #if dtype not in allowed_complex_dtypes:  # pragma: no cover
            #'dtype=%r allowed_complex_dtypes=[%s]' % (
                #dtype, ', '.join(allowed_complex_dtypes))
            #raise RuntimeError(msg)
        #data2 = (d.strip('()'))
    elif isinstance(dtype, dict):
        X = _loadtxt_as_dict(data, dtype, allowed_float_dtypes)
        #print('A =', A)
    elif isinstance(dtype, list): # tuple
        assert len(data) == len(dtype)
        X = []
        for name_dtypei, datai in zip(dtype, data):
            unused_name, dtypei = name_dtypei
            xi = np.array(datai, dtype=dtypei)
            X.append(xi)
        unpack = False
        dtype = {name_dtypei[0] : name_dtypei[1] for name_dtypei in dtype}
        #X = np.vstack(X)#.astype(dtype)
        #X = X.astype(dtype)
        #print('X =', X)
        #dtype = np.dtype(dtype)
        #X = np.array(data, dtype=dtype)
    else:
        raise NotImplementedError('dtype_else=%s' % dtype)
        #return np.array(data, dtype=dtype)

    #if usecols is not None:
        #raise NotImplementedError('usecols=%s must be None' % str(usecols))
    #if unpack is not False:
        #raise NotImplementedError('unpack=%r must be False' % unpack)

    if not isinstance(dtype, dict):
        # Tweak the size and shape of the arrays - remove extraneous dimensions
        if X.ndim > ndmin:
            X = np.squeeze(X)
        # and ensure we have the minimum number of dimensions asked for
        # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
        if X.ndim < ndmin:
            if ndmin == 1:
                X = np.atleast_1d(X)
            elif ndmin == 2:
                X = np.atleast_2d(X).T

    if unpack:
        #print(X)
        if isinstance(dtype, dict):
            if ndmin == 0:
                # For structured arrays, return an array for each field.
                return (np.squeeze(X[name]) for name in dtype['names'])
            else:
                msg = 'I think this can never happen...type(dtype)=dict; ndmin=%s' % ndmin
                raise RuntimeError(msg)
        else:
            #print('X = ', X)
            #raise NotImplementedError('unpack=%s dtypes=%s' % (unpack, dtype))
            #if ndmin == 0: # and A.shape[0] == 1
                #out = (np.squeeze(X[:, i]) for i in range(X.shape[1]))
                #return out
            #else:
                #return (X[:, i] for i in range(X.shape[1]))
            #return (X[:, i] for i in range(X.shape[1]))
            return X.T
    return X
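A hedged usage sketch of two paths above: usecols filtering in the custom
parser, while complex dtypes are delegated straight to np.loadtxt:

from io import StringIO

file_obj = StringIO('1 2 3\n4 5 6')  # whitespace-delimited; no trailing newline
X = loadtxt_nice(file_obj, usecols=(0, 2))
# X -> array([[1., 3.],
#             [4., 6.]])

# complex input never reaches the custom parser:
# loadtxt_nice(file_obj2, dtype='complex128') just calls np.loadtxt(...)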