Example 1
 def __new__(
     cls,
     shape,
     dtype=None,
     buf=None,
     offset=0,
     strides=None,
     formats=None,
     names=None,
     titles=None,
     byteorder=None,
     aligned=False,
     mask=nomask,
     hard_mask=False,
     fill_value=None,
     keep_mask=True,
     copy=False,
     **options
 ):
     #
     self = recarray.__new__(
         cls,
         shape,
         dtype=dtype,
         buf=buf,
         offset=offset,
         strides=strides,
         formats=formats,
         names=names,
         titles=titles,
         byteorder=byteorder,
         aligned=aligned,
     )
     #
     mdtype = ma.make_mask_descr(self.dtype)
     if mask is nomask or not np.size(mask):
         if not keep_mask:
             self._mask = tuple([False] * len(mdtype))
     else:
         mask = np.array(mask, copy=copy)
         if mask.shape != self.shape:
             (nd, nm) = (self.size, mask.size)
             if nm == 1:
                 mask = np.resize(mask, self.shape)
             elif nm == nd:
                 mask = np.reshape(mask, self.shape)
             else:
                 msg = "Mask and data not compatible: data size is %i, " + "mask size is %i."
                 raise MAError(msg % (nd, nm))
             copy = True
         if not keep_mask:
             self.__setmask__(mask)
             self._sharedmask = True
         else:
             if mask.dtype == mdtype:
                 _mask = mask
             else:
                 _mask = np.array([tuple([m] * len(mdtype)) for m in mask], dtype=mdtype)
             self._mask = _mask
     return self
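
The constructor above leans on ma.make_mask_descr, which mirrors a structured dtype with one boolean flag per field. A minimal standalone sketch (the dtype is an invented example):

import numpy as np
import numpy.ma as ma

# A structured dtype with two fields (names are arbitrary).
dtype = np.dtype([('x', '<i4'), ('y', '<f8')])

# make_mask_descr mirrors the field layout with booleans: one flag per field.
mdtype = ma.make_mask_descr(dtype)
print(mdtype)       # dtype([('x', '?'), ('y', '?')])
print(len(mdtype))  # 2, the count used as len(mdtype) in the code above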
Example 2
    def __new__(cls,
                shape,
                dtype=None,
                buf=None,
                offset=0,
                strides=None,
                formats=None,
                names=None,
                titles=None,
                byteorder=None,
                aligned=False,
                mask=nomask,
                hard_mask=False,
                fill_value=None,
                keep_mask=True,
                copy=False,
                **options):

        self = recarray.__new__(
            cls,
            shape,
            dtype=dtype,
            buf=buf,
            offset=offset,
            strides=strides,
            formats=formats,
            names=names,
            titles=titles,
            byteorder=byteorder,
            aligned=aligned,
        )

        mdtype = ma.make_mask_descr(self.dtype)
        if mask is nomask or not np.size(mask):
            if not keep_mask:
                self._mask = tuple([False] * len(mdtype))
        else:
            mask = np.array(mask, copy=copy)
            if mask.shape != self.shape:
                (nd, nm) = (self.size, mask.size)
                if nm == 1:
                    mask = np.resize(mask, self.shape)
                elif nm == nd:
                    mask = np.reshape(mask, self.shape)
                else:
                    msg = "Mask and data not compatible: data size is %i, " + \
                          "mask size is %i."
                    raise MAError(msg % (nd, nm))
                copy = True
            if not keep_mask:
                self.__setmask__(mask)
                self._sharedmask = True
            else:
                if mask.dtype == mdtype:
                    _mask = mask
                else:
                    _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                     dtype=mdtype)
                self._mask = _mask
        return self
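
As a hedged usage sketch (column values and field names invented), the public helper numpy.ma.mrecords.fromarrays builds a MaskedRecords like the one this constructor allocates, combining per-column masks into the field-wise record mask:

import numpy as np
from numpy.ma import mrecords

x = np.ma.array([1, 2, 3], mask=[0, 1, 0])
y = np.ma.array([1.0, 2.0, 3.0], mask=[1, 0, 0])
rec = mrecords.fromarrays([x, y], names='x,y')
print(rec.x)     # [1 -- 3]
print(rec.mask)  # [(False,  True) ( True, False) (False, False)]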
Example 3
 def __array_finalize__(self, obj):
     # Make sure we have a _fieldmask by default
     _mask = getattr(obj, "_mask", None)
     if _mask is None:
         objmask = getattr(obj, "_mask", nomask)
         _dtype = ndarray.__getattribute__(self, "dtype")
         if objmask is nomask:
             _mask = ma.make_mask_none(self.shape, dtype=_dtype)
         else:
             mdescr = ma.make_mask_descr(_dtype)
             _mask = narray([tuple([m] * len(mdescr)) for m in objmask], dtype=mdescr).view(recarray)
     # Update some of the attributes
     _dict = self.__dict__
     _dict.update(_mask=_mask, _fieldmask=_mask)
     self._update_from(obj)
     if _dict["_baseclass"] == ndarray:
         _dict["_baseclass"] = recarray
     return
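
The else branch above expands one flag per record to every field of the mask descriptor. The same broadcast trick in isolation (descriptor and flags invented):

import numpy as np
from numpy import recarray

mdescr = np.dtype([('a', '?'), ('b', '?')])  # example mask descriptor
objmask = np.array([False, True, False])     # one flag per record
# Replicate each record's flag across all fields, then view as recarray.
_mask = np.array([tuple([m] * len(mdescr)) for m in objmask],
                 dtype=mdescr).view(recarray)
print(_mask)  # [(False, False) ( True,  True) (False, False)]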
Example 4
 def __array_finalize__(self, obj):
     # Make sure we have a _fieldmask by default
     _mask = getattr(obj, '_mask', None)
     if _mask is None:
         objmask = getattr(obj, '_mask', nomask)
         _dtype = ndarray.__getattribute__(self, 'dtype')
         if objmask is nomask:
             _mask = ma.make_mask_none(self.shape, dtype=_dtype)
         else:
             mdescr = ma.make_mask_descr(_dtype)
             _mask = narray([tuple([m] * len(mdescr)) for m in objmask],
                            dtype=mdescr).view(recarray)
     # Update some of the attributes
     _dict = self.__dict__
     _dict.update(_mask=_mask)
     self._update_from(obj)
     if _dict['_baseclass'] == ndarray:
         _dict['_baseclass'] = recarray
     return
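
The nomask branch calls ma.make_mask_none, which allocates an all-False mask with the boolean field layout; for instance:

import numpy as np
import numpy.ma as ma

dtype = np.dtype([('a', '<i4'), ('b', '<f8')])  # arbitrary example dtype
mask = ma.make_mask_none((3,), dtype=dtype)
print(mask.dtype)  # dtype([('a', '?'), ('b', '?')])
print(mask)        # [(False, False) (False, False) (False, False)]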
Example 5
    def view(self, dtype=None, type=None):
        """
        Returns a view of the mrecarray.

        """
        # OK, basic copy-paste from MaskedArray.view.
        if dtype is None:
            if type is None:
                output = ndarray.view(self)
            else:
                output = ndarray.view(self, type)
        # Here again.
        elif type is None:
            try:
                if issubclass(dtype, ndarray):
                    output = ndarray.view(self, dtype)
                    dtype = None
                else:
                    output = ndarray.view(self, dtype)
            # OK, there's the change
            except TypeError:
                dtype = np.dtype(dtype)
                # we need to revert to MaskedArray, but keeping the possibility
                # of subclasses (eg, TimeSeriesRecords), so we'll force a type
                # set to the first parent
                if dtype.fields is None:
                    basetype = self.__class__.__bases__[0]
                    output = self.__array__().view(dtype, basetype)
                    output._update_from(self)
                else:
                    output = ndarray.view(self, dtype)
                output._fill_value = None
        else:
            output = ndarray.view(self, dtype, type)
        # Update the mask, just like in MaskedArray.view
        if (getattr(output, '_mask', nomask) is not nomask):
            mdtype = ma.make_mask_descr(output.dtype)
            output._mask = self._mask.view(mdtype, ndarray)
            output._mask.shape = output.shape
        return output
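
The closing block re-views the mask whenever the target keeps one, so field renames carry over to the mask descriptor. A small sketch of that behaviour through MaskedArray.view, which the comment says this method mirrors (dtypes invented):

import numpy as np
import numpy.ma as ma

a = ma.array([(1, 1.0), (2, 2.0)],
             mask=[(False, True), (False, False)],
             dtype=[('a', '<i4'), ('b', '<f4')])
# Viewing with a same-size structured dtype re-views the mask with the
# matching boolean descriptor built by make_mask_descr.
b = a.view(dtype=[('a2', '<i4'), ('b2', '<f4')])
print(b.mask.dtype)  # dtype([('a2', '?'), ('b2', '?')])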
Example 6
def vstack(arrays, join_type='inner', col_name_map=None):
    """
    Stack structured arrays vertically (by rows)

    A ``join_type`` of 'exact' means that the arrays must all have exactly
    the same column names (though the order can vary).  If ``join_type``
    is 'inner' (the default) then the intersection of common columns will
    be output.  A value of 'outer' means the output will have the union of
    all columns, with array values being masked where no common values are
    available.

    Parameters
    ----------

    arrays : list of structured arrays
        Structured array(s) to stack by rows (vertically)
    join_type : str
        Join type ('inner' | 'exact' | 'outer'), default is 'inner'
    col_name_map : empty dict or None
        If passed as a dict then it will be updated in-place with the
        mapping of output to input column names.

    Examples
    --------

    To stack two structured arrays by rows do::

      >>> from astropy.table import np_utils
      >>> t1 = np.array([(1, 2),
      ...                (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')])
      >>> t2 = np.array([(5, 6),
      ...                (7, 8)], dtype=[('a', 'i4'), ('b', 'i4')])
      >>> np_utils.vstack([t1, t2])
      array([(1, 2),
             (3, 4),
             (5, 6),
             (7, 8)],
            dtype=[('a', '<i4'), ('b', '<i4')])
    """
    # Store user-provided col_name_map until the end
    _col_name_map = col_name_map

    # Input validation
    if join_type not in ('inner', 'exact', 'outer'):
        raise ValueError(
            "`join_type` arg must be one of 'inner', 'exact' or 'outer'")

    _check_for_sequence_of_structured_arrays(arrays)

    # Trivial case of one input array
    if len(arrays) == 1:
        return arrays[0]

    # Start by assuming an outer match where all names go to output
    names = set(chain(*[arr.dtype.names for arr in arrays]))
    col_name_map = get_col_name_map(arrays, names)

    # If join_type is 'exact' then the output must have exactly the same
    # number of columns as each input array
    if join_type == 'exact':
        for names in col_name_map.values():
            if any(x is None for x in names):
                raise TableMergeError('Inconsistent columns in input arrays '
                                      "(use 'inner' or 'outer' join_type to "
                                      "allow non-matching columns)")
        join_type = 'outer'

    # For an inner join, keep only columns where all input arrays have that column
    if join_type == 'inner':
        col_name_map = OrderedDict((name, in_names)
                                   for name, in_names in col_name_map.items()
                                   if all(x is not None for x in in_names))
        if len(col_name_map) == 0:
            raise TableMergeError('Input arrays have no columns in common')

    # If there are any output columns where one or more input arrays are missing
    # then the output must be masked.  If any input arrays are masked then
    # output is masked.
    masked = any(isinstance(arr, ma.MaskedArray) for arr in arrays)
    for names in col_name_map.values():
        if any(x is None for x in names):
            masked = True
            break

    lens = [len(arr) for arr in arrays]
    n_rows = sum(lens)
    out_descrs = get_descrs(arrays, col_name_map)
    if masked:
        # Make a masked array with all values initially masked.  Note
        # that setting an array value automatically unmasks it.
        # See comment in hstack for heritage of this code.
        out = ma.masked_array(np.zeros(n_rows, out_descrs),
                              mask=np.ones(n_rows,
                                           ma.make_mask_descr(out_descrs)))
    else:
        out = np.empty(n_rows, dtype=out_descrs)

    for out_name, in_names in col_name_map.items():
        idx0 = 0
        for name, array in izip(in_names, arrays):
            idx1 = idx0 + len(array)
            if name in array.dtype.names:
                out[out_name][idx0:idx1] = array[name]
            idx0 = idx1

    # If col_name_map supplied as a dict input, then update.
    if isinstance(_col_name_map, collections.Mapping):
        _col_name_map.update(col_name_map)

    return out
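
The masked branch is worth isolating: the output starts fully masked, and each slice assignment unmasks exactly what it writes, so columns missing from an input array stay masked. A runnable sketch of just that allocation (the descr is invented):

import numpy as np
import numpy.ma as ma

out_descrs = [('a', '<i4'), ('b', '<f8')]  # example output descr
n_rows = 4
out = ma.masked_array(np.zeros(n_rows, out_descrs),
                      mask=np.ones(n_rows, ma.make_mask_descr(out_descrs)))
out['a'][:2] = [1, 2]  # writing unmasks these slots; 'b' stays masked
print(out)             # [(1, --) (2, --) (--, --) (--, --)]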
Example 7
def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
               converters=None, missing='', missing_values=None, usecols=None,
               names=None, excludelist=None, deletechars=None,
               case_sensitive=True, unpack=None, usemask=False, loose=True):
    """
    Load data from a text file.

    Each line past the first `skiprows` ones is split at the `delimiter`
    character, and characters following the `comments` character are discarded.

    Parameters
    ----------
    fname : file or string
        File or filename to read.  If the filename extension is `.gz` or `.bz2`,
        the file is first decompressed.
    dtype : data-type
        Data type of the resulting array.  If this is a flexible data-type,
        the resulting array will be 1-dimensional, and each row will be
        interpreted as an element of the array. In this case, the number
        of columns used must match the number of fields in the data-type,
        and the names of each field will be set by the corresponding name
        of the dtype.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : {string}, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
    delimiter : {string}, optional
        The string used to separate values.  By default, any consecutive
        whitespace acts as the delimiter.
    skiprows : {int}, optional
        Number of lines to skip at the beginning of the file.
    converters : {None, dictionary}, optional
        A dictionary mapping column number to a function that will convert
        values in the column to a number. Converters can also be used to
        provide a default value for missing data:
        ``converters = {3: lambda s: float(s or 0)}``.
    missing : {string}, optional
        A string representing a missing value, irrespective of the column where
        it appears (e.g., `'missing'` or `'unused'`).
    missing_values : {None, dictionary}, optional
        A dictionary mapping a column number to a string indicating whether the
        corresponding field should be masked.
    usecols : {None, sequence}, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, string, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skiprows` lines.
        If `names` is a sequence or a single string of comma-separated names,
        the names will be used to define the field names in a flexible dtype.
        If `names` is None, the names of the dtype fields will be used, if any.
    excludelist : {sequence}, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore
        appended: for example, `file` would become `file_`.
    deletechars : {string}, optional
        A string combining invalid characters that must be deleted from the names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : {bool}, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``
    usemask : {bool}, optional
        If True, returns a masked array.
        If False, return a regular standard array.

    Returns
    -------
    out : MaskedArray
        Data read from the text file.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with
      `names`), there must not be any header in the file (else a
      :exc:`ValueError` exception is raised).

    Warnings
    --------
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    """
    #
    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        errmsg = "The input argument 'converter' should be a valid dictionary "\
                 "(got '%s' instead)"
        raise TypeError(errmsg % type(user_converters))
    # Check the input dictionary of missing values
    user_missing_values = missing_values or {}
    if not isinstance(user_missing_values, dict):
        errmsg = "The input argument 'missing_values' should be a valid "\
                 "dictionary (got '%s' instead)"
        raise TypeError(errmsg % type(missing_values))
    defmissing = [_.strip() for _ in missing.split(',')] + ['']

    # Initialize the filehandle, the LineSplitter and the NameValidator
#    fhd = _to_filehandle(fname)
    if isinstance(fname, basestring):
        fhd = np.lib._datasource.open(fname)
    elif not hasattr(fname, 'read'):
        raise TypeError("The input should be a string or a filehandle. "\
                        "(got %s instead)" % type(fname))
    else:
        fhd = fname
    split_line = LineSplitter(delimiter=delimiter, comments=comments, 
                              autostrip=False)._handyman
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive)

    # Get the first valid lines after the first skiprows ones
    for i in xrange(skiprows):
        fhd.readline()
    first_values = None
    while not first_values:
        first_line = fhd.readline()
        if first_line == '':
            raise IOError('End-of-file reached before encountering data.')
        if names is True:
            first_values = first_line.strip().split(delimiter)
        else:
            first_values = split_line(first_line)
    if names is True:
        fval = first_values[0].strip()
        if fval in comments:
            del first_values[0]

    # Check the columns to use
    if usecols is not None:
        usecols = list(usecols)
    nbcols = len(usecols or first_values)

    # Check the names and overwrite the dtype.names if needed
    if dtype is not None:
        dtype = np.dtype(dtype)
    dtypenames = getattr(dtype, 'names', None)
    if names is True:
        names = validate_names([_.strip() for _ in first_values])
        first_line = ''
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    elif dtypenames:
        dtype.names = validate_names(dtypenames)
    if names and dtypenames:
        dtype.names = names

    # If usecols is a list of names, convert to a list of indices
    if usecols:
        for (i, current) in enumerate(usecols):
            if _is_string_like(current):
                usecols[i] = names.index(current)

    # If user_missing_values has names as keys, transform them to indices
    missing_values = {}
    for (key, val) in user_missing_values.iteritems():
        # If val is a list, flatten it. In any case, add missing &'' to the list
        if isinstance(val, (list, tuple)):
            val = [str(_) for _ in val]
        else:
            val = [str(val),]
        val.extend(defmissing)
        if _is_string_like(key):
            try:
                missing_values[names.index(key)] = val
            except ValueError:
                pass
        else:
            missing_values[key] = val


    # Initialize the default converters
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [StringConverter(None,
                              missing_values=missing_values.get(_, defmissing))
                      for _ in range(nbcols)]
    else:
        flatdtypes = flatten_dtype(dtype)
        # Initialize the converters
        if len(flatdtypes) > 1:
            # Flexible type : get a converter from each dtype
            converters = [StringConverter(dt,
                              missing_values=missing_values.get(i, defmissing),
                              locked=True)
                          for (i, dt) in enumerate(flatdtypes)]
        else:
            # Set to a default converter (but w/ different missing values)
            converters = [StringConverter(dtype,
                              missing_values=missing_values.get(_, defmissing),
                              locked=True)
                          for _ in range(nbcols)]
    missing_values = [_.missing_values for _ in converters]

    # Update the converters to use the user-defined ones
    uc_update = []
    for (i, conv) in user_converters.iteritems():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(i):
            i = names.index(i)
        if usecols:
            try:
                i = usecols.index(i)
            except ValueError:
                # Unused converter specified
                continue
        converters[i].update(conv, default=None, 
                             missing_values=missing_values[i],
                             locked=True)
        uc_update.append((i, conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)

    # Reset the names to match the usecols
    if (not first_line) and usecols:
        names = [names[_] for _ in usecols]

    rows = []
    append_to_rows = rows.append
    if usemask:
        masks = []
        append_to_masks = masks.append
    # Parse each line
    for line in itertools.chain([first_line,], fhd):
        values = split_line(line)
        # Skip an empty line
        if len(values) == 0:
            continue
        # Select only the columns we need
        if usecols:
            values = [values[_] for _ in usecols]
        # Check whether we need to update the converter
        if dtype is None:
            for (converter, item) in zip(converters, values):
                converter.upgrade(item)
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(tuple([val.strip() in mss 
                                   for (val, mss) in zip(values,
                                                         missing_values)]))

    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        conversionfuncs = [conv._loose_call for conv in converters]
    else:
        conversionfuncs = [conv._strict_call for conv in converters]
    for (i, vals) in enumerate(rows):
        rows[i] = tuple([convert(val)
                         for (convert, val) in zip(conversionfuncs, vals)])

    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        coldtypes = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(coldtypes)
                     if v in (type('S'), np.string_)]
        # ... and take the largest number of chars.
        for i in strcolidx:
            coldtypes[i] = "|S%i" % max(len(row[i]) for row in data)
        #
        if names is None:
            # If the dtype is uniform, don't define names, else use ''
            base = set([c.type for c in converters if c._checked])
            if len(base) == 1:
                (ddtype, mdtype) = (list(base)[0], np.bool)
            else:
                ddtype = [('', dt) for dt in coldtypes]
                mdtype = [('', np.bool) for dt in coldtypes]
        else:
            ddtype = zip(names, coldtypes)
            mdtype = zip(names, [np.bool] * len(coldtypes))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        flatdtypes = flatten_dtype(dtype)
        # Case 1. We have a structured type
        if len(flatdtypes) > 1:
            # Nested dtype, eg  [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if has_nested_fields(dtype):
                if 'O' in (_.char for _ in flatdtypes):
                    errmsg = "Nested fields involving objects "\
                             "are not supported..."
                    raise NotImplementedError(errmsg)
                rows = np.array(data, dtype=[('', t) for t in flatdtypes])
                output = rows.view(dtype)
            else:
                output = np.array(data, dtype=dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(masks,
                                    dtype=np.dtype([('', np.bool)
                                                    for t in flatdtypes]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for (i, ttype) in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if ttype == np.string_:
                            ttype = "|S%i" % max(len(row[i]) for row in data)
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, np.bool) for _ in dtype.names]
                else:
                    mdtype = np.bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    if usemask and output.dtype.names:
        for (name, conv) in zip(names or (), converters):
            missing_values = [conv(_) for _ in conv.missing_values if _ != '']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
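
With a modern NumPy the usemask=True path can be exercised directly; the mask dtype of the result comes from make_mask_descr applied to the output dtype (sample data invented):

import numpy as np
from io import StringIO

# Row two is missing column 'b'; usemask=True masks that slot.
s = StringIO("1,2\n3,\n")
out = np.genfromtxt(s, delimiter=',', names=['a', 'b'], usemask=True)
print(out)       # [(1.0, 2.0) (3.0, --)]
print(out.mask)  # [(False, False) (False,  True)]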
Example 8
def hstack(arrays, join_type='exact', uniq_col_name='{col_name}_{table_name}',
           table_names=None, col_name_map=None):
    """
    Stack structured arrays horizontally (by columns)

    A ``join_type`` of 'exact' (default) means that the arrays must all
    have exactly the same number of rows.  If ``join_type`` is 'inner' then
    the intersection of rows will be output.  A value of 'outer' means
    the output will have the union of all rows, with array values being
    masked where no common values are available.

    Parameters
    ----------

    arrays : List of structured array objects
        Structured arrays to stack by columns (horizontally)
    join_type : str
        Join type ('inner' | 'exact' | 'outer'), default is 'exact'
    uniq_col_name : str or None
        Format string used to generate a unique output column name in case
        of a conflict.  The default is '{col_name}_{table_name}'.
    table_names : list of str or None
        Two-element list of table names used when generating unique output
        column names.  The default is ['1', '2', ..].

    Examples
    --------

    To stack two arrays horizontally (by columns) do::

      >>> from astropy.table import np_utils
      >>> t1 = np.array([(1, 2),
      ...                (3, 4)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')])
      >>> t2 = np.array([(5, 6),
      ...                (7, 8)], dtype=[(str('c'), 'i4'), (str('d'), 'i4')])
      >>> np_utils.hstack([t1, t2])
      array([(1, 2, 5, 6),
             (3, 4, 7, 8)],
            dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<i4'), ('d', '<i4')])
    """
    # Store user-provided col_name_map until the end
    _col_name_map = col_name_map

    # Input validation
    if join_type not in ('inner', 'exact', 'outer'):
        raise ValueError("join_type arg must be either 'inner', 'exact' or 'outer'")
    _check_for_sequence_of_structured_arrays(arrays)

    if table_names is None:
        table_names = ['{0}'.format(ii + 1) for ii in range(len(arrays))]
    if len(arrays) != len(table_names):
        raise ValueError('Number of arrays must match number of table_names')

    # Trivial case of one input array
    if len(arrays) == 1:
        return arrays[0]

    col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)

    # If join_type is 'exact' then all input arrays must have the same length
    arr_lens = [len(arr) for arr in arrays]
    if join_type == 'exact':
        if len(set(arr_lens)) > 1:
            raise TableMergeError("Inconsistent number of rows in input arrays "
                                  "(use 'inner' or 'outer' join_type to allow "
                                  "non-matching rows)")
        join_type = 'outer'

    # For an inner join, keep only the rows common to all input arrays
    if join_type == 'inner':
        min_arr_len = min(arr_lens)
        arrays = [arr[:min_arr_len] for arr in arrays]
        arr_lens = [min_arr_len for arr in arrays]

    # If there are any output rows where one or more input arrays are missing
    # then the output must be masked.  If any input arrays are masked then
    # output is masked.
    masked = (any(isinstance(arr, ma.MaskedArray) for arr in arrays) or
              len(set(arr_lens)) > 1)

    n_rows = max(arr_lens)
    out_descrs = get_descrs(arrays, col_name_map)
    if masked:
        # Adapted from ma.masked_all() code.  Here the array is filled with
        # zeros instead of empty.  This avoids the bug reported here:
        # https://github.com/numpy/numpy/issues/3276
        out = ma.masked_array(np.zeros(n_rows, out_descrs),
                              mask=np.ones(n_rows, ma.make_mask_descr(out_descrs)))
    else:
        out = np.empty(n_rows, dtype=out_descrs)

    for out_name, in_names in six.iteritems(col_name_map):
        for name, array, arr_len in zip(in_names, arrays, arr_lens):
            if name is not None:
                out[out_name][:arr_len] = array[name]

    # If col_name_map supplied as a dict input, then update.
    if isinstance(_col_name_map, collections.Mapping):
        _col_name_map.update(col_name_map)

    return out
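
For comparison, ma.masked_all (the helper the comment refers to) performs the same field-wise all-masked allocation, but on top of np.empty; the zeros-based variant above sidesteps the numpy issue linked in the comment:

import numpy as np
import numpy.ma as ma

out = ma.masked_all(2, dtype=[('a', '<i4'), ('b', '<f8')])
print(out)       # [(--, --) (--, --)]
print(out.mask)  # [( True,  True) ( True,  True)]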
Example 9
def vstack(arrays, join_type='inner', col_name_map=None):
    """
    Stack structured arrays vertically (by rows)

    A ``join_type`` of 'exact' means that the arrays must all have exactly
    the same column names (though the order can vary).  If ``join_type``
    is 'inner' (the default) then the intersection of common columns will
    be output.  A value of 'outer' means the output will have the union of
    all columns, with array values being masked where no common values are
    available.

    Parameters
    ----------

    arrays : list of structured arrays
        Structured array(s) to stack by rows (vertically)
    join_type : str
        Join type ('inner' | 'exact' | 'outer'), default is 'inner'
    col_name_map : empty dict or None
        If passed as a dict then it will be updated in-place with the
        mapping of output to input column names.

    Examples
    --------

    To stack two structured arrays by rows do::

      >>> from astropy.table import np_utils
      >>> t1 = np.array([(1, 2),
      ...                (3, 4)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')])
      >>> t2 = np.array([(5, 6),
      ...                (7, 8)], dtype=[(str('a'), 'i4'), (str('b'), 'i4')])
      >>> np_utils.vstack([t1, t2])
      array([(1, 2),
             (3, 4),
             (5, 6),
             (7, 8)],
            dtype=[('a', '<i4'), ('b', '<i4')])
    """
    # Store user-provided col_name_map until the end
    _col_name_map = col_name_map

    # Input validation
    if join_type not in ('inner', 'exact', 'outer'):
        raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")

    _check_for_sequence_of_structured_arrays(arrays)

    # Trivial case of one input array
    if len(arrays) == 1:
        return arrays[0]

    # Start by assuming an outer match where all names go to output
    names = set(chain(*[arr.dtype.names for arr in arrays]))
    col_name_map = get_col_name_map(arrays, names)

    # If join_type is 'exact' then the output must have exactly the same
    # number of columns as each input array
    if join_type == 'exact':
        for names in six.itervalues(col_name_map):
            if any(x is None for x in names):
                raise TableMergeError('Inconsistent columns in input arrays '
                                      "(use 'inner' or 'outer' join_type to "
                                      "allow non-matching columns)")
        join_type = 'outer'

    # For an inner join, keep only columns where all input arrays have that column
    if join_type == 'inner':
        col_name_map = OrderedDict((name, in_names)
                                   for name, in_names in six.iteritems(col_name_map)
                                   if all(x is not None for x in in_names))
        if len(col_name_map) == 0:
            raise TableMergeError('Input arrays have no columns in common')

    # If there are any output columns where one or more input arrays are missing
    # then the output must be masked.  If any input arrays are masked then
    # output is masked.
    masked = any(isinstance(arr, ma.MaskedArray) for arr in arrays)
    for names in six.itervalues(col_name_map):
        if any(x is None for x in names):
            masked = True
            break

    lens = [len(arr) for arr in arrays]
    n_rows = sum(lens)
    out_descrs = get_descrs(arrays, col_name_map)
    if masked:
        # Make a masked array with all values initially masked.  Note
        # that setting an array value automatically unmasks it.
        # See comment in hstack for heritage of this code.
        out = ma.masked_array(np.zeros(n_rows, out_descrs),
                              mask=np.ones(n_rows, ma.make_mask_descr(out_descrs)))
    else:
        out = np.empty(n_rows, dtype=out_descrs)

    for out_name, in_names in six.iteritems(col_name_map):
        idx0 = 0
        for name, array in zip(in_names, arrays):
            idx1 = idx0 + len(array)
            if name in array.dtype.names:
                out[out_name][idx0:idx1] = array[name]
            idx0 = idx1

    # If col_name_map supplied as a dict input, then update.
    if isinstance(_col_name_map, collections.Mapping):
        _col_name_map.update(col_name_map)

    return out
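
NumPy itself ships a comparable helper, numpy.lib.recfunctions.stack_arrays, which pads missing columns with masked values much like the 'outer' path above; a minimal sketch:

import numpy as np
from numpy.lib import recfunctions as rfn

t1 = np.array([(1, 2.0)], dtype=[('a', '<i4'), ('b', '<f8')])
t2 = np.array([(3,)], dtype=[('a', '<i4')])
out = rfn.stack_arrays((t1, t2), usemask=True)
print(out)  # [(1, 2.0) (3, --)], the missing 'b' entry is masked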
Example 10
def genfromtxt(fname,
               dtype=float,
               comments='#',
               delimiter=None,
               skip_header=0,
               skip_footer=0,
               converters=None,
               missing_values=None,
               filling_values=None,
               usecols=None,
               names=None,
               excludelist=None,
               deletechars=None,
               replace_space='_',
               autostrip=False,
               case_sensitive=True,
               defaultfmt="f%i",
               unpack=None,
               ndmin=0,
               usemask=False,
               loose=True,
               invalid_raise=True,
               max_rows=None):
    """
    Load data from a text file, with missing values handled as specified.

    Each line past the first `skip_header` lines is split at the `delimiter`
    character, and characters following the `comments` character are discarded.

    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read.  If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
        that generators must return byte strings in Python 3k.  The strings
        in a list or produced by a generator are treated as lines.
    dtype : dtype, optional
        Data type of the resulting array.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespace acts as the delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skiprows : int, optional
        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
    converters : variable, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value
        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing : variable, optional
        `missing` was removed in numpy 1.10. Please use `missing_values`
        instead.
    missing_values : variable, optional
        The set of strings corresponding to missing data.
    filling_values : variable, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skip_header` lines.
        If `names` is a sequence or a single string of comma-separated names,
        the names will be used to define the field names in a structured dtype.
        If `names` is None, the names of the dtype fields will be used, if any.
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore
        appended: for example, `file` would become `file_`.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    replace_space : char, optional
        Character(s) used in replacement of white spaces in the variable
        names. By default, use a '_'.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``
    usemask : bool, optional
        If True, return a masked array.
        If False, return a regular array.
    loose : bool, optional
        If True, do not raise errors for invalid values.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in the
        number of columns.
        If False, a warning is emitted and the offending lines are skipped.
    max_rows : int,  optional
        The maximum number of rows to read. Must not be used with skip_footer
        at the same time.  If given, the value must be at least 1. Default is
        to read the entire file.

        .. versionadded:: 1.10.0

    Returns
    -------
    out : ndarray
        Data read from the text file. If `usemask` is True, this is a
        masked array.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with
      `names`), there must not be any header in the file (else a ValueError
      exception is raised).
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.

    References
    ----------
    .. [1] NumPy User Guide, section `I/O with NumPy
           <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.

    Examples
    --------
    >>> from io import StringIO
    >>> import numpy as np

    Comma delimited file with mixed dtype

    >>> s = StringIO("1,1.3,abcde")
    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ... ('mystring','S5')], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Using dtype = None

    >>> s.seek(0) # needed for StringIO example only
    >>> data = np.genfromtxt(s, dtype=None,
    ... names = ['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    Specifying dtype and names

    >>> s.seek(0)
    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ... names=['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])

    An example with fixed-width columns

    >>> s = StringIO("11.3abcde")
    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...     delimiter=[1,3,5])
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])

    """
    if max_rows is not None:
        if skip_footer:
            raise ValueError(
                "The keywords 'skip_footer' and 'max_rows' can not be "
                "specified at the same time.")
        if max_rows < 1:
            raise ValueError("'max_rows' must be at least 1.")

    # Py3 data conversions to bytes, for convenience
    if comments is not None:
        comments = asbytes(comments)
    if isinstance(delimiter, unicode):
        delimiter = asbytes(delimiter)
    if isinstance(missing_values, (unicode, list, tuple)):
        missing_values = asbytes_nested(missing_values)

    #
    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        raise TypeError(
            "The input argument 'converter' should be a valid dictionary "
            "(got '%s' instead)" % type(user_converters))

    # Initialize the filehandle, the LineSplitter and the NameValidator
    own_fhd = False
    try:
        if is_pathlib_path(fname):
            fname = str(fname)
        if isinstance(fname, basestring):
            if sys.version_info[0] == 2:
                fhd = iter(np.lib._datasource.open(fname, 'rbU'))
            else:
                fhd = iter(np.lib._datasource.open(fname, 'rb'))
            own_fhd = True
        else:
            fhd = iter(fname)
    except TypeError:
        raise TypeError("fname must be a string, filehandle, list of strings, "
                        "or generator. Got %s instead." % type(fname))

    split_line = LineSplitter(delimiter=delimiter,
                              comments=comments,
                              autostrip=autostrip)._handyman
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive,
                                   replace_space=replace_space)

    # Skip the first `skip_header` rows
    for i in range(skip_header):
        next(fhd)

    # Keep on until we find the first valid values
    first_values = None
    try:
        while not first_values:
            first_line = next(fhd)
            if names is True:
                if comments in first_line:
                    first_line = (b''.join(first_line.split(comments)[1:]))
            first_values = split_line(first_line)
    except StopIteration:
        # return an empty array if the datafile is empty
        first_line = b''
        first_values = []
        warnings.warn('genfromtxt: Empty input file: "%s"' % fname,
                      stacklevel=2)

    # Should we take the first values as names ?
    if names is True:
        fval = first_values[0].strip()
        if fval in comments:
            del first_values[0]

    # Check the columns to use: make sure `usecols` is a list
    if usecols is not None:
        try:
            usecols = [_.strip() for _ in usecols.split(",")]
        except AttributeError:
            try:
                usecols = list(usecols)
            except TypeError:
                usecols = [
                    usecols,
                ]
    nbcols = len(usecols or first_values)

    # Check the names and overwrite the dtype.names if needed
    if names is True:
        names = validate_names(
            [_bytes_to_name(_.strip()) for _ in first_values])
        first_line = b''
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    # Get the dtype
    if dtype is not None:
        dtype = easy_dtype(dtype,
                           defaultfmt=defaultfmt,
                           names=names,
                           excludelist=excludelist,
                           deletechars=deletechars,
                           case_sensitive=case_sensitive,
                           replace_space=replace_space)
    # Make sure the names is a list (for 2.5)
    if names is not None:
        names = list(names)

    if usecols:
        for (i, current) in enumerate(usecols):
            # if usecols is a list of names, convert to a list of indices
            if _is_string_like(current):
                usecols[i] = names.index(current)
            elif current < 0:
                usecols[i] = current + len(first_values)
        # If the dtype is not None, make sure we update it
        if (dtype is not None) and (len(dtype) > nbcols):
            descr = dtype.descr
            dtype = np.dtype([descr[_] for _ in usecols])
            names = list(dtype.names)
        # If `names` is not None, update the names
        elif (names is not None) and (len(names) > nbcols):
            names = [names[_] for _ in usecols]
    elif (names is not None) and (dtype is not None):
        names = list(dtype.names)

    # Process the missing values ...............................
    # Rename missing_values for convenience
    user_missing_values = missing_values or ()

    # Define the list of missing_values (one column: one list)
    missing_values = [list([b'']) for _ in range(nbcols)]

    # We have a dictionary: process it field by field
    if isinstance(user_missing_values, dict):
        # Loop on the items
        for (key, val) in user_missing_values.items():
            # Is the key a string ?
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped
                    continue
            # Redefine the key as needed if it's a column number
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Transform the value as a list of string
            if isinstance(val, (list, tuple)):
                val = [str(_) for _ in val]
            else:
                val = [
                    str(val),
                ]
            # Add the value(s) to the current list of missing
            if key is None:
                # None acts as default
                for miss in missing_values:
                    miss.extend(val)
            else:
                missing_values[key].extend(val)
    # We have a sequence : each item matches a column
    elif isinstance(user_missing_values, (list, tuple)):
        for (value, entry) in zip(user_missing_values, missing_values):
            value = str(value)
            if value not in entry:
                entry.append(value)
    # We have a string : apply it to all entries
    elif isinstance(user_missing_values, bytes):
        user_value = user_missing_values.split(b",")
        for entry in missing_values:
            entry.extend(user_value)
    # We have something else: apply it to all entries
    else:
        for entry in missing_values:
            entry.extend([str(user_missing_values)])

    # Process the filling_values ...............................
    # Rename the input for convenience
    user_filling_values = filling_values
    if user_filling_values is None:
        user_filling_values = []
    # Define the default
    filling_values = [None] * nbcols
    # We have a dictionary : update each entry individually
    if isinstance(user_filling_values, dict):
        for (key, val) in user_filling_values.items():
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped,
                    continue
            # Redefine the key if it's a column number and usecols is defined
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Add the value to the list
            filling_values[key] = val
    # We have a sequence : update on a one-to-one basis
    elif isinstance(user_filling_values, (list, tuple)):
        n = len(user_filling_values)
        if (n <= nbcols):
            filling_values[:n] = user_filling_values
        else:
            filling_values = user_filling_values[:nbcols]
    # We have something else : use it for all entries
    else:
        filling_values = [user_filling_values] * nbcols

    # Initialize the converters ................................
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [
            StringConverter(None, missing_values=miss, default=fill)
            for (miss, fill) in zip(missing_values, filling_values)
        ]
    else:
        dtype_flat = flatten_dtype(dtype, flatten_base=True)
        # Initialize the converters
        if len(dtype_flat) > 1:
            # Flexible type : get a converter from each dtype
            zipit = zip(dtype_flat, missing_values, filling_values)
            converters = [
                StringConverter(dt,
                                locked=True,
                                missing_values=miss,
                                default=fill) for (dt, miss, fill) in zipit
            ]
        else:
            # Set to a default converter (but w/ different missing values)
            zipit = zip(missing_values, filling_values)
            converters = [
                StringConverter(dtype,
                                locked=True,
                                missing_values=miss,
                                default=fill) for (miss, fill) in zipit
            ]
    # Update the converters to use the user-defined ones
    uc_update = []
    for (j, conv) in user_converters.items():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(j):
            try:
                j = names.index(j)
                i = j
            except ValueError:
                continue
        elif usecols:
            try:
                i = usecols.index(j)
            except ValueError:
                # Unused converter specified
                continue
        else:
            i = j
        # Find the value to test - first_line is not filtered by usecols:
        if len(first_line):
            testing_value = first_values[j]
        else:
            testing_value = None
        converters[i].update(
            conv,
            locked=True,
            testing_value=testing_value,
            default=filling_values[i],
            missing_values=missing_values[i],
        )
        uc_update.append((i, conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)

    # Fixme: possible error as following variable never used.
    #miss_chars = [_.missing_values for _ in converters]

    # Initialize the output lists ...
    # ... rows
    rows = []
    append_to_rows = rows.append
    # ... masks
    if usemask:
        masks = []
        append_to_masks = masks.append
    # ... invalid
    invalid = []
    append_to_invalid = invalid.append

    # Parse each line
    for (i, line) in enumerate(itertools.chain([
            first_line,
    ], fhd)):
        values = split_line(line)
        nbvalues = len(values)
        # Skip an empty line
        if nbvalues == 0:
            continue
        if usecols:
            # Select only the columns we need
            try:
                values = [values[_] for _ in usecols]
            except IndexError:
                append_to_invalid((i + skip_header + 1, nbvalues))
                continue
        elif nbvalues != nbcols:
            append_to_invalid((i + skip_header + 1, nbvalues))
            continue
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(
                tuple([
                    v.strip() in m for (v, m) in zip(values, missing_values)
                ]))
        if len(rows) == max_rows:
            break

    if own_fhd:
        fhd.close()

    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = [itemgetter(i)(_m) for _m in rows]
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = map(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)

    # Check that we don't have invalid values
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message
        template = "    Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbinvalid_skipped = len(
                [_ for _ in invalid if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
#
#            nbrows -= skip_footer
#            errmsg = [template % (i, nb)
#                      for (i, nb) in invalid if i < nbrows]
#        else:
        errmsg = [template % (i, nb) for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning, stacklevel=2)

    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]

    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        rows = list(
            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    else:
        rows = list(
            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))

    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [
            i for (i, v) in enumerate(column_types)
            if v in (type('S'), np.string_)
        ]
        # ... and take the largest number of chars.
        for i in strcolidx:
            column_types[i] = "|S%i" % max(len(row[i]) for row in data)
        #
        if names is None:
            # If the dtype is uniform, don't define names, else use ''
            base = set([c.type for c in converters if c._checked])
            if len(base) == 1:
                (ddtype, mdtype) = (list(base)[0], np.bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, np.bool)
                              for (i, dt) in enumerate(column_types)]
        else:
            ddtype = list(zip(names, column_types))
            mdtype = list(zip(names, [np.bool] * len(column_types)))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, e.g., [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    raise NotImplementedError(
                        "Nested fields involving objects are not supported...")
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(masks,
                                    dtype=np.dtype([('', np.bool)
                                                    for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for i, ttype in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if ttype == np.string_:
                            ttype = "|S%i" % max(len(row[i]) for row in data)
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, np.bool) for _ in dtype.names]
                else:
                    mdtype = np.bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names or (), converters):
            missing_values = [conv(_) for _ in conv.missing_values if _ != b'']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    # Verify that the array has at least dimensions `ndmin`.
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if output.ndim > ndmin:
        output = np.squeeze(output)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
    if output.ndim < ndmin:
        if ndmin == 1:
            output = np.atleast_1d(output)
        elif ndmin == 2:
            output = np.atleast_2d(output).T
    if unpack:
        return output.T
    return output
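
The ndmin and unpack handling at the end of this function mirrors the public np.genfromtxt behavior; a minimal usage sketch (assuming NumPy >= 1.23, where genfromtxt gained the ndmin keyword):

import io
import numpy as np

data = io.StringIO("1 2\n3 4\n")
# ndmin=2 guarantees a 2-D result even if the file held a single row
arr = np.genfromtxt(data, ndmin=2)
print(arr.shape)   # (2, 2)

data.seek(0)
# unpack=True transposes the result so columns unpack into variables
x, y = np.genfromtxt(data, unpack=True)
print(x, y)        # [1. 3.] [2. 4.]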
Example n. 12
def _vstack(arrays, join_type='outer', col_name_map=None):
    """
    Stack Tables vertically (by rows)

    A ``join_type`` of 'exact' means that the arrays must all
    have exactly the same column names (though the order can vary).  If
    ``join_type`` is 'inner' then the intersection of common columns will
    be the output.  A value of 'outer' means the output will have the union of
    all columns, with array values being masked where no common values are
    available.

    Parameters
    ----------
    arrays : list of Tables
        Tables to stack by rows (vertically)
    join_type : str
        Join type ('inner' | 'exact' | 'outer'), default is 'outer'
    col_name_map : empty dict or None
        If passed as a dict then it will be updated in-place with the
        mapping of output to input column names.

    Returns
    -------
    stacked_table : `~astropy.table.Table` object
        New table containing the stacked data from the input tables.
    """
    # Store user-provided col_name_map until the end
    _col_name_map = col_name_map

    # Input validation
    if join_type not in ('inner', 'exact', 'outer'):
        raise ValueError(
            "`join_type` arg must be one of 'inner', 'exact' or 'outer'")

    # Trivial case of one input array
    if len(arrays) == 1:
        return arrays[0]

    for arr in arrays:
        if arr.has_mixin_columns:
            raise NotImplementedError(
                'vstack not available for tables with mixin columns')

    # Start by assuming an outer match where all names go to output
    names = set(itertools.chain(*[arr.colnames for arr in arrays]))
    col_name_map = get_col_name_map(arrays, names)

    # If require_match is True then the output must have exactly the same
    # number of columns as each input array
    if join_type == 'exact':
        for names in six.itervalues(col_name_map):
            if any(x is None for x in names):
                raise TableMergeError('Inconsistent columns in input arrays '
                                      "(use 'inner' or 'outer' join_type to "
                                      "allow non-matching columns)")
        join_type = 'outer'

    # For an inner join, keep only columns where all input arrays have that column
    if join_type == 'inner':
        col_name_map = OrderedDict(
            (name, in_names) for name, in_names in six.iteritems(col_name_map)
            if all(x is not None for x in in_names))
        if len(col_name_map) == 0:
            raise TableMergeError('Input arrays have no columns in common')

    # If there are any output columns where one or more input arrays are missing
    # then the output must be masked.  If any input arrays are masked then
    # output is masked.
    masked = any(getattr(arr, 'masked', False) for arr in arrays)
    for names in six.itervalues(col_name_map):
        if any(x is None for x in names):
            masked = True
            break

    lens = [len(arr) for arr in arrays]
    n_rows = sum(lens)
    out = _get_out_class(arrays)(masked=masked)
    out_descrs = get_descrs(arrays, col_name_map)
    for out_descr in out_descrs:
        name = out_descr[0]
        dtype = out_descr[1:]
        if masked:
            out[name] = ma.array(data=np.zeros(n_rows, dtype),
                                 mask=np.ones(n_rows,
                                              ma.make_mask_descr(dtype)))
        else:
            out[name] = np.empty(n_rows, dtype=dtype)

    for out_name, in_names in six.iteritems(col_name_map):
        idx0 = 0
        for name, array in zip(in_names, arrays):
            idx1 = idx0 + len(array)
            if name in array.colnames:
                out[out_name][idx0:idx1] = array[name]
            idx0 = idx1

    # If col_name_map supplied as a dict input, then update.
    if isinstance(_col_name_map, collections.Mapping):
        _col_name_map.update(col_name_map)

    return out
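
The outer-join masking implemented above is easiest to see through astropy's public vstack; a minimal sketch (assuming astropy is installed):

from astropy.table import Table, vstack

t1 = Table({'a': [1, 2], 'b': [3.0, 4.0]})
t2 = Table({'a': [5, 6], 'c': [7.0, 8.0]})

# 'outer' keeps the union of columns; cells with no source value are masked
out = vstack([t1, t2], join_type='outer')
print(out.colnames)    # ['a', 'b', 'c']
print(out['b'].mask)   # [False False  True  True]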
Example n. 13
def genfromtxt(fname,
               dtype=float,
               comments='#',
               delimiter=None,
               skiprows=0,
               converters=None,
               missing='',
               missing_values=None,
               usecols=None,
               names=None,
               excludelist=None,
               deletechars=None,
               case_sensitive=True,
               unpack=None,
               usemask=False,
               loose=True):
    """
    Load data from a text file.

    Each line past the first `skiprows` ones is split at the `delimiter`
    character, and characters following the `comments` character are discarded.

    Parameters
    ----------
    fname : file or string
        File or filename to read.  If the filename extension is `.gz` or `.bz2`,
        the file is first decompressed.
    dtype : data-type
        Data type of the resulting array.  If this is a flexible data-type,
        the resulting array will be 1-dimensional, and each row will be
        interpreted as an element of the array. In this case, the number
        of columns used must match the number of fields in the data-type,
        and the names of each field will be set by the corresponding name
        of the dtype.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : {string}, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
    delimiter : {string}, optional
        The string used to separate values.  By default, any consecutive
        whitespace acts as the delimiter.
    skiprows : {int}, optional
        Number of lines to skip at the beginning of the file.
    converters : {None, dictionary}, optional
        A dictionary mapping column number to a function that will convert
        values in the column to a number. Converters can also be used to
        provide a default value for missing data:
        ``converters = {3: lambda s: float(s or 0)}``.
    missing : {string}, optional
        A string representing a missing value, irrespective of the column where
        it appears (e.g., `'missing'` or `'unused'`).
    missing_values : {None, dictionary}, optional
        A dictionary mapping a column number to a string indicating whether the
        corresponding field should be masked.
    usecols : {None, sequence}, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, string, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skiprows` lines.
        If `names` is a sequence or a single string of comma-separated names,
        the names will be used to define the field names in a flexible dtype.
        If `names` is None, the names of the dtype fields will be used, if any.
    excludelist : {sequence}, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
        for example, `file` would become `file_`.
    deletechars : {string}, optional
        A string combining invalid characters that must be deleted from the names.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case-sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : {bool}, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = genfromtxt(...)``
    usemask : {bool}, optional
        If True, returns a masked array.
        If False, return a regular standard array.

    Returns
    -------
    out : MaskedArray
        Data read from the text file.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
      there must not be any header in the file (else a :exc:`ValueError`
      exception is raised).

    Warnings
    --------
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    """
    #
    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        errmsg = "The input argument 'converter' should be a valid dictionary "\
                 "(got '%s' instead)"
        raise TypeError(errmsg % type(user_converters))
    # Check the input dictionary of missing values
    user_missing_values = missing_values or {}
    if not isinstance(user_missing_values, dict):
        errmsg = "The input argument 'missing_values' should be a valid "\
                 "dictionary (got '%s' instead)"
        raise TypeError(errmsg % type(missing_values))
    defmissing = [_.strip() for _ in missing.split(',')] + ['']

    # Initialize the filehandle, the LineSplitter and the NameValidator
    #    fhd = _to_filehandle(fname)
    if isinstance(fname, basestring):
        fhd = np.lib._datasource.open(fname)
    elif not hasattr(fname, 'read'):
        raise TypeError("The input should be a string or a filehandle. "\
                        "(got %s instead)" % type(fname))
    else:
        fhd = fname
    split_line = LineSplitter(delimiter=delimiter,
                              comments=comments,
                              autostrip=False)._handyman
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive)

    # Get the first valid lines after the first skiprows ones
    for i in xrange(skiprows):
        fhd.readline()
    first_values = None
    while not first_values:
        first_line = fhd.readline()
        if first_line == '':
            raise IOError('End-of-file reached before encountering data.')
        if names is True:
            first_values = first_line.strip().split(delimiter)
        else:
            first_values = split_line(first_line)
    if names is True:
        fval = first_values[0].strip()
        if fval in comments:
            del first_values[0]

    # Check the columns to use
    if usecols is not None:
        usecols = list(usecols)
    nbcols = len(usecols or first_values)

    # Check the names and overwrite the dtype.names if needed
    if dtype is not None:
        dtype = np.dtype(dtype)
    dtypenames = getattr(dtype, 'names', None)
    if names is True:
        names = validate_names([_.strip() for _ in first_values])
        first_line = ''
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    elif dtypenames:
        dtype.names = validate_names(dtypenames)
    if names and dtypenames:
        dtype.names = names

    # If usecols is a list of names, convert to a list of indices
    if usecols:
        for (i, current) in enumerate(usecols):
            if _is_string_like(current):
                usecols[i] = names.index(current)

    # If user_missing_values has names as keys, transform them to indices
    missing_values = {}
    for (key, val) in user_missing_values.iteritems():
        # If val is a list, flatten it. In any case, add missing &'' to the list
        if isinstance(val, (list, tuple)):
            val = [str(_) for _ in val]
        else:
            val = [str(val)]
        val.extend(defmissing)
        if _is_string_like(key):
            try:
                missing_values[names.index(key)] = val
            except ValueError:
                pass
        else:
            missing_values[key] = val

    # Initialize the default converters
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [
            StringConverter(None,
                            missing_values=missing_values.get(_, defmissing))
            for _ in range(nbcols)
        ]
    else:
        flatdtypes = flatten_dtype(dtype)
        # Initialize the converters
        if len(flatdtypes) > 1:
            # Flexible type: get a converter from each dtype
            converters = [
                StringConverter(dt,
                                missing_values=missing_values.get(
                                    i, defmissing),
                                locked=True)
                for (i, dt) in enumerate(flatdtypes)
            ]
        else:
            # Set to a default converter (but w/ different missing values)
            converters = [
                StringConverter(dtype,
                                missing_values=missing_values.get(
                                    _, defmissing),
                                locked=True) for _ in range(nbcols)
            ]
    missing_values = [_.missing_values for _ in converters]

    # Update the converters to use the user-defined ones
    uc_update = []
    for (i, conv) in user_converters.iteritems():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(i):
            i = names.index(i)
        if usecols:
            try:
                i = usecols.index(i)
            except ValueError:
                # Unused converter specified
                continue
        converters[i].update(conv,
                             default=None,
                             missing_values=missing_values[i],
                             locked=True)
        uc_update.append((i, conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)

    # Reset the names to match the usecols
    if (not first_line) and usecols:
        names = [names[_] for _ in usecols]

    rows = []
    append_to_rows = rows.append
    if usemask:
        masks = []
        append_to_masks = masks.append
    # Parse each line
    for line in itertools.chain([first_line], fhd):
        values = split_line(line)
        # Skip an empty line
        if len(values) == 0:
            continue
        # Select only the columns we need
        if usecols:
            values = [values[_] for _ in usecols]
        # Check whether we need to update the converter
        if dtype is None:
            for (converter, item) in zip(converters, values):
                converter.upgrade(item)
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(
                tuple([
                    val.strip() in mss
                    for (val, mss) in zip(values, missing_values)
                ]))

    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        conversionfuncs = [conv._loose_call for conv in converters]
    else:
        conversionfuncs = [conv._strict_call for conv in converters]
    for (i, vals) in enumerate(rows):
        rows[i] = tuple(
            [convert(val) for (convert, val) in zip(conversionfuncs, vals)])

    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        coldtypes = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [
            i for (i, v) in enumerate(coldtypes)
            if v in (type('S'), np.string_)
        ]
        # ... and take the largest number of chars.
        for i in strcolidx:
            coldtypes[i] = "|S%i" % max(len(row[i]) for row in data)
        #
        if names is None:
            # If the dtype is uniform, don't define names, else use ''
            base = set([c.type for c in converters if c._checked])

            if len(base) == 1:
                (ddtype, mdtype) = (list(base)[0], np.bool)
            else:
                ddtype = [('', dt) for dt in coldtypes]
                mdtype = [('', np.bool) for dt in coldtypes]
        else:
            ddtype = zip(names, coldtypes)
            mdtype = zip(names, [np.bool] * len(coldtypes))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        flatdtypes = flatten_dtype(dtype)
        # Case 1. We have a structured type
        if len(flatdtypes) > 1:
            # Nested dtype, e.g., [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if has_nested_fields(dtype):
                if 'O' in (_.char for _ in flatdtypes):
                    errmsg = "Nested fields involving objects "\
                             "are not supported..."
                    raise NotImplementedError(errmsg)
                rows = np.array(data, dtype=[('', t) for t in flatdtypes])
                output = rows.view(dtype)
            else:
                output = np.array(data, dtype=dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(masks,
                                    dtype=np.dtype([('', np.bool)
                                                    for t in flatdtypes]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for (i, ttype) in enumerate([conv.type
                                             for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if ttype == np.string_:
                            ttype = "|S%i" % max(len(row[i]) for row in data)
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, np.bool) for _ in dtype.names]
                else:
                    mdtype = np.bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    if usemask and output.dtype.names:
        for (name, conv) in zip(names or (), converters):
            missing_values = [conv(_) for _ in conv.missing_values if _ != '']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
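
A usage sketch of the behavior implemented above, written against the modern np.genfromtxt signature (where skiprows and missing became skip_header and missing_values):

import io
import numpy as np

data = io.StringIO("1,2,N/A\n4,,6\n")
# Empty fields and the string 'N/A' are treated as missing;
# usemask=True returns a MaskedArray instead of filling silently
out = np.genfromtxt(data, delimiter=',',
                    missing_values='N/A', filling_values=-1,
                    usemask=True)
print(out)        # [[1.0 2.0 --] [4.0 -- 6.0]]
print(out.mask)   # [[False False  True] [False  True False]]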
Example n. 14
import numpy as np
import numpy.ma as ma

dtype = np.dtype({'names': ['foo', 'bar'],
                  'formats': [np.float32, np.int64]})

ma.make_mask_descr(dtype)
ma.make_mask_descr(np.float32)
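
For reference, these two calls return (per the numpy.ma documentation):

# ma.make_mask_descr(dtype)      -> dtype([('foo', '|b1'), ('bar', '|b1')])
# ma.make_mask_descr(np.float32) -> dtype('bool')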
Example n. 16
def hstack(arrays,
           join_type='exact',
           uniq_col_name='{col_name}_{table_name}',
           table_names=None,
           col_name_map=None):
    """
    Stack structured arrays horizontally (by columns)

    A ``join_type`` of 'exact' (default) means that the arrays must all
    have exactly the same number of rows.  If ``join_type`` is 'inner' then
    the intersection of rows will be output.  A value of 'outer' means
    the output will have the union of all rows, with array values being
    masked where no common values are available.

    Parameters
    ----------

    arrays : List of structured array objects
        Structured arrays to stack by columns (horizontally)
    join_type : str
        Join type ('inner' | 'exact' | 'outer'), default is 'exact'
    uniq_col_name : str or None
        String used to generate a unique output column name in case of a
        conflict.  The default is '{col_name}_{table_name}'.
    table_names : list of str or None
        List of table names used when generating unique output column
        names.  The default is ['1', '2', ...].

    Examples
    --------

    To stack two arrays horizontally (by columns) do::

      >>> from astropy.table import np_utils
      >>> t1 = np.array([(1, 2),
      ...                (3, 4)], dtype=[('a', 'i4'), ('b', 'i4')])
      >>> t2 = np.array([(5, 6),
      ...                (7, 8)], dtype=[('c', 'i4'), ('d', 'i4')])
      >>> np_utils.hstack([t1, t2])
      array([(1, 2, 5, 6),
             (3, 4, 7, 8)],
            dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<i4'), ('d', '<i4')])
    """
    # Store user-provided col_name_map until the end
    _col_name_map = col_name_map

    # Input validation
    if join_type not in ('inner', 'exact', 'outer'):
        raise ValueError(
            "join_type arg must be either 'inner', 'exact' or 'outer'")
    _check_for_sequence_of_structured_arrays(arrays)

    if table_names is None:
        table_names = ['{0}'.format(ii + 1) for ii in range(len(arrays))]
    if len(arrays) != len(table_names):
        raise ValueError('Number of arrays must match number of table_names')

    # Trivial case of one input array
    if len(arrays) == 1:
        return arrays[0]

    col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)

    # If require_match is True then all input arrays must have the same length
    arr_lens = [len(arr) for arr in arrays]
    if join_type == 'exact':
        if len(set(arr_lens)) > 1:
            raise TableMergeError(
                "Inconsistent number of rows in input arrays "
                "(use 'inner' or 'outer' join_type to allow "
                "non-matching rows)")
        join_type = 'outer'

    # For an inner join, keep only columns where all input arrays have that column
    if join_type == 'inner':
        min_arr_len = min(arr_lens)
        arrays = [arr[:min_arr_len] for arr in arrays]
        arr_lens = [min_arr_len for arr in arrays]

    # If there are any output rows where one or more input arrays are missing
    # then the output must be masked.  If any input arrays are masked then
    # output is masked.
    masked = (any(isinstance(arr, ma.MaskedArray) for arr in arrays)
              or len(set(arr_lens)) > 1)

    n_rows = max(arr_lens)
    out_descrs = get_descrs(arrays, col_name_map)
    if masked:
        # Adapted from ma.all_masked() code.  Here the array is filled with
        # zeros instead of empty.  This avoids the bug reported here:
        # https://github.com/numpy/numpy/issues/3276
        out = ma.masked_array(np.zeros(n_rows, out_descrs),
                              mask=np.ones(n_rows,
                                           ma.make_mask_descr(out_descrs)))
    else:
        out = np.empty(n_rows, dtype=out_descrs)

    for out_name, in_names in col_name_map.items():
        for name, array, arr_len in izip(in_names, arrays, arr_lens):
            if name is not None:
                out[out_name][:arr_len] = array[name]

    # If col_name_map supplied as a dict input, then update.
    if isinstance(_col_name_map, collections.Mapping):
        _col_name_map.update(col_name_map)

    return out
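
The masked-initialization pattern used above (zeros for data plus an all-True mask built with ma.make_mask_descr) can be reproduced standalone; a minimal sketch:

import numpy as np
import numpy.ma as ma

descrs = [('a', '<i4'), ('b', '<f8')]
# Start fully masked, then fill only the rows an input array provides
out = ma.masked_array(np.zeros(3, descrs),
                      mask=np.ones(3, ma.make_mask_descr(descrs)))
out['a'][:2] = [1, 2]   # the third row of 'a' stays masked
print(out)              # [(1, --) (2, --) (--, --)]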
Example n. 17
def _vstack(arrays, join_type="inner", col_name_map=None):
    """
    Stack Tables vertically (by rows)

    A ``join_type`` of 'exact' means that the arrays must all
    have exactly the same column names (though the order can vary).  If
    ``join_type`` is 'inner' then the intersection of common columns will
    be output.  A value of 'outer' means the output will have the union of
    all columns, with array values being masked where no common values are
    available.

    Parameters
    ----------
    arrays : list of Tables
        Tables to stack by rows (vertically)
    join_type : str
        Join type ('inner' | 'exact' | 'outer'), default is 'inner'
    col_name_map : empty dict or None
        If passed as a dict then it will be updated in-place with the
        mapping of output to input column names.

    Returns
    -------
    stacked_table : `~astropy.table.Table` object
        New table containing the stacked data from the input tables.
    """
    # Store user-provided col_name_map until the end
    _col_name_map = col_name_map

    # Input validation
    if join_type not in ("inner", "exact", "outer"):
        raise ValueError("`join_type` arg must be one of 'inner', 'exact' or 'outer'")

    # Trivial case of one input array
    if len(arrays) == 1:
        return arrays[0]

    for arr in arrays:
        if arr.has_mixin_columns:
            raise NotImplementedError("vstack not available for tables with mixin columns")

    # Start by assuming an outer match where all names go to output
    names = set(itertools.chain(*[arr.colnames for arr in arrays]))
    col_name_map = get_col_name_map(arrays, names)

    # If require_match is True then the output must have exactly the same
    # number of columns as each input array
    if join_type == "exact":
        for names in six.itervalues(col_name_map):
            if any(x is None for x in names):
                raise TableMergeError(
                    "Inconsistent columns in input arrays "
                    "(use 'inner' or 'outer' join_type to "
                    "allow non-matching columns)"
                )
        join_type = "outer"

    # For an inner join, keep only columns where all input arrays have that column
    if join_type == "inner":
        col_name_map = OrderedDict(
            (name, in_names) for name, in_names in six.iteritems(col_name_map) if all(x is not None for x in in_names)
        )
        if len(col_name_map) == 0:
            raise TableMergeError("Input arrays have no columns in common")

    # If there are any output columns where one or more input arrays are missing
    # then the output must be masked.  If any input arrays are masked then
    # output is masked.
    masked = any(getattr(arr, "masked", False) for arr in arrays)
    for names in six.itervalues(col_name_map):
        if any(x is None for x in names):
            masked = True
            break

    lens = [len(arr) for arr in arrays]
    n_rows = sum(lens)
    out = _get_out_class(arrays)(masked=masked)
    out_descrs = get_descrs(arrays, col_name_map)
    for out_descr in out_descrs:
        name = out_descr[0]
        dtype = out_descr[1:]
        if masked:
            out[name] = ma.array(data=np.zeros(n_rows, dtype), mask=np.ones(n_rows, ma.make_mask_descr(dtype)))
        else:
            out[name] = np.empty(n_rows, dtype=dtype)

    for out_name, in_names in six.iteritems(col_name_map):
        idx0 = 0
        for name, array in zip(in_names, arrays):
            idx1 = idx0 + len(array)
            if name in array.colnames:
                out[out_name][idx0:idx1] = array[name]
            idx0 = idx1

    # If col_name_map supplied as a dict input, then update.
    if isinstance(_col_name_map, collections.Mapping):
        _col_name_map.update(col_name_map)

    return out
Example n. 18
def _hstack(arrays,
            join_type='exact',
            uniq_col_name='{col_name}_{table_name}',
            table_names=None,
            col_name_map=None):
    """
    Stack tables horizontally (by columns)

    A ``join_type`` of 'exact' (default) means that the arrays must all
    have exactly the same number of rows.  If ``join_type`` is 'inner' then
    the intersection of rows will be output.  A value of 'outer' means
    the output will have the union of all rows, with array values being
    masked where no common values are available.

    Parameters
    ----------

    arrays : List of tables
        Tables to stack by columns (horizontally)
    join_type : str
        Join type ('inner' | 'exact' | 'outer'), default is 'exact'
    uniq_col_name : str or None
        String used to generate a unique output column name in case of a
        conflict.  The default is '{col_name}_{table_name}'.
    table_names : list of str or None
        List of table names used when generating unique output column
        names.  The default is ['1', '2', ...].
    """
    from .table import Table

    # Store user-provided col_name_map until the end
    _col_name_map = col_name_map

    # Input validation
    if join_type not in ('inner', 'exact', 'outer'):
        raise ValueError(
            "join_type arg must be either 'inner', 'exact' or 'outer'")

    if table_names is None:
        table_names = ['{0}'.format(ii + 1) for ii in range(len(arrays))]
    if len(arrays) != len(table_names):
        raise ValueError('Number of arrays must match number of table_names')

    # Trivial case of one input array
    if len(arrays) == 1:
        return arrays[0]

    col_name_map = get_col_name_map(arrays, [], uniq_col_name, table_names)

    # If require_match is True then all input arrays must have the same length
    arr_lens = [len(arr) for arr in arrays]
    if join_type == 'exact':
        if len(set(arr_lens)) > 1:
            raise TableMergeError(
                "Inconsistent number of rows in input arrays "
                "(use 'inner' or 'outer' join_type to allow "
                "non-matching rows)")
        join_type = 'outer'

    # For an inner join, keep only the common rows
    if join_type == 'inner':
        min_arr_len = min(arr_lens)
        if len(set(arr_lens)) > 1:
            arrays = [arr[:min_arr_len] for arr in arrays]
        arr_lens = [min_arr_len for arr in arrays]

    # If there are any output rows where one or more input arrays are missing
    # then the output must be masked.  If any input arrays are masked then
    # output is masked.
    masked = any(getattr(arr, 'masked', False)
                 for arr in arrays) or len(set(arr_lens)) > 1

    n_rows = max(arr_lens)
    out = Table(masked=masked)
    out_descrs = get_descrs(arrays, col_name_map)

    for out_descr in out_descrs:
        name = out_descr[0]
        dtype = out_descr[1:]
        if masked:
            # Adapted from ma.all_masked() code.  Here the array is filled with
            # zeros instead of empty.  This avoids the bug reported here:
            # https://github.com/numpy/numpy/issues/3276
            out[name] = ma.array(data=np.zeros(n_rows, dtype),
                                 mask=np.ones(n_rows,
                                              ma.make_mask_descr(dtype)))
        else:
            out[name] = np.empty(n_rows, dtype=dtype)

    for out_name, in_names in six.iteritems(col_name_map):
        for name, array, arr_len in zip(in_names, arrays, arr_lens):
            if name is not None:
                out[out_name][:arr_len] = array[name]

    # If col_name_map supplied as a dict input, then update.
    if isinstance(_col_name_map, collections.Mapping):
        _col_name_map.update(col_name_map)

    return out
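
The public astropy.table.hstack shows the same row handling from the caller's side; a minimal sketch (assuming astropy is installed):

from astropy.table import Table, hstack

t1 = Table({'a': [1, 2, 3]})
t2 = Table({'b': [10, 20]})

# 'outer' pads the shorter table with masked rows; 'inner' truncates instead
outer = hstack([t1, t2], join_type='outer')
inner = hstack([t1, t2], join_type='inner')
print(len(outer), outer['b'].mask.tolist())   # 3 [False, False, True]
print(len(inner))                             # 2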