Example 1
def recurse(val):
    n = 0
    for subval in six.itervalues(val):
        if isinstance(subval, dict):
            n += recurse(subval)
        else:
            n += 1
    return n
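
A minimal usage sketch for the counter above (the nested dict below is illustrative, not from the original source; it assumes recurse is defined at module level and that six is installed):

import six  # required by recurse above

nested = {'a': 1, 'b': {'c': 2, 'd': {'e': 3}}}
# recurse() counts every value reachable through nested dicts that is not
# itself a dict, so this prints 3.
print(recurse(nested))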
Example 2
    def recurse(tree, seen):
        if id(tree) in seen:
            return

        if isinstance(tree, dict):
            new_seen = seen | set([id(tree)])
            for val in six.itervalues(tree):
                recurse(val, new_seen)
        elif isinstance(tree, (list, tuple)):
            new_seen = seen | set([id(tree)])
            for val in tree:
                recurse(val, new_seen)

        callback(tree)
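
Example 2's recurse depends on a callback bound in an enclosing scope. A hedged sketch of how such a closure is typically wrapped (the walk name and signature here are assumptions, not part of the snippet above):

import six

def walk(tree, callback):
    """Call callback(node) on every node of a nested dict/list/tuple,
    tracking object ids to avoid revisiting shared or cyclic containers."""
    def recurse(node, seen):
        if id(node) in seen:
            return
        if isinstance(node, dict):
            new_seen = seen | {id(node)}
            for val in six.itervalues(node):
                recurse(val, new_seen)
        elif isinstance(node, (list, tuple)):
            new_seen = seen | {id(node)}
            for val in node:
                recurse(val, new_seen)
        callback(node)

    recurse(tree, frozenset())

# Usage: collect every node (containers and leaves), children before parents.
visited = []
walk({'a': [1, 2], 'b': {'c': 3}}, visited.append)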
Example 3
    def recurse(tree, seen=[]):
        if id(tree) in seen:
            return

        if isinstance(tree, dict):
            new_seen = seen + [id(tree)]
            for val in six.itervalues(tree):
                recurse(val, new_seen)  # new_seen already includes seen
        elif isinstance(tree, (list, tuple)):
            new_seen = seen + [id(tree)]
            for val in tree:
                recurse(val, new_seen)  # new_seen already includes seen

        callback(tree)
Example 4
    def iter_errors(self, instance, _schema=None, _seen=set()):
        if id(instance) in _seen:
            return

        if _schema is None:
            schema = self.schema
        else:
            schema = _schema

        if ((isinstance(instance, dict) and '$ref' in instance) or
            isinstance(instance, reference.Reference)):
            return

        if _schema is None:
            tag = tagged.get_tag(instance)
            if tag is not None:
                schema_path = self.ctx.tag_to_schema_resolver(tag)
                if schema_path != tag:
                    s = load_schema(schema_path, self.ctx.url_mapping)
                    if s:
                        with self.resolver.in_scope(schema_path):
                            for x in self.orig_iter_errors(instance, s):
                                yield x

            if isinstance(instance, dict):
                new_seen = _seen | set([id(instance)])
                for val in six.itervalues(instance):
                    for x in self.iter_errors(val, _seen=new_seen):
                        yield x

            elif isinstance(instance, list):
                new_seen = _seen | set([id(instance)])
                for val in instance:
                    for x in self.iter_errors(val, _seen=new_seen):
                        yield x
        else:
            for x in self.orig_iter_errors(instance, _schema=schema):
                yield x
Example 5
    def iter_errors(self, instance, _schema=None, _seen=set()):
        if id(instance) in _seen:
            return

        if _schema is None:
            schema = self.schema
        else:
            schema = _schema

        if ((isinstance(instance, dict) and '$ref' in instance)
                or isinstance(instance, reference.Reference)):
            return

        if _schema is None:
            tag = tagged.get_tag(instance)
            if tag is not None:
                schema_path = self.ctx.tag_to_schema_resolver(tag)
                if schema_path != tag:
                    s = load_schema(schema_path, self.ctx.url_mapping)
                    if s:
                        with self.resolver.in_scope(schema_path):
                            for x in self.orig_iter_errors(instance, s):
                                yield x

            if isinstance(instance, dict):
                new_seen = _seen | set([id(instance)])
                for val in six.itervalues(instance):
                    for x in self.iter_errors(val, _seen=new_seen):
                        yield x

            elif isinstance(instance, list):
                new_seen = _seen | set([id(instance)])
                for val in instance:
                    for x in self.iter_errors(val, _seen=new_seen):
                        yield x
        else:
            for x in self.orig_iter_errors(instance, _schema=schema):
                yield x
Example 6
def _read_salt2_old(dirname, filenames=None):
    """Read old-style SALT2 files from a directory.

    A file named 'lightfile' must exist in the directory.
    """

    # Get list of files in directory.
    if not (os.path.exists(dirname) and os.path.isdir(dirname)):
        raise IOError("Not a directory: '{0}'".format(dirname))
    dirfilenames = os.listdir(dirname)

    # Read metadata from lightfile.
    if 'lightfile' not in dirfilenames:
        raise IOError("no lightfile in directory: '{0}'".format(dirname))
    with open(os.path.join(dirname, 'lightfile'), 'r') as lightfile:
        meta = OrderedDict()
        for line in lightfile.readlines():
            line = line.strip()
            if len(line) == 0:
                continue
            try:
                key, val = line.split()
            except ValueError:
                raise ValueError('expected space-separated key value pairs in '
                                 'lightfile: {0}'
                                 .format(os.path.join(dirname, 'lightfile')))
            meta[key] = _cast_str(val)

    # Get list of filenames to read.
    if filenames is None:
        filenames = dirfilenames
    if 'lightfile' in filenames:
        filenames.remove('lightfile')  # We already read the lightfile.
    fullfilenames = [os.path.join(dirname, f) for f in filenames]

    # Read data from files.
    data = None
    for fname in fullfilenames:
        with open(fname, 'r') as f:
            filemeta, filedata = _read_salt2(f)

        # Check that all necessary file metadata was defined.
        if not ('INSTRUMENT' in filemeta and 'BAND' in filemeta and
                'MAGSYS' in filemeta):
            raise ValueError('not all necessary global keys (INSTRUMENT, '
                             'BAND, MAGSYS) are defined in file {0}'
                             .format(fname))

        # Add the instrument/band to the file data, in anticipation of
        # aggregating it with other files.

        # PY3: next(iter(filedata.values()))
        firstcol = six.next(six.itervalues(filedata))
        data_length = len(firstcol)
        filter_name = '{0}::{1}'.format(filemeta.pop('INSTRUMENT'),
                                        filemeta.pop('BAND'))
        filedata['Filter'] = data_length * [filter_name]
        filedata['MagSys'] = data_length * [filemeta.pop('MAGSYS')]

        # If this is the first file, initialize data lists; otherwise, if the
        # keys match, append this file's data to the main data.
        if data is None:
            data = filedata
        elif set(filedata.keys()) == set(data.keys()):
            for key in data:
                data[key].extend(filedata[key])
        else:
            raise ValueError('column names do not match between files')

        # Append any extra metadata in this file to the master metadata.
        if len(filemeta) > 0:
            meta[filter_name] = filemeta

    return meta, data
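
A hedged call sketch for the reader above (the directory path is illustrative; the directory must contain a file named 'lightfile' plus the per-band SALT2 data files):

# meta is an OrderedDict of lightfile keywords (plus any extra per-file metadata);
# data maps column names to lists aggregated across all per-band files.
meta, data = _read_salt2_old('/path/to/salt2_dir')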
Example 7
def _read_salt2_old(dirname, **kwargs):
    """Read old-style SALT2 files from a directory.

    A file named 'lightfile' must exist in the directory.
    """

    filenames = kwargs.get('filenames', None)

    # Get list of files in directory.
    if not (os.path.exists(dirname) and os.path.isdir(dirname)):
        raise IOError("Not a directory: '{0}'".format(dirname))
    dirfilenames = os.listdir(dirname)

    # Read metadata from lightfile.
    if 'lightfile' not in dirfilenames:
        raise IOError("no lightfile in directory: '{0}'".format(dirname))
    with open(os.path.join(dirname, 'lightfile'), 'r') as lightfile:
        meta = odict()
        for line in lightfile.readlines():
            line = line.strip()
            if len(line) == 0:
                continue
            try:
                key, val = line.split()
            except ValueError:
                raise ValueError('expected space-separated key value pairs in '
                                 'lightfile: {0}'.format(
                                     os.path.join(dirname, 'lightfile')))
            meta[key] = _cast_str(val)

    # Get list of filenames to read.
    if filenames is None:
        filenames = dirfilenames
    if 'lightfile' in filenames:
        filenames.remove('lightfile')  # We already read the lightfile.
    fullfilenames = [os.path.join(dirname, f) for f in filenames]

    # Read data from files.
    data = None
    for fname in fullfilenames:
        with open(fname, 'r') as f:
            filemeta, filedata = _read_salt2(f)

        # Check that all necessary file metadata was defined.
        if not ('INSTRUMENT' in filemeta and 'BAND' in filemeta
                and 'MAGSYS' in filemeta):
            raise ValueError(
                'not all necessary global keys (INSTRUMENT, '
                'BAND, MAGSYS) are defined in file {0}'.format(fname))

        # Add the instrument/band to the file data, in anticipation of
        # aggregating it with other files.

        # PY3: next(iter(filedata.values()))
        firstcol = six.next(six.itervalues(filedata))
        data_length = len(firstcol)
        filter_name = '{0}::{1}'.format(filemeta.pop('INSTRUMENT'),
                                        filemeta.pop('BAND'))
        filedata['Filter'] = data_length * [filter_name]
        filedata['MagSys'] = data_length * [filemeta.pop('MAGSYS')]

        # If this is the first file, initialize data lists; otherwise, if the
        # keys match, append this file's data to the main data.
        if data is None:
            data = filedata
        elif set(filedata.keys()) == set(data.keys()):
            for key in data:
                data[key].extend(filedata[key])
        else:
            raise ValueError('column names do not match between files')

        # Append any extra metadata in this file to the master metadata.
        if len(filemeta) > 0:
            meta[filter_name] = filemeta

    return meta, data
Example 8
def _get_synphot_cfgitems():
    """Iterator for ``synphot`` configuration items."""
    for c in itervalues(synconf.__dict__):
        if isinstance(c, ConfigItem):
            yield c
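
A hedged usage sketch for the generator above, assuming the surrounding module already imports synconf (the synphot configuration namespace) and ConfigItem from astropy.config, as the snippet implies:

# Report each configuration item and its current value
# (astropy ConfigItem objects expose .name and return their value when called).
for item in _get_synphot_cfgitems():
    print(item.name, item())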
Example 9
    def write(self, *args, **kwargs):
        """Write to disk, using Table writer.  Can be restored with .read"""
        data = [col.filled(None) for col in six.itervalues(self.columns)]
        filled_table = Table(data=data, meta=deepcopy(self.meta))
        return Table.write(filled_table, *args, **kwargs)
Example 10
    def write(self, table, widths=None):
        """
        Write ``table`` as list of strings with optional specified widths

        Parameters
        ----------
        table: `~astropy.table.Table`
            Input table data
        widths: list
            A list of integer line widths

        Returns
        -------
        lines : list
            List of strings corresponding to ASCII table

        """
        # Set a default null value for all columns by adding at the end, which
        # is the position with the lowest priority.
        # We have to do it this late, because the fill_value
        # defined in the class can be overwritten by ui.write
        self.data.fill_values.append((core.masked, 'null'))

        # Check column names before altering
        self.header.cols = list(six.itervalues(table.columns))
        self.header.check_column_names(self.names, self.strict_names, self.guessing)

        core._apply_include_exclude_names(table, self.names, self.include_names, self.exclude_names)

        # Now use altered columns
        new_cols = list(six.itervalues(table.columns))
        # link information about the columns to the writer object (i.e. self)
        self.header.cols = new_cols
        self.data.cols = new_cols

        # Write header and data to lines list
        lines = []
        # Write meta information
        if 'comments' in table.meta:
            for comment in table.meta['comments']:
                if len(str(comment)) > 78:
                    warn('Comment string > 78 characters was automatically wrapped.',
                         AstropyUserWarning)
                for line in wrap(str(comment), 80, initial_indent='\\ ', subsequent_indent='\\ '):
                    lines.append(line)
        if 'keywords' in table.meta:
            keydict = table.meta['keywords']
            for keyword in keydict:
                try:
                    val = keydict[keyword]['value']
                    lines.append('\\{0}={1!r}'.format(keyword.strip(), val))
                    # meta is not standardized: catch some common errors.
                except TypeError:
                    pass

        # Usually, this is done in data.write, but since the header is written
        # first, we need that here.
        self.data._set_fill_values(self.data.cols)

        # get header and data as strings to find width of each column
        for i, col in enumerate(table.columns.values()):
            col.headwidth = max([len(vals[i]) for vals in self.header.str_vals()])
        # keep data_str_vals because they take some time to make
        data_str_vals = []
        col_str_iters = self.data.str_vals()
        for vals in zip(*col_str_iters):
            data_str_vals.append(vals)

        for i, col in enumerate(table.columns.values()):
            # FIXME: In Python 3.4, use max([], default=0).
            # See: https://docs.python.org/3/library/functions.html#max
            if data_str_vals:
                col.width = max([len(vals[i]) for vals in data_str_vals])
            else:
                col.width = 0

        if widths is None:
            widths = [max(col.width, col.headwidth) for col in table.columns.values()]
        # then write table
        self.header.write(lines, widths)
        self.data.write(lines, widths, data_str_vals)

        return lines
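
The write method above appears to come from an astropy.io.ascii writer class (the masked-value 'null' fill and the backslash-prefixed comment/keyword lines suggest the IPAC format). End users normally reach it through the high-level interface; a hedged sketch, assuming astropy is installed:

from astropy.table import Table
from astropy.io import ascii

t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
t.meta['comments'] = ['example comment']
ascii.write(t, format='ipac')  # writes the IPAC-format table to stdout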