def literal_eval(s):
    """Wrapper around ``ast.literal_eval`` that returns its return value,
    if possible, but returns the original string in cases where
    ``ast.literal_eval`` raises an exception.
    """
    try:
        return ast_literal_eval(s)
    except (ValueError, SyntaxError):
        return s
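# Sketch only: a minimal usage example of the literal_eval() wrapper above,
# assuming it is in scope and that ``ast.literal_eval`` has been imported as
# ``ast_literal_eval`` (as the other snippets in this collection do).
from ast import literal_eval as ast_literal_eval

print(literal_eval("3.14"))         # -> 3.14 (parsed as a float)
print(literal_eval("[1, 2, 3]"))    # -> [1, 2, 3] (parsed as a list)
print(literal_eval("hello world"))  # -> 'hello world' (SyntaxError, so the
                                    #    original string is returned)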
def clean_compression_algos(self):
    """Convert SelectMultiple selected choices into a TextField with a
    space separator.
    """
    compression_algos = self.cleaned_data.get('compression_algos')
    if not compression_algos:
        return ""
    try:
        # Transform the text-formatted list into a Python list
        algos_list = ast_literal_eval(compression_algos)
    except Exception:
        raise ValidationError("Invalid field.")
    else:
        # And return the selected choices joined by a space separator
        return ' '.join(algos_list)
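# Sketch only: the transformation clean_compression_algos() performs, shown
# outside the Django form machinery.  The input string is a hypothetical
# example of what a SelectMultiple widget leaves in cleaned_data.
from ast import literal_eval as ast_literal_eval

raw = "['gzip', 'lz4', 'zstd']"        # text-formatted list from the form
algos_list = ast_literal_eval(raw)     # -> ['gzip', 'lz4', 'zstd']
print(' '.join(algos_list))            # -> 'gzip lz4 zstd'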
def load(self, filename=None, text=None):
    f = None
    if text is None:
        f = open(filename, "r")
    else:
        f = io.StringIO(text)

    exclusive = f.readline().strip() == "True"
    inp = int(f.readline().strip())
    hid = list(map(int, f.readline().strip().split(" ")))
    out = int(f.readline().strip())

    weights = []
    for i in range(0, len(hid) + 1):
        weights.append(np.matrix(ast_literal_eval(f.readline().strip())))

    f.close()
    self.initialize(inp, hid, out, weights, exclusive)
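# Sketch only: the text format implied by the load() reader above -- one line
# each for the exclusive flag, input size, hidden sizes, and output size,
# followed by one Python-literal matrix per weight layer.  All values below
# are made up for illustration.
import io
import numpy as np
from ast import literal_eval as ast_literal_eval

text = "\n".join([
    "True",                                   # exclusive flag
    "2",                                      # number of inputs
    "3 3",                                    # hidden layer sizes
    "1",                                      # number of outputs
    "[[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]",   # weight matrices as literals
    "[[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]",
    "[[0.1, 0.2, 0.3]]",
])

f = io.StringIO(text)
for _ in range(4):
    f.readline()                              # skip the header lines
w0 = np.matrix(ast_literal_eval(f.readline().strip()))
print(w0.shape)                               # -> (3, 2)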
def intervals(self, value):
    if not isinstance(value, (tuple, list)):
        msg = "intervals attribute must be a tuple or list, not {0!r}"
        raise ValueError(msg.format(value.__class__.__name__))

    # Parse the intervals
    values = []
    for interval in value:
        if isinstance(interval, str):
            i = interval.split()
            try:
                x = ast_literal_eval(i.pop(0))
            except Exception:
                raise ValueError(
                    "Unparseable interval: {0!r}".format(interval))

            if interval:
                units = ' '.join(i)
            else:
                units = None

            try:
                d = Data(x, units)
            except Exception:
                raise ValueError(
                    "Unparseable interval: {0!r}".format(interval))
        else:
            try:
                d = Data.asdata(interval, copy=True)
            except Exception:
                raise ValueError(
                    "Unparseable interval: {0!r}".format(interval))
        # --- End: if

        if d.size != 1:
            raise ValueError(
                "Unparseable interval: {0!r}".format(interval))

        if d.ndim > 1:
            d.squeeze(inplace=True)

        values.append(d)
    # --- End: for

    self.set_qualifier('interval', tuple(values))
def intervals(self, value):
    if not isinstance(value, (tuple, list)):
        raise ValueError(
            "intervals attribute must be a tuple or list, not "
            f"{value.__class__.__name__!r}")

    # Parse the intervals
    values = []
    for interval in value:
        if isinstance(interval, str):
            i = interval.split()
            try:
                x = ast_literal_eval(i.pop(0))
            except Exception:
                raise ValueError(f"Unparseable interval: {interval!r}")

            if interval:
                units = " ".join(i)
            else:
                units = None

            try:
                d = Data(x, units)
            except Exception:
                raise ValueError(f"Unparseable interval: {interval!r}")
        else:
            try:
                d = Data.asdata(interval, copy=True)
            except Exception:
                raise ValueError(f"Unparseable interval: {interval!r}")

        if d.size != 1:
            raise ValueError(f"Unparseable interval: {interval!r}")

        if d.ndim > 1:
            d.squeeze(inplace=True)

        values.append(d)

    self.set_qualifier("interval", tuple(values))
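# Sketch only: the string-parsing step used by the two intervals setters
# above, shown without the library's Data class.  The first whitespace token
# is parsed as a literal and the remainder is treated as the units.
from ast import literal_eval as ast_literal_eval

interval = "0.1 degrees_north"                 # example interval string
tokens = interval.split()
x = ast_literal_eval(tokens.pop(0))            # -> 0.1 (numeric value)
units = " ".join(tokens) if tokens else None   # -> 'degrees_north'
print(x, units)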
def create(cls, cell_methods_string=None):
    '''Parse a CF-like cell_methods string.

    :Parameters:

        cell_methods_string: `str`
            A CF cell_methods string.

    :Returns:

        `list`

    **Examples:**

    >>> c = CellMethod.create('lat: mean (interval: 1 hour)')

    '''
    incorrect_interval = 'Cell method interval is incorrectly formatted'

    out = []

    if not cell_methods_string:
        return out

    # ------------------------------------------------------------
    # Split the cell_methods string into a list of strings ready
    # for parsing. For example:
    #
    #   'lat: mean (interval: 1 hour)'
    #
    # would be split up into:
    #
    #   ['lat:', 'mean', '(', 'interval:', '1', 'hour', ')']
    # ------------------------------------------------------------
    cell_methods = re.sub(r'\((?=[^\s])', '( ', cell_methods_string)
    cell_methods = re.sub(r'(?<=[^\s])\)', ' )', cell_methods).split()

    while cell_methods:
        cm = cls()

        axes = []
        while cell_methods:
            if not cell_methods[0].endswith(':'):
                break

            # TODO Check that "name" ends with colon? How? ('lat: mean
            # (area-weighted)' or 'lat: mean (interval: 1 degree_north
            # comment: area-weighted)')

            axis = cell_methods.pop(0)[:-1]

            axes.append(axis)
        # --- End: while

        cm.set_axes(axes)

        if not cell_methods:
            out.append(cm)
            break

        # Method
        cm.set_method(cell_methods.pop(0))

        if not cell_methods:
            out.append(cm)
            break

        # Climatological statistics, and statistics which apply to
        # portions of cells
        while cell_methods[0] in ('within', 'where', 'over'):
            attr = cell_methods.pop(0)
            cm.set_qualifier(attr, cell_methods.pop(0))
            if not cell_methods:
                break
        # --- End: while

        if not cell_methods:
            out.append(cm)
            break

        # interval and comment
        intervals = []
        if cell_methods[0].endswith('('):
            cell_methods.pop(0)

            if not (re.search('^(interval|comment):$', cell_methods[0])):
                cell_methods.insert(0, 'comment:')

            while not re.search(r'^\)$', cell_methods[0]):
                term = cell_methods.pop(0)[:-1]

                if term == 'interval':
                    interval = cell_methods.pop(0)
                    if cell_methods[0] != ')':
                        units = cell_methods.pop(0)
                    else:
                        units = None

                    try:
                        parsed_interval = ast_literal_eval(interval)
                    except (SyntaxError, ValueError):
                        raise ValueError("{}: {!r}".format(
                            incorrect_interval, interval))

                    try:
                        data = Data(array=parsed_interval, units=units,
                                    copy=False)
                    except Exception:
                        raise ValueError("{}: {!r}".format(
                            incorrect_interval, interval))

                    intervals.append(data)
                    continue
                # --- End: if

                if term == 'comment':
                    comment = []
                    while cell_methods:
                        if cell_methods[0].endswith(')'):
                            break
                        if cell_methods[0].endswith(':'):
                            break

                        comment.append(cell_methods.pop(0))
                    # --- End: while

                    cm.set_qualifier('comment', ' '.join(comment))
            # --- End: while

            if cell_methods[0].endswith(')'):
                cell_methods.pop(0)
        # --- End: if

        n_intervals = len(intervals)
        if n_intervals > 1 and n_intervals != len(axes):
            raise ValueError("{} (doesn't match axes): {!r}".format(
                incorrect_interval, interval))

        if intervals:
            cm.set_qualifier('interval', intervals)

        out.append(cm)
    # --- End: while

    return out
from ast import literal_eval as ast_literal_eval
from re import compile as re_compile

from setuptools import find_packages, setup

PKG_NAME = 'list-cli'
PKG_DESCRIPTION = 'List Management Application (CLI)'

_version_re = re_compile(r'__version__\s+=\s+(.*)')

with open('list/cli.py', 'rb') as f:
    PKG_VERSION = str(
        ast_literal_eval(
            _version_re.search(f.read().decode('utf-8')).group(1)))

setup(
    name=PKG_NAME,
    version=PKG_VERSION,
    url='https://github.com/jzaleski/list-cli',
    license='MIT',
    description=PKG_DESCRIPTION,
    long_description=PKG_DESCRIPTION,
    author='Jonathan W. Zaleski',
    author_email='*****@*****.**',
    packages=find_packages(),
    install_requires=[],
    entry_points={'console_scripts': ['list-cli=list.__main__:main']},
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3',
    ],
)
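# Sketch only: the version-extraction pattern used by the setup script above,
# run against an in-memory sample string rather than the real list/cli.py.
# ast.literal_eval strips the quotes from the matched assignment value.
from ast import literal_eval as ast_literal_eval
from re import compile as re_compile

_version_re = re_compile(r'__version__\s+=\s+(.*)')

sample = "__version__ = '1.2.3'\n"      # stand-in for the module source
version = str(ast_literal_eval(_version_re.search(sample).group(1)))
print(version)                           # -> 1.2.3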
def _create_data(
    self,
    ncvar,
    construct=None,
    unpacked_dtype=False,
    uncompress_override=None,
    parent_ncvar=None,
):
    """TODO.

    .. versionadded:: 3.0.0

    :Parameters:

        ncvar: `str`
            The name of the netCDF variable that contains the data.

        construct: optional

        unpacked_dtype: `False` or `numpy.dtype`, optional

        uncompress_override: `bool`, optional

    :Returns:

        `Data`

    """
    g = self.read_vars

    is_cfa_variable = (
        g["cfa"]
        and construct.get_property("cf_role", None) == "cfa_variable"
    )

    if not is_cfa_variable:
        # --------------------------------------------------------
        # Create data for a normal netCDF variable
        # --------------------------------------------------------
        return super()._create_data(
            ncvar=ncvar,
            construct=construct,
            unpacked_dtype=unpacked_dtype,
            uncompress_override=uncompress_override,
            parent_ncvar=parent_ncvar,
        )

    # ------------------------------------------------------------
    # Still here? Then create data for a CFA netCDF variable
    # ------------------------------------------------------------
    # print('Creating data from CFA variable', repr(ncvar), repr(construct))
    try:
        cfa_data = json.loads(construct.get_property("cfa_array"))
    except ValueError as error:
        raise ValueError(
            "Error during JSON-decoding of netCDF attribute 'cfa_array': "
            "{}".format(error)
        )

    variable = g["variables"][ncvar]

    cfa_data["file"] = g["filename"]
    cfa_data["Units"] = construct.Units
    cfa_data["fill_value"] = construct.fill_value()
    cfa_data["_pmshape"] = cfa_data.pop("pmshape", ())
    cfa_data["_pmaxes"] = cfa_data.pop("pmdimensions", ())

    base = cfa_data.get("base", None)
    if base is not None:
        cfa_data["base"] = pathjoin(dirname(g["filename"]), base)

    ncdimensions = construct.get_property("cfa_dimensions", "").split()
    dtype = variable.dtype

    if dtype is str:
        # netCDF string types have a dtype of `str`, which needs
        # to be reset as a numpy.dtype, but we don't know what
        # without reading the data, so set it to None for now.
        dtype = None

    # UNICODE???? TODO
    if self._is_char(ncvar) and dtype.kind in "SU" and ncdimensions:
        strlen = g["nc"].dimensions[ncdimensions[-1]].size
        if strlen > 1:
            ncdimensions.pop()
            dtype = numpy_dtype("S{0}".format(strlen))
    # --- End: if

    cfa_data["dtype"] = dtype
    cfa_data["_axes"] = ncdimensions
    cfa_data["shape"] = [
        g["nc"].dimensions[ncdim].size for ncdim in ncdimensions
    ]

    for attrs in cfa_data["Partitions"]:
        # FORMAT
        sformat = attrs.get("subarray", {}).pop("format", "netCDF")
        if sformat is not None:
            attrs["format"] = sformat

        # DTYPE
        dtype = attrs.get("subarray", {}).pop("dtype", None)
        if dtype not in (None, "char"):
            attrs["subarray"]["dtype"] = numpy_dtype(dtype)

        # UNITS and CALENDAR
        units = attrs.pop("punits", None)
        calendar = attrs.pop("pcalendar", None)
        if units is not None or calendar is not None:
            attrs["Units"] = Units(units, calendar)

        # AXES
        pdimensions = attrs.pop("pdimensions", None)
        if pdimensions is not None:
            attrs["axes"] = pdimensions

        # REVERSE
        reverse = attrs.pop("reverse", None)
        if reverse is not None:
            attrs["reverse"] = reverse

        # LOCATION: Change to python indexing (i.e. range does not
        # include the final index)
        for r in attrs["location"]:
            r[1] += 1

        # PART: Change to python indexing (i.e. slice range does
        # not include the final index)
        part = attrs.get("part", None)
        if part:
            p = []
            for x in ast_literal_eval(part):
                if isinstance(x, list):
                    if x[2] > 0:
                        p.append(slice(x[0], x[1] + 1, x[2]))
                    elif x[1] == 0:
                        p.append(slice(x[0], None, x[2]))
                    else:
                        p.append(slice(x[0], x[1] - 1, x[2]))
                else:
                    p.append(list(x))
            # --- End: for

            attrs["part"] = p
    # --- End: for

    construct.del_property("cf_role")
    construct.del_property("cfa_array")
    construct.del_property("cfa_dimensions", None)

    out = self._create_Data(loadd=cfa_data)

    return out
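# Sketch only: the "part" conversion performed in the CFA partition loop of
# _create_data() above.  The attribute is a text literal of [start, stop,
# step] triples with an inclusive stop, converted to Python slices with an
# exclusive stop.  The example value below is made up.
from ast import literal_eval as ast_literal_eval

part = "[[0, 9, 1], [5, 0, -1]]"    # hypothetical CFA 'part' attribute
p = []
for x in ast_literal_eval(part):
    if isinstance(x, list):
        if x[2] > 0:
            p.append(slice(x[0], x[1] + 1, x[2]))   # forward step
        elif x[1] == 0:
            p.append(slice(x[0], None, x[2]))       # backward step down to 0
        else:
            p.append(slice(x[0], x[1] - 1, x[2]))   # backward step
    else:
        p.append(list(x))

print(p)   # -> [slice(0, 10, 1), slice(5, None, -1)]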
def create(cls, cell_methods_string=None):
    """Parse a CF-like cell_methods string.

    :Parameters:

        cell_methods_string: `str`
            A CF cell_methods string.

    :Returns:

        `list`

    **Examples:**

    >>> c = CellMethod.create('lat: mean (interval: 1 hour)')

    """
    incorrect_interval = "Cell method interval is incorrectly formatted"

    out = []

    if not cell_methods_string:
        return out

    # ------------------------------------------------------------
    # Split the cell_methods string into a list of strings ready
    # for parsing. For example:
    #
    #   'lat: mean (interval: 1 hour)'
    #
    # would be split up into:
    #
    #   ['lat:', 'mean', '(', 'interval:', '1', 'hour', ')']
    # ------------------------------------------------------------
    cell_methods = re.sub(r"\((?=[^\s])", "( ", cell_methods_string)
    cell_methods = re.sub(r"(?<=[^\s])\)", " )", cell_methods).split()

    while cell_methods:
        cm = cls()

        axes = []
        while cell_methods:
            if not cell_methods[0].endswith(":"):
                break

            # TODO Check that "name" ends with colon? How? ('lat: mean
            # (area-weighted)' or 'lat: mean (interval: 1 degree_north
            # comment: area-weighted)')

            axis = cell_methods.pop(0)[:-1]

            axes.append(axis)

        cm.set_axes(axes)

        if not cell_methods:
            out.append(cm)
            break

        # Method
        cm.set_method(cell_methods.pop(0))

        if not cell_methods:
            out.append(cm)
            break

        # Climatological statistics, and statistics which apply to
        # portions of cells
        while cell_methods[0] in ("within", "where", "over"):
            attr = cell_methods.pop(0)
            cm.set_qualifier(attr, cell_methods.pop(0))
            if not cell_methods:
                break

        if not cell_methods:
            out.append(cm)
            break

        # interval and comment
        intervals = []
        if cell_methods[0].endswith("("):
            cell_methods.pop(0)

            if not (re.search("^(interval|comment):$", cell_methods[0])):
                cell_methods.insert(0, "comment:")

            while not re.search(r"^\)$", cell_methods[0]):
                term = cell_methods.pop(0)[:-1]

                if term == "interval":
                    interval = cell_methods.pop(0)
                    if cell_methods[0] != ")":
                        units = cell_methods.pop(0)
                    else:
                        units = None

                    try:
                        parsed_interval = ast_literal_eval(interval)
                    except (SyntaxError, ValueError):
                        raise ValueError(
                            f"{incorrect_interval}: {interval!r}")

                    try:
                        data = Data(
                            array=parsed_interval, units=units, copy=False)
                    except Exception:
                        raise ValueError(
                            f"{incorrect_interval}: {interval!r}")

                    intervals.append(data)
                    continue

                if term == "comment":
                    comment = []
                    while cell_methods:
                        if cell_methods[0].endswith(")"):
                            break
                        if cell_methods[0].endswith(":"):
                            break

                        comment.append(cell_methods.pop(0))

                    cm.set_qualifier("comment", " ".join(comment))

            if cell_methods[0].endswith(")"):
                cell_methods.pop(0)

        n_intervals = len(intervals)
        if n_intervals > 1 and n_intervals != len(axes):
            raise ValueError(
                f"{incorrect_interval} (doesn't match axes): {interval!r}")

        if intervals:
            cm.set_qualifier("interval", intervals)

        out.append(cm)

    return out
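# Sketch only: the tokenisation step used by both create() implementations
# above -- parentheses are padded with spaces so the cell_methods string
# splits cleanly on whitespace, matching the example in the function comment.
import re

cell_methods_string = "lat: mean (interval: 1 hour)"
s = re.sub(r"\((?=[^\s])", "( ", cell_methods_string)
s = re.sub(r"(?<=[^\s])\)", " )", s)
print(s.split())
# -> ['lat:', 'mean', '(', 'interval:', '1', 'hour', ')']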