def get_kwarg(arg, cls, subclass=False):
    """Pop `arg` from `kwargs` and type-check the value against `cls`.

    `cls` may be a single class or a tuple of classes.  With ``subclass=True``
    the popped value must be a *subclass* of `cls` rather than an instance.
    Returns the popped value (possibly None if the keyword was absent).

    NOTE(review): `kwargs` is a free variable here -- this function is
    evidently meant to be nested inside a constructor that owns `kwargs`
    (compare the identical helper nested in Computation.__new__); confirm the
    enclosing scope before reusing it standalone.
    """
    gotten = pop_kwarg(kwargs, arg)
    if not subclass:
        # Instance check: reject non-None values of the wrong type.
        if gotten is not None and not isinstance(gotten, cls):
            if not isinstance(cls, tuple):
                raise TypeError("#{0} argument given to Computation() "
                                "must be an instance of the #{1} class.".format(arg, classname(cls)))
            else:
                raise TypeError("#{0} argument given to Computation() "
                                "must be one of (#{1}) .".format(arg, ', '.join(classname(cl) for cl in cls)))
    else:
        # Subclass check: the value itself must be a class.
        if gotten is not None and not issubclass(gotten, cls):
            if not isinstance(cls, tuple):
                # REVIEW FIX: grammar in user-facing message, "an subclass" -> "a subclass".
                raise TypeError("#{0} argument given to Computation() "
                                "must be a subclass of the #{1} class.".format(arg, classname(cls)))
            else:
                raise TypeError("#{0} argument given to Computation() "
                                "must be subclass of one of (#{1}) .".format(arg, ', '.join(classname(cl) for cl in cls)))
    return gotten
def __new__(cls, *args, **kwargs):
    """
    TODO Move this to class level documentation, since it doesn't show up in Sphinx

    Takes several different forms.  Given a number of arguments (possibly
    nested lists), a Tensor is created as expected.  *This is the only form in
    which arguments may be specified without keywords.*  `Tensor` can also be
    initialized by giving a list of dimensions for the `shape` (aliased as
    `dimension`) keyword argument, with a possible default value keyword
    argument `default_val`.  Unless otherwise specified via the `dtype`
    keyword argument, it is assumed that the input data should be cast as a
    `numpy.float64`.
    """
    # Pop off any special args or kwargs and store their values for later
    has_indices = False
    ret_val = None
    indices = None
    units = pop_kwarg(kwargs, 'units')
    name = kwargs.pop('name', None)
    #--------------------------------------------------------------------------------#
    # Pop off any kwargs that the subclass's __tensor_init__ takes, walking the
    # MRO up to (but not including) Tensor itself.
    subclass_kwargs = {}
    for supercls in cls.__mro__:
        if supercls is Tensor:
            break
        if hasattr(supercls, '__tensor_init__') and callable(supercls.__tensor_init__):
            # A subclass may advertise its own argspec; fall back to introspection.
            if hasattr(supercls.__tensor_init__, 'getargspec'):
                argspec = supercls.__tensor_init__.getargspec()
            else:
                argspec = inspect.getargspec(supercls.__tensor_init__)
            for arg in argspec.args[1:]:
                subclass_kwargs[arg] = kwargs.pop(arg, None)
    #--------------------------------------------------------------------------------#
    # Check for indices...  A lone string positional argument is treated as an
    # einsum-style index specification rather than data.
    indices_kwarg = kwargs.pop('indices', None)
    if indices_kwarg is None and len(args) == 1 and isinstance(args[0], basestring):
        args = list(args)
        indices_kwarg = args.pop(0)
        args = tuple(args)
    index_range_set = pop_multikwarg(kwargs, 'index_range_set', 'in_set', 'set')
    if indices_kwarg is not None:
        has_indices = True
        indices = EinsumTensor.split_indices(indices_kwarg)
        if index_range_set is None:
            index_range_set = IndexRange.global_index_range_set
        # The shape is implied by the sizes of the named index ranges.
        shape = []
        for idx in indices:
            if idx not in index_range_set.known_ranges:
                raise IndexError("unknown index '" + idx + "'")
            shape.append(index_range_set.known_ranges[idx].size)
        shape = tuple(shape)
        if "shape" in kwargs:
            kwshape = kwargs.pop('shape')
            # REVIEW FIX: normalize to tuple so a list-valued `shape` kwarg
            # that agrees with the index-implied shape is not rejected.
            if shape != tuple(kwshape):
                raise TypeError("inconsistent shape: indices '{}' indicate a shape of {}, but"
                                " the keyword 'shape' was given with the shape {}".format(
                    ",".join(indices), shape, kwshape
                ))
        kwargs['shape'] = shape
    #--------------------------------------------------------------------------------#
    # Now create a numpy.ndarray object...
    def_val = pop_kwarg(kwargs, 'default_val', 'default_value', 'default') or 0.0
    # Set the default data type to float, unless the user specifies
    dtype = get_kwarg(kwargs, 'dtype') or float
    if not callable(dtype) and grendel.show_warnings:
        # REVIEW FIX: added the missing space between "instead" and "of".
        warn("dtype given to {0} constructor is not Callable and thus cannot be used for casting."
             " It is better to use callable types for dtype if possible; e.g. numpy.float64 instead"
             " of numpy.dtype('float64')".format(classname(cls)))
    kwargs['dtype'] = dtype
    # Typecast the default value
    if callable(dtype):
        def_val = dtype(def_val)
    # See if we have a shape...
    shape = pop_kwarg(kwargs, 'shape', 'dimension')
    # See if we have data...
    # This allows us to support the form Tensor(1, 2, 3, 4)
    if len(args) == 1 and isinstance(args[0], np.ndarray):
        data = args[0]
    else:
        data = listify_args(*args)
    if 'data' in kwargs:
        # REVIEW FIX: `data` may be an ndarray here, whose truth value is
        # ambiguous; test it explicitly so the both-given error still fires.
        if isinstance(data, np.ndarray) or data:
            raise TypeError("`data` may be specified as a keyword argument or as "
                            "the regular arguments to {0} constructor, but not both.".format(classname(cls)))
        else:
            # REVIEW FIX: pop_kwarg was called without the kwargs dict, which
            # raised a TypeError whenever `data=` was actually used.
            data = pop_kwarg(kwargs, 'data')
    if shape and not isinstance(data, np.ndarray):
        has_content = False
        if len(args) != 0:
            has_content = True
        if not has_content:
            # No data given: fill a fresh array with the default value.
            if def_val == 0.0:
                try:
                    ret_val = np.zeros(shape=shape, **kwargs)
                except:
                    # debugging breakpoint hook
                    raise
            else:
                ret_val = (np.ones(shape=shape, **kwargs) * def_val)
        else:
            if grendel.sanity_checking_enabled:
                # Check data length
                tmp = np.array(data)
                needed_data_size = 1
                for dim in shape:
                    needed_data_size *= dim
                try:
                    tmp.reshape((needed_data_size,))
                except ValueError:
                    raise ValueError("Data provided to {0} constructor is incompatible with the shape {1}".format(
                        classname(cls), shape))
            # Check data type
            ret_val = np.array(data, **kwargs).reshape(shape)
    else:
        # Just pass on the data and any surviving kwargs
        try:
            if isinstance(data, np.ndarray) and (
                    len(kwargs) == 0
                    or (len(kwargs) == 1 and 'dtype' in kwargs and kwargs['dtype'] == data.dtype)):
                # Just do a view
                ret_val = data.view(cls)
            else:
                # Otherwise, we need to call the numpy "constructor" (actually
                # a factory function) of ndarray
                ret_val = np.array(data, **kwargs)
        except:
            # debugging breakpoint hook
            raise
        if shape and ret_val.shape != shape:
            raise ValueError("Shape mismatch: data shape {0} does not match specified shape {1}".format(
                data.shape, shape
            ))
    #--------------------------------------------------------------------------------#
    # View-cast the ret_val to the class in question, but only if we haven't already
    if not isinstance(ret_val, cls):
        ret_val = ret_val.view(cls)
    # Now assign stuff from any special args...
    if has_indices:
        ret_val.indices = indices
        ret_val.index_range_set = index_range_set
    else:
        ret_val.indices = None
    ret_val.units = units
    ret_val.name = name
    if name is None:
        ret_val.name = "(unnamed tensor)"
    # Pass the remaining kwargs to the initializer...
    ret_val.__tensor_init__(**subclass_kwargs)
    return ret_val
def __new__(cls, **kwargs):
    """Create a Computation via the requested package interface.

    The `package` keyword names (or directly supplies) a PackageInterface;
    the molecule, result getter, details, and property list are pulled from
    `kwargs`, the working directory is resolved (with optional mako
    templating), and the remaining kwargs are forwarded to __init__.
    """
    def get_kwarg(arg, cls, subclass=False):
        # Pop `arg` from the enclosing __new__'s kwargs and type-check the
        # value against `cls` (a class or tuple of classes).  With
        # subclass=True the value must be a subclass rather than an instance.
        gotten = pop_kwarg(kwargs, arg)
        if not subclass:
            if gotten is not None and not isinstance(gotten, cls):
                if not isinstance(cls, tuple):
                    raise TypeError("#{0} argument given to Computation() "
                                    "must be an instance of the #{1} class.".format(arg, classname(cls)))
                else:
                    raise TypeError("#{0} argument given to Computation() "
                                    "must be one of (#{1}) .".format(arg, ', '.join(classname(cl) for cl in cls)))
        else:
            if gotten is not None and not issubclass(gotten, cls):
                if not isinstance(cls, tuple):
                    # REVIEW FIX: grammar, "an subclass" -> "a subclass".
                    raise TypeError("#{0} argument given to Computation() "
                                    "must be a subclass of the #{1} class.".format(arg, classname(cls)))
                else:
                    raise TypeError("#{0} argument given to Computation() "
                                    "must be subclass of one of (#{1}) .".format(arg, ', '.join(classname(cl) for cl in cls)))
        return gotten
    #--------------------------------------------------------------------------------#
    # Handle molecule, make a ResultGetter
    molecule = get_kwarg('molecule', Molecule)
    res_getter = get_kwarg('result_getter', ComputationResultGetter)
    if res_getter is None:
        res_getter = ComputationResultGetter(**kwargs)
    molecule.result_getters.append(res_getter)
    #--------------------------------------------------------------------------------#
    # Now see what package we're using
    package = get_kwarg('package', (basestring, PackageInterface))
    comp_class = cls
    interface = None
    if isinstance(package, PackageInterface):
        interface = package
    elif isinstance(package, basestring):
        interface = PackageInterface.load_package(
            package,
            input_generator=get_kwarg('input_generator', InputGenerator, True),
            runner=get_kwarg('runner', Runner, True),
            output_parser=get_kwarg('output_parser', OutputParser, True),
            details_class=get_kwarg('details_class', ComputationDetails, True),
            computation_class=get_kwarg('computation_class', cls, True)
        )
    if interface is None:
        # REVIEW FIX: fail with a clear message instead of an opaque
        # AttributeError on the next line when no package was given.
        raise TypeError("package argument given to Computation() must be a "
                        "PackageInterface instance or a package name string.")
    if interface.computation_class is not None:
        # REVIEW FIX: honor the interface's computation class; the original
        # assigned `cls` here, making the override a no-op.
        comp_class = interface.computation_class
    ret_val = object.__new__(comp_class)
    #--------------------------------------------------------------------------------#
    ret_val.molecule = molecule
    res_getter.add_computation(ret_val)
    ret_val.input_generator = interface.input_generator(ret_val)
    ret_val.output_parser = interface.output_parser(ret_val)
    #TODO accept option for directory to put input/output files in
    #TODO accept option for input/output filenames
    #TODO support for runners that don't follow the input/output pattern
    ret_val.runner = interface.runner(input_file='input.dat', output_file='output.dat')
    #--------------------------------------------------------------------------------#
    ret_val.details = get_kwarg('details', interface.details_class)
    ret_val.properties = pop_kwarg(kwargs, 'property')
    if isinstance(ret_val.properties, Iterable):
        ret_val.properties = list(ret_val.properties)
    else:
        ret_val.properties = [ret_val.properties]
    def _is_property(prop):
        # REVIEW FIX: issubclass() raises TypeError when handed a non-class,
        # so the original `issubclass(prop, ...) or isinstance(prop, ...)`
        # check crashed for MolecularProperty *instances*.  Test instance-ness
        # first and treat a TypeError from issubclass as "not a property".
        if isinstance(prop, MolecularProperty):
            return True
        try:
            return issubclass(prop, MolecularProperty)
        except TypeError:
            return False
    if not all(_is_property(prop) for prop in ret_val.properties):
        # (Removed a no-op .format(...) call: the message has no placeholders.)
        raise TypeError("property argument given to Computation() must be a list of MolecularProperty subclasses or instances.")
    fixed_properties = []
    for prop in ret_val.properties:
        if isinstance(prop, MolecularProperty):
            fixed_properties.append(prop)
            prop.details = ret_val.details
        elif issubclass(prop, MolecularProperty):
            # Instantiate property classes for this molecule/details pair.
            fixed_properties.append(prop(molecule=ret_val.molecule, details=ret_val.details))
    ret_val.properties = fixed_properties
    #--------------------------------------------------------------------------------#
    # Get input_filename and output_filename, if specified.  Make sure that
    # neither is a path.
    # first get input_filename
    input_filename = pop_kwarg(kwargs, 'input_filename')
    if input_filename:
        if os.path.split(input_filename)[0] != '':
            raise ValueError("File name '{0}' given for argument 'input_filename' of Computation constructor "
                             "must be a single file, not a path. Use the 'directory' argument to specify "
                             "a directory.".format(input_filename))
        ret_val.runner.input_file = input_filename
    else:
        ret_val.runner.input_file = Runner.default_input_filename
    #----------------------------------------#
    # now get output_filename
    output_filename = pop_kwarg(kwargs, 'output_filename')
    if output_filename:
        if os.path.split(output_filename)[0] != '':
            raise ValueError("File name '{0}' given for argument 'output_filename' of Computation constructor "
                             "must be a single file, not a path. Use the 'directory' argument to specify "
                             "a directory.".format(output_filename))
        ret_val.runner.output_file = output_filename
    else:
        ret_val.runner.output_file = Runner.default_output_filename
    #--------------------------------------------------------------------------------#
    # Find out the desired directory and make new ones if necessary and allowed.
    ret_val.directory = pop_kwarg(kwargs, 'directory')
    used_template = False
    if ret_val.directory is not None:
        ret_val.directory = full_path(ret_val.directory)
    elif cls.directory_template is not None:
        ret_val.directory = cls.directory_template
        used_template = True
    else:
        ret_val.directory = full_path(os.curdir)
    #----------------------------------------#
    # Do mako-template style replacement, if the user has mako
    try:
        from mako.template import Template
        from mako import exceptions
        # Start with some obvious standard things to add...
        variables = dict(
            mol=ret_val.molecule,
            molecule=ret_val.molecule,
            calc=ret_val,
            calculation=ret_val,
            rg=res_getter,
            result_getter=res_getter,
            details=ret_val.details
        )
        # Get all of the known details, as strings
        det_strs = dict()
        for k, v in ret_val.details._known_details.items():
            det_strs[str(k)] = str(v)
            det_strs[str(k).lower()] = str(v)
            det_strs[str(k).upper()] = str(v)
            if k in Computation.attribute_aliases:
                for alias in Computation.attribute_aliases[k]:
                    alias = make_safe_identifier(alias)
                    det_strs[str(alias)] = str(v)
                    det_strs[str(alias).lower()] = str(v)
                    det_strs[str(alias).upper()] = str(v)
        variables.update(det_strs)
        # Get any additional keywords specified by the user (advanced feature)
        additional = kwargs.pop('directory_creation_vars', dict())
        variables.update(additional)
        # Now run mako on the template...
        try:
            ret_val.directory = Template(
                text=ret_val.directory,
                strict_undefined=True
            ).render(
                **variables
            )
        except:
            raise ValueError(
                "invalid MAKO template:\n{}\nMako came back with error:\n{}".format(
                    indent(ret_val.directory),
                    indent(exceptions.text_error_template().render())
                )
            )
        # and make it into a "url safe string":
        ret_val.directory = urllib.quote_plus(
            ret_val.directory,
            safe='/' + cls.additional_safe_directory_characters
        )
    except ImportError:
        # Can't do mako parsing of path.  Oh well...
        # But if the user specified additional args, they probably thought they
        # did have mako installed...
        additional = kwargs.pop('directory_creation_vars', None)
        if additional is not None:
            raise
        # and if they have a ${ in the directory name or a <% in the directory
        # name, they probably also thought that they had mako.
        # (Note: this could be an issue in the unlikely scenario that the user
        # is using e.g. zsh and specifying environment variables from within
        # Python as ${ }.  If someone is having that problem, they probably
        # have enough intelligence also to figure out how to get around it.)
        if '${' in ret_val.directory or '<%' in ret_val.directory:
            raise
        # if they're using a class-level template, they also need to have mako
        # installed
        if used_template:
            raise
    #----------------------------------------#
    ret_val.directory = full_path(ret_val.directory)
    #----------------------------------------#
    # Now create the directory if we're allowed to and need to
    create_dirs = kwargs.pop('create_directories', cls.always_create_missing)
    if create_dirs:
        if not os.path.isdir(ret_val.directory):
            if os.path.exists(ret_val.directory):
                raise OSError(
                    "path '{}' exists and is not a directory".format(
                        ret_val.directory
                    )
                )
            os.makedirs(ret_val.directory)
    else:
        if not os.path.isdir(ret_val.directory):
            raise OSError(
                "'{0}' is not a valid directory, and will not be created. Set the "
                "'create_directories' argument of computation to True to do directory "
                "creation automatically.".format(
                    ret_val.directory))
    #----------------------------------------#
    # now set the input_file and output_file values
    ret_val.runner.input_file = os.path.join(ret_val.directory, ret_val.runner.input_file)
    ret_val.runner.output_file = os.path.join(ret_val.directory, ret_val.runner.output_file)
    ret_val.runner.working_directory = ret_val.directory
    #--------------------------------------------------------------------------------#
    # Pass the remaining kwargs on to the initialization, which may have been
    # customized for the particular package.
    ret_val.__init__(**kwargs)
    return ret_val
def __new__(cls, *args, **kwargs):
    """
    TODO Move this to class level documentation, since it doesn't show up in Sphinx

    Takes several different forms.  Given a number of arguments (possibly
    nested lists), a Tensor is created as expected.  *This is the only form in
    which arguments may be specified without keywords.*  `Tensor` can also be
    initialized by giving a list of dimensions for the `shape` (aliased as
    `dimension`) keyword argument, with a possible default value keyword
    argument `default_val`.  Unless otherwise specified via the `dtype`
    keyword argument, it is assumed that the input data should be cast as a
    `numpy.float64`.
    """
    # Pop off any special args or kwargs and store their values for later
    has_indices = False
    ret_val = None
    indices = None
    units = pop_kwarg(kwargs, 'units')
    name = kwargs.pop('name', None)
    #--------------------------------------------------------------------------------#
    # Pop off any kwargs that the subclass's __tensor_init__ takes, walking the
    # MRO up to (but not including) Tensor itself.
    subclass_kwargs = {}
    for supercls in cls.__mro__:
        if supercls is Tensor:
            break
        if hasattr(supercls, '__tensor_init__') and callable(supercls.__tensor_init__):
            # A subclass may advertise its own argspec; fall back to introspection.
            if hasattr(supercls.__tensor_init__, 'getargspec'):
                argspec = supercls.__tensor_init__.getargspec()
            else:
                argspec = inspect.getargspec(supercls.__tensor_init__)
            for arg in argspec.args[1:]:
                subclass_kwargs[arg] = kwargs.pop(arg, None)
    #--------------------------------------------------------------------------------#
    # Check for indices...  A lone string positional argument is treated as an
    # einsum-style index specification rather than data.
    indices_kwarg = kwargs.pop('indices', None)
    if indices_kwarg is None and len(args) == 1 and isinstance(args[0], basestring):
        args = list(args)
        indices_kwarg = args.pop(0)
        args = tuple(args)
    index_range_set = pop_multikwarg(kwargs, 'index_range_set', 'in_set', 'set')
    if indices_kwarg is not None:
        has_indices = True
        indices = EinsumTensor.split_indices(indices_kwarg)
        if index_range_set is None:
            index_range_set = IndexRange.global_index_range_set
        # The shape is implied by the sizes of the named index ranges.
        shape = []
        for idx in indices:
            if idx not in index_range_set.known_ranges:
                raise IndexError("unknown index '" + idx + "'")
            shape.append(index_range_set.known_ranges[idx].size)
        shape = tuple(shape)
        if "shape" in kwargs:
            kwshape = kwargs.pop('shape')
            # REVIEW FIX: normalize to tuple so a list-valued `shape` kwarg
            # that agrees with the index-implied shape is not rejected.
            if shape != tuple(kwshape):
                raise TypeError(
                    "inconsistent shape: indices '{}' indicate a shape of {}, but"
                    " the keyword 'shape' was given with the shape {}".
                    format(",".join(indices), shape, kwshape))
        kwargs['shape'] = shape
    #--------------------------------------------------------------------------------#
    # Now create a numpy.ndarray object...
    def_val = pop_kwarg(kwargs, 'default_val', 'default_value', 'default') or 0.0
    # Set the default data type to float, unless the user specifies
    dtype = get_kwarg(kwargs, 'dtype') or float
    if not callable(dtype) and grendel.show_warnings:
        # REVIEW FIX: added the missing space between "instead" and "of".
        warn("dtype given to {0} constructor is not Callable and thus cannot be used for casting."
             " It is better to use callable types for dtype if possible; e.g. numpy.float64 instead"
             " of numpy.dtype('float64')".format(classname(cls)))
    kwargs['dtype'] = dtype
    # Typecast the default value
    if callable(dtype):
        def_val = dtype(def_val)
    # See if we have a shape...
    shape = pop_kwarg(kwargs, 'shape', 'dimension')
    # See if we have data...
    # This allows us to support the form Tensor(1, 2, 3, 4)
    if len(args) == 1 and isinstance(args[0], np.ndarray):
        data = args[0]
    else:
        data = listify_args(*args)
    if 'data' in kwargs:
        # REVIEW FIX: `data` may be an ndarray here, whose truth value is
        # ambiguous; test it explicitly so the both-given error still fires.
        if isinstance(data, np.ndarray) or data:
            raise TypeError("`data` may be specified as a keyword argument or as "
                            "the regular arguments to {0} constructor, but not both.".format(classname(cls)))
        else:
            # REVIEW FIX: pop_kwarg was called without the kwargs dict, which
            # raised a TypeError whenever `data=` was actually used.
            data = pop_kwarg(kwargs, 'data')
    if shape and not isinstance(data, np.ndarray):
        has_content = False
        if len(args) != 0:
            has_content = True
        if not has_content:
            # No data given: fill a fresh array with the default value.
            if def_val == 0.0:
                try:
                    ret_val = np.zeros(shape=shape, **kwargs)
                except:
                    # debugging breakpoint hook
                    raise
            else:
                ret_val = (np.ones(shape=shape, **kwargs) * def_val)
        else:
            if grendel.sanity_checking_enabled:
                # Check data length
                tmp = np.array(data)
                needed_data_size = 1
                for dim in shape:
                    needed_data_size *= dim
                try:
                    tmp.reshape((needed_data_size, ))
                except ValueError:
                    raise ValueError(
                        "Data provided to {0} constructor is incompatible with the shape {1}"
                        .format(classname(cls), shape))
            # Check data type
            ret_val = np.array(data, **kwargs).reshape(shape)
    else:
        # Just pass on the data and any surviving kwargs
        try:
            if isinstance(data, np.ndarray) and (
                    len(kwargs) == 0
                    or (len(kwargs) == 1 and 'dtype' in kwargs and kwargs['dtype'] == data.dtype)):
                # Just do a view
                ret_val = data.view(cls)
            else:
                # Otherwise, we need to call the numpy "constructor" (actually
                # a factory function) of ndarray
                ret_val = np.array(data, **kwargs)
        except:
            # debugging breakpoint hook
            raise
        if shape and ret_val.shape != shape:
            raise ValueError(
                "Shape mismatch: data shape {0} does not match specified shape {1}"
                .format(data.shape, shape))
    #--------------------------------------------------------------------------------#
    # View-cast the ret_val to the class in question, but only if we haven't already
    if not isinstance(ret_val, cls):
        ret_val = ret_val.view(cls)
    # Now assign stuff from any special args...
    if has_indices:
        ret_val.indices = indices
        ret_val.index_range_set = index_range_set
    else:
        ret_val.indices = None
    ret_val.units = units
    ret_val.name = name
    if name is None:
        ret_val.name = "(unnamed tensor)"
    # Pass the remaining kwargs to the initializer...
    ret_val.__tensor_init__(**subclass_kwargs)
    return ret_val