import numpy as np
import matplotlib.pyplot as plt
from math import pi

# FILENAME, N_ENTRIES, T_CONST and arrayPrint are module-level
# definitions assumed to exist elsewhere in the original script.

def main():
    print 'Building lookup tables.',
    outfile = open(FILENAME, 'w')
    outfile.write('#include <avr/pgmspace.h>\n\n'
                  '#define NUM_WAVE_ENTRIES {0}\n'
                  'unsigned char wave_table[{0}] PROGMEM = \n{{\n'
                  .format(N_ENTRIES))
    np.set_string_function(arrayPrint, False)

    # calculate
    time = np.linspace(0, 10 * pi, N_ENTRIES)
    wave_vals = (np.exp(-time / T_CONST) +
                 (0.05 * np.cos(time * 2) + .05) *
                 np.exp(-time / T_CONST / 5)) * 200

    if 1:
        plt.plot(time, wave_vals)
        plt.grid(True)
        plt.show()

    # Output to file
    for row in range(0, N_ENTRIES):
        outfile.write(str(int(max(0, wave_vals[row]))) + ',\n')
        print '.',
    outfile.write('};\n')
    outfile.close()
    print '\nLookup tables built'
def __setitem__(self, key: Tuple[Union[int, slice], Union[int, slice],
                                 Union[int, slice]], value: str):
    """Sets cell code and resets result cache

    :param key: Cell key(s) that shall be set
    :param value: Code for cell(s) to be set

    """
    # Change numpy array repr function for grid cell results
    numpy.set_string_function(lambda s: repr(s.tolist()))

    # Prevent unchanged cells from being recalculated on cursor movement
    repr_key = repr(key)
    unchanged = (repr_key in self.result_cache and
                 value == self(key)) or \
                ((value is None or value == "") and
                 repr_key not in self.result_cache)

    super().__setitem__(key, value)

    if not unchanged:
        # Reset result cache
        self.result_cache = {}
def DisplaySkillScores(skillScores, skillScoreName):
    """
    Display the skill score results in a neat manner.

    Note, this function messes around with the formatting options
    of printing numpy arrays. It does restore the settings back to
    the numpy defaults, but if you had your own formatting specified
    before calling this function, you will need to reset it.
    """
    np.set_string_function(lambda x: '\n'.join(
        [' '.join(["% 11.8f" % val for val in row]) for row in x]),
        repr=True)

    # Print the last eleven characters of each trackrun name for
    # the column labels.
    print(' '.join(
        ["%11.11s" % tracker[-11:] for tracker in skillScores.label[-1]]))
    print(repr(skillScores.x))
    print("-" * (11 * skillScores.shape[1] +
                 2 * (skillScores.shape[1] - 1)))

    # Resetting back to how it was
    np.set_string_function(None, repr=True)
def export_data_if_needed(df, props):
    def get_prop(k):
        return props[k] if k in props else None

    def printer(arr):
        return np.array_str(arr)

    if _parse_optional_bool(get_prop("export_data")):
        format = "csv"
        folder = get_prop("image_export_folder") or os.getcwd()
        filename = get_prop("image_export_filename") or _sanitize_filename(
            chart.get_name())
        filepath = os.path.join(folder, filename) + "." + format  # TODO make it better
        print("exporting data to: '" + filepath + "' as " + format)

        old_opts = np.get_printoptions()
        np.set_printoptions(threshold=np.inf, linewidth=np.inf)
        np.set_string_function(printer, False)
        pd.set_option('display.max_columns', None)
        pd.set_option('display.max_colwidth', None)

        df.to_csv(filepath)

        np.set_string_function(None, False)
        np.set_printoptions(**old_opts)
def process_Iqxy_data(file_content):
    """
        Process the content of an I(qx,qy) file and return a string
        representation of the data that we can ship to the client
        for plotting.
        @param file_content: content of the data file
    """
    fd = tempfile.NamedTemporaryFile()
    fd.write(file_content)
    fd.seek(0)

    numpy.set_printoptions(threshold='nan', nanstr='0', infstr='0')
    fd = h5py.File(fd.name, 'r')
    g = fd['mantid_workspace_1']
    y = g['workspace']['axis1']
    x = g['workspace']['axis2']
    values = g['workspace']['values']
    z_max = numpy.amax(values)
    numpy.set_string_function(
        lambda x: '[' + ','.join(
            map(lambda y: '[' + ','.join(map(lambda z: "%.4g" % z, y)) + ']',
                x)) + ']')
    data_str_2d = values[:].__repr__()
    numpy.set_string_function(
        lambda x: '[' + ','.join(map(lambda z: "%.4g" % z, x)) + ']')
    y_str = y[:].__repr__()
    x_str = x[:].__repr__()
    return data_str_2d, x_str, y_str, 0.0, z_max
def main():
    # set up and parse arguments
    class MyFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.MetavarTypeHelpFormatter):
        pass

    parser = argparse.ArgumentParser(
        description='Calculate RONN disorder prediction.',
        formatter_class=MyFormatter)
    parser.add_argument('inputfile', type=str, default=sys.stdin,
                        help='File of protein coding sequences to predict on.')
    parser.add_argument('--inputformat', type=str, default='fasta',
                        help='File format of [inputfile]. '
                             'Any Bio.SeqIO-readable supported')
    args = parser.parse_args()
    if args.inputfile == '-':
        args.inputfile = sys.stdin

    # print numpy arrays in full, one entry per line
    np.set_string_function(lambda x: "\n".join(map(str, x.tolist())), False)
    np.set_printoptions(threshold=sys.maxsize)

    for record in Bio.SeqIO.parse(args.inputfile, args.inputformat):
        record.seq = record.seq.upper()
        print(">" + record.id, calc_ronn(str(record.seq)), sep='\n')
    return
def set_numpy_oneline_repr():
    """Change the default Numpy array ``__repr__`` to a compact one.

    This is useful for keeping the log clean when the job functions
    involve large Numpy arrays as args.
    """
    import numpy as np

    EDGEITEMS = 3

    def oneline_repr(a):
        N = np.prod(a.shape)
        if N == 0:
            # No numbers to show if the last dimension is zero
            return f"array[{a.shape}]([])"
        ind = np.unravel_index(range(min(EDGEITEMS, N)), a.shape)
        afew = []
        for x, val in zip(np.c_[ind], a[ind]):
            for i, y in enumerate(x[::-1]):
                if y != 0:
                    break
            else:
                i += 1
            for j, y in enumerate(x[::-1]):
                if y + 1 != a.shape[len(x) - j - 1]:
                    break
            else:
                j += 1
            afew.append(f"{'['*i}{val}{']'*j}")
        return f"array[{a.shape}]({', '.join(afew)}{'...' if N > EDGEITEMS else ''})"

    np.set_string_function(oneline_repr, repr=True)
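# A minimal usage sketch (not from the source): with the one-line repr
# installed, a large array collapses to a short summary, and passing
# None restores numpy's default.
import numpy as np

set_numpy_oneline_repr()
print(repr(np.zeros((1000, 3))))  # e.g. array[(1000, 3)]([[0.0, 0.0, 0.0]...)
np.set_string_function(None, repr=True)  # back to the default repr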
def simulation(self, N=50000):
    import pandas
    import scikits.statsmodels.tsa.api

    np.set_string_function(None)
    nSims = 1
    logger.debug('\n\n--------- SIMULATION---------\n\n')
    logger.debug('T = %d\n' % N)
    cumTransMatrix = np.cumsum(self.T, 1)

    varSim = np.zeros((N, self.vars))
    shocks = np.zeros((N), dtype=int)
    shocks[0] = 1
    varSim[0, :] = self.S[shocks[0], :]
    lastShock = 1
    for t in range(1, N):
        shockR = np.random.random_sample()
        itemindex = np.where(shockR < cumTransMatrix[lastShock, :])
        shock = itemindex[0][0]
        shocks[t] = shock
        for ixVar in range(self.vars):
            varSim[t, ixVar] = self.S[shock, ixVar]
        lastShock = shock

    index = np.arange(N)
    varSim = pandas.DataFrame(data=varSim, index=index,
                              columns=map(str, range(self.vars)))

    logger.debug(varSim)
    logger.debug('\nUnconditional means(E)')
    logger.debug(varSim.mean())
    logger.debug('\nUnconditional std')
    logger.debug(varSim.std())
    logger.debug('\nUnconditional skewness')
    logger.debug(varSim.skew())
    try:
        logger.debug('\nUnconditional correlation')
        logger.debug(varSim.corr())
    except:
        pass

    model = scikits.statsmodels.tsa.api.VAR(varSim)
    # model = scikits.statsmodels.tsa.vector_ar.var_model.VAR(varSim)
    results = model.fit(1)
    Theta = results.params[1:, :]
    logger.debug('\n Persistence')
    logger.debug(Theta)

    logger.debug('done with simulation')
    return varSim, Theta
def repr_g(a):
    (precision, linewidth, edgeitems) = est_options(a)
    set_options(precision, linewidth, edgeitems)
    # Temporarily restore numpy's default repr to render the array,
    # then re-install this function as the repr hook.
    np.set_string_function(None, repr=True)
    result = a.__repr__()  # renamed from `str` to avoid shadowing the builtin
    np.set_string_function(repr_g, repr=True)
    return result
def main_paste(args):
    """
    Correlate particle properties from trajectory files.

    Example:
    --------
    trj.py paste.py file1.xyz:radius file2.xyz.voronoi.xyz:volume
    """
    from atooms import trajectory as trj
    from atooms.core.utils import tipify

    f1, attr1 = args.file_inp[0].split(':')
    f2, attr2 = args.file_inp[1].split(':')
    if args.inp is None:
        fmt1, fmt2 = None, None
    else:
        fmt1, fmt2 = args.inp.split(',')
    t1 = trj.Trajectory(f1, fmt=fmt1)
    t2 = trj.Trajectory(f2, fmt=fmt2)

    # Define slice.
    # We interpret --first N --last N as a request of step N
    if args.last == args.first and args.last is not None:
        args.last += 1
    sl1 = fractional_slice(args.first, args.last, args.skip, len(t1))
    sl2 = fractional_slice(args.first, args.last, args.skip, len(t2))

    # Here we could use a trajectory slice t[sl] but this will load
    # everything in ram (getitem does not provide a generator). This
    # will be fixed with python 3.
    ts1 = trajectory.Sliced(t1, sl1)
    ts2 = trajectory.Sliced(t2, sl2)

    def array_fmt(arr):
        """Remove commas and [] from numpy array repr."""
        # Passing a scalar will trigger an error (gotcha: even
        # when casting a numpy array to list, the elements remain of
        # numpy type and this function gets called! (4% slowdown)
        _fmt = '%g'
        try:
            return ' '.join([_fmt % x for x in arr])
        except:
            return _fmt % arr
        # except:
        #     return numpy.array2string(arr, precision=self.precision, separator=',')[1:-1]

    import numpy
    numpy.set_string_function(array_fmt, repr=False)

    for step, s1, s2 in trj.utils.paste(ts1, ts2):
        try:
            for i in range(len(s1.particle)):
                print(getattr(s1.particle[i], attr1),
                      getattr(s2.particle[i], attr2))
        except:
            print(getattr(s1, attr1), getattr(s2, attr2))
def _install():
    import io
    import numpy as np
    from IPython import get_ipython
    from IPython.core import magic
    from highlighter import HighlightTextFormatter

    ip = get_ipython()
    ip.display_formatter.formatters['text/plain'] = HighlightTextFormatter(
        config=ip.config)

    import ipython_autocd
    ipython_autocd.register()

    import lambda_filter
    lambda_filter.register()

    @magic.register_line_magic
    def run_cython(args):
        """Run a Cython file using %%cython magic."""
        args = magic.arg_split(args, posix=True)
        filename = args.pop()
        if '--force' not in args:
            args.append('--force')
        ip = get_ipython()
        ip.extension_manager.load_extension('cython')
        with io.open(filename, 'r', encoding='utf-8') as f:
            ip.run_cell_magic('cython', ' '.join(args), f.read())

    @magic.register_line_cell_magic
    def create(line='', cell=None):
        """Start a plotinteract session from user namespace data."""
        from plottools import create, dataobj
        ip = get_ipython()
        if not cell:
            cell = line
            line = ''
        args = ip.ev('dict({})'.format(line))
        objs = (eval('dataobj({})'.format(line), ip.user_global_ns,
                     dict(dataobj=dataobj)) for line in cell.splitlines())
        create(*objs, **args)

    def arraystr(a, max_line_width=None, precision=None, suppress_small=None):
        """Separate values with a comma in array2string."""
        return np.array2string(a, max_line_width, precision, suppress_small,
                               separator=', ', prefix="", style=str)\
            .replace('..., ', '..., ' if PY3 else 'Ellipsis, ')

    np.set_string_function(arraystr, repr=False)
    if not PY3:
        np.set_string_function(arraystr)
    np.ma.masked_print_option.set_display("masked")
def __str__(self):
    np.set_string_function(format_array, repr=False)
    s = '{'
    for k in sorted(self.keys()):
        v = self[k]
        if v.ndim > 1:
            v = v.ravel()
        if s == '{':
            s += '{}: {}'.format(k, v)
        else:
            s += ', {}: {}'.format(k, v)
    s += '}'
    np.set_string_function(None, repr=False)
    return s
def __str__(self):
    def format_array(array):
        def format_element(e):
            if e > 1e15:
                return '%(n).2e' % {'n': e}
            elif e == np.floor(e):
                return '%(n).0f' % {'n': e}
            elif e - np.floor(e) > 0.01 or e < 1000:
                return '%(n).2f' % {'n': e}
            else:
                return '%(n).2e' % {'n': e}

        if array.ndim == 0:
            return str(array.item())
        elif len(array) == 0:
            return ''
        elif len(array) == 1:
            if defaults.compact_print:
                return '[' + format_element(array[0]) + ']'
            else:
                return '[{}]'.format(array[0])
        s = '['
        for ii in np.arange(len(array) - 1):
            if defaults.compact_print:
                s += format_element(array[ii]) + ', '
            else:
                s += '{}, '.format(array[ii])
        if defaults.compact_print:
            s += format_element(array[-1]) + ']'
        else:
            s += '{}]'.format(array[-1])
        return s

    np.set_string_function(format_array, repr=False)
    if self.__keys is None:
        self.__keys = sorted(self.keys())
    s = '{'
    for k in self.__keys:
        v = self[k]
        if v.ndim > 1:
            v = v.ravel()
        if s == '{':
            s += '{}: {}'.format(k, v)
        else:
            s += ', {}: {}'.format(k, v)
    s += '}'
    np.set_string_function(None, repr=False)
    return s
def set_ndarray_format() -> None:
    """Make str() and repr() of ndarrays show their shape."""

    def format_ndarray(x):
        try:
            result = f"<ndarray shape={x.shape} dtype={x.dtype}"
            if issubclass(x.dtype.type, numbers.Number):
                result += f" min={x.min()}"
                result += f" max={x.max()}"
                result += f" mean={x.mean(dtype=np.float32)}"
            s = np.array_str(x).replace("\n", "")
            result += f" values={s}"
            result += ">"
            return result
        except Exception:
            return np.array_repr(x)  # just in case

    np.set_string_function(format_ndarray, repr=False)
    np.set_string_function(format_ndarray, repr=True)
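# A small usage sketch (not from the source), assuming set_ndarray_format()
# from above is defined together with its numbers/numpy imports.
import numpy as np

set_ndarray_format()
print(np.arange(3.0))
# -> <ndarray shape=(3,) dtype=float64 min=0.0 max=2.0 mean=1.0 values=[0. 1. 2.]>
np.set_string_function(None, repr=False)  # restore str()
np.set_string_function(None, repr=True)   # restore repr()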
def DisplayTableAnalysis(figTitles, plotLabels, tickLabels,
                         meanSkills, skills_ci_upper, skills_ci_lower):
    np.set_string_function(
        lambda x: ' '.join(["% 8.4f" % val for val in x]),
        repr=True)

    for figIndex, title in enumerate(figTitles):
        print("%50s" % title)
        # end=' ' mirrors the trailing comma of the original Python 2
        # prints, which suppressed the newline.
        print(" " * 10, end=' ')
        print(" ".join(["%9s"] * len(tickLabels)) % tuple(tickLabels))
        print("-" * (11 + 10 * len(tickLabels)))

        for plotIndex, label in enumerate(plotLabels):
            print("%10s|" % label, end=' ')
            print(repr(meanSkills[:, figIndex, plotIndex]))

    # Restore the formatting state to the default
    np.set_string_function(None, repr=True)
def html(self, css=None):
    """Create an HTML representation of the table

    Parameters
    ----------
    css : str
        A string that may refer to a CSS or style parameter. This can
        be used for special formatting of the table, e.g. striping.
        Default: No extra formatting.

    Returns
    -------
    string
        HTML <table> representation.
    """
    if css:
        tablestr = '<h3>%s</h3>\n<table %s><thead><tr>' % (
            self.description, css)
    else:
        tablestr = '<h3>%s</h3>\n<table><thead><tr>' % self.description
    for h in self.columns:
        tablestr = tablestr + '<th>%s</th>' % h
    tablestr = tablestr + '</tr>\n'
    for u in self.units:
        tablestr = tablestr + '<th>[%s]</th>' % u
    tablestr = tablestr + '</tr></thead>\n<tbody>'
    np.set_string_function(None)
    np.set_printoptions(
        threshold=None,
        nanstr='NaN',
        infstr='Inf',
        formatter={
            'float': '<td>{:.3E}</td>'.format,
            # 'str_kind': '<td>{}</td>'.format
            'str_kind': lambda x: self._formatcell(x)
        })
    for row in self.data:
        # strip beginning and ending [,] from string.
        rowstr = str(row)[1:-1]
        tablestr = tablestr + '<tr>' + rowstr + '</tr>\n'
    tablestr = tablestr + '</tbody></table>\n'
    np.set_printoptions(formatter=None)
    return tablestr
def test_set_string_function(self):
    a = np.array([1])
    np.set_string_function(lambda x: "FOO", repr=True)
    assert_equal(repr(a), "FOO")
    np.set_string_function(None, repr=True)
    assert_equal(repr(a), "array([1])")

    np.set_string_function(lambda x: "FOO", repr=False)
    assert_equal(str(a), "FOO")
    np.set_string_function(None, repr=False)
    assert_equal(str(a), "[1]")
def debug(func):
    np.set_string_function(lambda arr: '<array>')

    @wraps(func)
    def new_func(*args, **kwargs):
        if get_debug_flag():
            # noinspection PyShadowingNames
            logger = logging.getLogger()
            str_args = ', '.join([repr(a) for a in args])
            str_kwargs = ', '.join([f"{k}={v}" for k, v in kwargs.items()])
            logger.debug(
                f"Calling {func.__name__}({str_args}"
                f"{', ' if str_kwargs else ''}{str_kwargs})"
            )
            out = func(*args, **kwargs)
            logger.debug(f"Finished {func.__name__} -> {out}")
            return out
        else:
            return func(*args, **kwargs)

    return new_func
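# A hypothetical usage sketch of the decorator above; `scale` and the
# logging setup are invented for illustration, and get_debug_flag() is
# assumed to return True.
import logging
import numpy as np

logging.basicConfig(level=logging.DEBUG)

@debug
def scale(arr, factor=2):
    return arr * factor

scale(np.ones(3))
# The call is logged as "Calling scale(<array>)" because repr() of any
# ndarray is now the '<array>' placeholder; str() is unaffected.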
def __setitem__(self, key, value):
    """Sets cell code and resets result cache"""

    # Change numpy array repr function for grid cell results
    numpy.set_string_function(lambda s: repr(s.tolist()))

    # Prevent unchanged cells from being recalculated on cursor movement
    repr_key = repr(key)
    unchanged = (repr_key in self.result_cache and
                 value == self(key)) or \
                ((value is None or value == "") and
                 repr_key not in self.result_cache)

    super().__setitem__(key, value)

    if not unchanged:
        # Reset result cache
        self.result_cache = {}
def _setup_format(self):
    # %g allows to format both float and int but it's 2x slower.
    # This switch is for performance
    if self._fields_float:
        _fmt = '%.' + str(self.precision) + 'f'
    else:
        _fmt = '%g'

    def array_fmt(arr):
        """Remove commas and [] from numpy array repr."""
        # Passing a scalar will trigger an error (gotcha: even
        # when casting a numpy array to list, the elements remain of
        # numpy type and this function gets called! (4% slowdown)
        try:
            return ' '.join([_fmt % x for x in arr])
        except:
            return _fmt % arr

    # Note: numpy.array2string is MUCH slower
    numpy.set_string_function(array_fmt, repr=False)
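# A usage sketch (not from the source): once _setup_format() has installed
# array_fmt with _fmt = '%g', str() of an array drops brackets and commas.
import numpy
print(str(numpy.array([1.0, 2.5, 3.0])))  # -> 1 2.5 3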
def format_exc_info(info: ExcInfo, as_html: bool, color='Neutral') -> str:
    # avoids printing the array data
    # some discussion related to obtaining the current string function
    # can be found here, https://github.com/numpy/numpy/issues/11266
    np.set_string_function(
        lambda arr: f'{type(arr)} {arr.shape} {arr.dtype}')
    vbtb = IPython.core.ultratb.VerboseTB(color_scheme=color)
    if as_html:
        # preserve indentation in HTML
        ansi_string = vbtb.text(*info).replace(" ", "&nbsp;")
        html = "".join(ansi2html(ansi_string))
        html = html.replace("\n", "<br>")
        html = (
            "<span style='font-family: monaco,courier,monospace;'>"
            + html
            + "</span>")
        tb_text = html
    else:
        tb_text = vbtb.text(*info)

    # resets to default behavior
    np.set_string_function(None)
    return tb_text
def format_exc_info(info: ExcInfo, as_html: bool, color=None) -> str:
    # avoids printing the array data
    np.set_string_function(
        lambda arr: f'{type(arr)} {arr.shape} {arr.dtype}')
    if as_html:
        html = "\n".join(cgitb_chain(info[1]))
        # cgitb has a lot of hardcoded colors that don't work for us
        # remove bgcolor, and let theme handle it
        html = re.sub('bgcolor="#.*"', '', html)
        # remove superfluous whitespace
        html = html.replace('<br>\n', '\n')
        # but retain it around the <small> bits
        html = re.sub(r'(<tr><td><small.*</tr>)', '<br>\\1<br>', html)
        # weird 2-part syntax is a workaround for hard-to-grep text.
        html = html.replace(
            "<p>A problem occurred in a Python script. "
            "Here is the sequence of",
            "",
        )
        html = html.replace(
            "function calls leading up to the error, "
            "in the order they occurred.</p>",
            "<br>",
        )
        # remove hardcoded fonts
        html = html.replace('face="helvetica, arial"', "")
        html = (
            "<span style='font-family: monaco,courier,monospace;'>"
            + html
            + "</span>")
        tb_text = html
    else:
        # if we don't need HTML, just use traceback
        tb_text = ''.join(traceback.format_exception(*info))

    # resets to default behavior
    np.set_string_function(None)
    return tb_text
DIM_NETHER = -1
DIM_END = 1

_zeros = {}


def string_func(array):
    numpy.set_string_function(None)
    string = repr(array)
    string = string[:-1] + ", shape=%s)" % (array.shape,)
    numpy.set_string_function(string_func)
    return string

numpy.set_string_function(string_func)


class EntityListProxy(collections.MutableSequence):
    """
    A proxy for the Entities and TileEntities lists of a WorldEditorChunk.

    Accessing an element returns an EntityRef or TileEntityRef wrapping
    the element of the underlying NBT compound, with a reference to the
    WorldEditorChunk.

    These Refs cannot be created at load time as they hold a reference
    to the chunk, preventing the chunk from being unloaded when its
    refcount reaches zero.
    """
    chunk = weakrefprop()

    def __init__(self, chunk, attrName, refClass):
        self.attrName = attrName
import Sofa

from SofaPython import Quaternion as quat
import numpy as np

np.set_string_function(lambda x: ' '.join(map(str, x)), repr=False)

import path
import tool


# rigid body operations

def id():
    res = np.zeros(7)
    res[-1] = 1
    return res


def inv(x):
    res = np.zeros(7)
    res[3:] = quat.conj(x[3:])
    res[:3] = -quat.rotate(res[3:], x[:3])
    return res


def prod(x, y):
    # print x.inv()  # leftover debug line; ndarrays have no .inv() method
    res = np.zeros(7)
    res[:3] = x[:3] + quat.rotate(x[3:], y[:3])
    res[3:] = quat.prod(x[3:], y[3:])
fh.setFormatter(
    logging.Formatter(
        '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s'
    ))
logging.root.addHandler(fh)


if os.environ.get('log', '0') == '1':
    logging.root.setLevel(logging.DEBUG)
    # set_stream_logger(logging.DEBUG)

set_stream_logger(logging.INFO)
set_file_logger(log_level=logging.DEBUG)

## ndarray repr will be pretty
np.set_string_function(lambda arr: f'{arr.shape} {arr.dtype} '
                                   f'{arr.__str__()} '
                                   f'dtype:{arr.dtype} shape:{arr.shape}',
                       repr=True)
logging.info('import lz')

## print(ndarray) will be pretty (and pycharm dbg)
# np.set_string_function(lambda arr: f'{arr.shape} {arr.dtype} \n'
#                                    f'{arr.__repr__()} \n'
#                                    f'dtype:{arr.dtype} shape:{arr.shape}',
#                        repr=False)

# old_np_repr = np.ndarray.__repr__
# np.ndarray.__repr__ = lambda arr: (f'{arr.shape} {arr.dtype} \n'
#                                    f'{old_np_repr(arr)} \n'
#                                    f'dtype:{arr.dtype} shape:{arr.shape}')
if self.orig_handlers is not None:
    for sig, orig in zip(self.sigs, self.orig_handlers):
        signal.signal(sig, orig)
    self.orig_handlers = None


def df2md(df1):
    import tabulate
    return tabulate.tabulate(df1, headers="keys", tablefmt="pipe")


def np_print(arr):
    return '{} \n dtype:{} shape:{}'.format(arr, arr.dtype, arr.shape)


np.set_string_function(np_print)


def set_stream_logger(log_level=logging.DEBUG):
    sh = colorlog.StreamHandler()
    sh.setLevel(log_level)
    sh.setFormatter(
        colorlog.ColoredFormatter(
            ' %(asctime)s %(filename)s [line:%(lineno)d] '
            '%(log_color)s%(levelname)s%(reset)s %(message)s'))
    logging.root.addHandler(sh)


def set_file_logger(work_dir=None, log_level=logging.DEBUG):
    work_dir = work_dir or os.getcwd()
    fh = logging.FileHandler(os.path.join(work_dir, 'log.txt'))
    fh.setLevel(log_level)
sys.path.append(path2Src)

from pymods.markovChain.mcInterface import MkovM
from pymods.support.support import wrapLogger
from pymods.support.support import getParameter
from pymods.support.support import myArrayPrint

import numpy as np
import pandas
import scipy.optimize

from genMarkov import genMarkov

from numpy.core.umath_tests import inner1d

pprintFun = myArrayPrint(width=12, prec=7)
np.set_string_function(pprintFun, repr=False)

logger = wrapLogger(loggerName='lucasMainLog', streamVerb='DEBUG',
                    logFile=None)


def lucasOneAgent(shockMatrix, transMatrix, beta, g, Etg, PD, PB,
                  markovFilePath, deterministic=False):
    '''
    markovFilePath: path to parameters.in file
    deterministic: boolean indicating whether to compute the special
        deterministic or the stochastic case.

    One agent economy. Therefore we have:
        C_t = Y_t
        C_{t+1} = Y_{t+1}
    In the normalized world:
        c_t = 1
        c_{t+1} = 1
    '''
def genMarkov(markovFilePath, verb='INFO', nSimulation=int(5.e+4)):
    logger.setStreamVerb(verb=verb)
    logger.info('')
    #os.system('cat ' + markovFilePath)
    logger.debug('markovFilePath is %s' % markovFilePath)

    # Read parameter file
    beta = float(getParameter(markovFilePath, 'beta', 'bar-separated'))
    muG = float(getParameter(markovFilePath, 'muG', 'bar-separated'))
    sigmaG = float(getParameter(markovFilePath, 'sigmaG', 'bar-separated'))
    p = float(getParameter(markovFilePath, 'p', 'bar-separated'))
    dy = float(getParameter(markovFilePath, 'dy', 'bar-separated'))
    nAgent = int(getParameter(markovFilePath, 'nAgent', 'bar-separated'))
    theta = float(getParameter(markovFilePath, 'theta', 'bar-separated'))
    periodsPerYear = int(
        getParameter(markovFilePath, 'periodsPerYear', 'bar-separated'))

    # Output read parameters
    logger.debug('')
    logger.debug('read the following yearly parameters: ')
    logger.debug('beta = %s' % beta)
    logger.debug('muG = %s' % muG)
    logger.debug('sigmaG = %s' % sigmaG)
    logger.debug('p = %s' % p)
    logger.debug('dy = %s' % dy)
    logger.debug('nAgent = %s' % nAgent)
    logger.debug('theta = %s' % theta)
    logger.debug('periodsPerYear = %s' % periodsPerYear)

    # perform consistency check on parameters:
    assert periodsPerYear >= 1
    assert beta >= 0.8 and beta <= 1.0
    assert sigmaG >= 0.
    assert muG <= 1.0

    # computing scaled parameters
    muG_sc = (1. + muG) ** (1. / periodsPerYear)
    sigmaG_sc = sigmaG / np.sqrt(periodsPerYear)
    beta_sc = beta ** (1. / periodsPerYear)

    # scaled parameters:
    logger.info('')
    logger.info('computed the following scaled 1/%s parameters: '
                % periodsPerYear)
    logger.info('beta_sc = %s' % beta_sc)
    logger.info('muG_sc = %s' % (muG_sc - 1.))
    logger.info('sigmaG_sc = %s' % sigmaG_sc)
    logger.info('')

    # Build transition matrix
    TransMatrix = np.array([[p, 1. - p], [1. - p, p]])
    ShockMatrix = np.array([[muG_sc - sigmaG_sc], [muG_sc + sigmaG_sc]])

    mkov = MkovM(ShockMatrix, TransMatrix)
    logger.debug(ShockMatrix)
    logger.debug(TransMatrix)
    #mkov.simulation()
    logger.debug(mkov)

    if nAgent > 1:
        # Add unemployment shocks
        #TransMatrixUnem = np.triu(np.ones((nAgent, nAgent)), 1) * ((1. - p) / (nAgent - 1.)) \
        #    + np.tril(np.ones((nAgent, nAgent)), -1) * ((1. - p) / (nAgent - 1.)) \
        #    + np.eye(nAgent) * p
        ShockMatrixUnem = (np.triu(np.ones((nAgent, nAgent)), 1) * dy / (nAgent - 1)
                           + np.tril(np.ones((nAgent, nAgent)), -1) * dy / (nAgent - 1)
                           + np.eye(nAgent) * (-dy))

        # Print unemployment shock matrix
        logger.info('ShockMatrixUnem \n %s' % ShockMatrixUnem)

        # Combine aggregate uncertainty with idiosyncratic unemployment uncertainty
        fullShockMatrix = np.hstack(
            (np.repeat(ShockMatrix, nAgent, axis=0),
             np.tile(ShockMatrixUnem, (2, 1))))
        #fullTransMatrix = np.kron(TransMatrix, TransMatrixUnem)

        # ugly ugly ugly
        fullShockMatrix = fullShockMatrix[:nAgent + 1]
        fullShockMatrix[nAgent][ShockMatrix.shape[1]:] = np.zeros((1, nAgent))

        # more ugly ugly ugly
        pers = TransMatrix[0, 0]
        fullTransMatrix = np.vstack(
            (np.hstack((np.eye(nAgent) * pers,
                        np.ones((nAgent, 1)) * (1. - pers))),
             np.append(((1. - pers) / nAgent) * np.ones(nAgent), pers)))
    else:
        fullShockMatrix = ShockMatrix
        fullTransMatrix = TransMatrix

    np.set_printoptions(precision=7, linewidth=300)
    logger.info('fullShock\n %s' % fullShockMatrix)
    logger.info('fullTrans\n %s' % fullTransMatrix)

    ###################
    # Compute PD
    #   nD_t+1 = nYbar_t+1 - n(1-theta)E_tYbar_t+1
    # divide through by Ybar_t+1:
    #   nd_t+1 = n - n(1-theta) E_t[g_t+1] / g_t+1
    g = fullShockMatrix[:, 0]
    Etg = np.dot(fullTransMatrix, g)
    PD = 1. - (1. - theta) * np.reshape(Etg, (len(Etg), 1)) / g
    PB = np.reshape(Etg, (len(Etg), 1)) / g
    ##################

    try:
        np.savetxt(os.path.join(os.getcwd(), 'output', 'shockMatrix.in'),
                   fullShockMatrix, fmt='%15.10f')
        np.savetxt(os.path.join(os.getcwd(), 'output', 'transMatrix.in'),
                   fullTransMatrix, fmt='%15.10f')
        np.savetxt(os.path.join(os.getcwd(), 'output', 'p_a.in'),
                   PD, fmt='%15.10f')
        np.savetxt(os.path.join(os.getcwd(), 'output', 'p_b.in'),
                   PB, fmt='%15.10f')
    except IOError:
        logger.critical('cannot write to output/.')

    logger.info('')

    # Estimate moments for the full markov chain.
    fullMChain = MkovM(fullShockMatrix, fullTransMatrix)
    varSim, Theta = fullMChain.simulation(nSimulation)

    # rename columns
    varSim = varSim.rename(columns={'0': 'agIncGrowth'})
    for agent in range(1, nAgent):
        varSim = varSim.rename(columns={str(agent): 'dy_agent' + str(agent)})

    qPers = Theta[1, 1]
    yearlyStockSeries = pandas.DataFrame(varSim[::4])
    model = scikits.statsmodels.tsa.api.VAR(yearlyStockSeries)
    results = model.fit(1)
    Theta = results.params[1:, :]
    aPers = Theta[1, 1]
    logger.info('quarterly pers %s, annual pers %s, diagonal pers %s'
                % (qPers, aPers, TransMatrix[0, 0]))

    np.set_string_function(None)

    varSim['simInc'] = float(nAgent)
    varSim['incGrIndAg0'] = float(1.)
    varSim['incShareAg0'] = (1. + varSim['dy_agent1']) / nAgent
    for row in varSim.rows():
        if row > 0:
            varSim['simInc'][row] = (varSim['simInc'][row - 1]
                                     * varSim['agIncGrowth'][row])
            varSim['incGrIndAg0'][row] = (varSim['incShareAg0'][row] /
                                          varSim['incShareAg0'][row - 1]) \
                * varSim['agIncGrowth'][row]
    varSim['simIndIncAg0'] = varSim['incShareAg0'] * varSim['simInc']

    x = varSim.ix[:, ['0', 'incShareAg0', 'incGrIndAg0']]
    logger.info(x[:100])

    ## logger.info('VAR for growth, incshareAg0, incGrIndAg0')
    ## model = scikits.statsmodels.tsa.api.VAR(x)
    ## result = model.fit(1)
    ## logger.info(result.summary())

    logger.info('incShareAg0')
    incShareAg0 = x['incShareAg0']
    logger.info(incShareAg0.describe())
    logger.info('skewness %s' % scipy.stats.skew(incShareAg0))
    logger.info('kurtosis %s' % scipy.stats.kurtosis(incShareAg0))

    logger.info('\nincGrIndAg0')
    incGrIndAg0 = x['incGrIndAg0']
    logger.info(incGrIndAg0.describe())
    logger.info('skewness %s' % scipy.stats.skew(incGrIndAg0))
    logger.info('kurtosis %s' % scipy.stats.kurtosis(incGrIndAg0))

    #scikits.statsmodels.tsa.ar_model.AR(np.asarray(varSim['simIndIncAg0']))
    #varSim = varSim[:100]
    #N = len(varSim)
    #rng = pandas.DateRange('1/1/1900', periods=N, timeRule='Q@JAN')
    #ts = pandas.Series(varSim['1'], index=rng)

    return fullShockMatrix, fullTransMatrix, beta_sc, g, Etg, PD, PB, incGrIndAg0
def string_func(array):
    # Temporarily restore the default repr, build the string, then
    # re-install this hook so nested reprs do not recurse.
    numpy.set_string_function(None)
    string = repr(array)
    string = string[:-1] + ", shape=%s)" % (array.shape,)
    numpy.set_string_function(string_func)
    return string
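# A usage sketch (not from the source): installing string_func appends the
# shape to every ndarray repr, while the temporary reset inside the
# function keeps repr() from recursing into itself.
import numpy

numpy.set_string_function(string_func)
repr(numpy.zeros(2))  # e.g. 'array([ 0.,  0.], shape=(2,))' (spacing varies by numpy version)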
import collections  # needed for Run below; the original excerpt starts mid-import block
import os
import pathlib
import re
import subprocess
import warnings

os.environ['NO_AT_BRIDGE'] = '1'  # Hide X org false warning.

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import pandas as pd

np.set_string_function(lambda x: f'<np.array shape={x.shape} dtype={x.dtype}>')

Run = collections.namedtuple('Run', 'task method seed xs ys')

PALETTES = dict(
    discrete=(
        '#377eb8', '#4daf4a', '#984ea3', '#e41a1c', '#ff7f00', '#a65628',
        '#f781bf', '#888888', '#a6cee3', '#b2df8a',
# Analyze with Rasch model. The shell gives a list of output datadicts.
data.rasch(
    groups=None,    # [<None, {'row':int row of group labels}, ['key', {'group0':['i1', i2'],...}], ['index', {'group0':[0, 1],...}]> => identify groups]
    anchors=None,   # [<None, {'Bank':<pickle file>, 'row_ents':[<None,'All',row entity list>], 'col_ents':[<None,'All',col entity list>]}>]
    runspecs=[0.0001, 20],  # [<[stop_when_change, max_iteration]> => iteration stopping conditions]
    minvar=0.001,   # [<decimal> => minimum row/col variance allowed during iteration]
    maxchange=10,   # [<+num> => maximum change allowed per iteration]
)

# Row entity stats
np.set_string_function(None)
arr = data.row_ents_out.whole
print arr
sys.exit()

print '\nPerson Measures =\n', data.row_ents_out
sys.exit()

# Column entity stats
print '\nItem Measures =\n', data.col_ents_out

# Reliability
print '\nReliability =\n', data.reliability

# Export person measures to a text file using the export method.
def close(self):
    self.trajectory.close()
    # Restore numpy formatting defaults
    numpy.set_string_function(None, False)
    numpy.set_string_function(None, True)
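# A sketch of the same restore-on-exit idea as a context manager (not from
# the source), so a custom string function cannot leak past an exception:
import contextlib
import numpy

@contextlib.contextmanager
def temporary_string_function(func, use_repr=True):
    numpy.set_string_function(func, use_repr)
    try:
        yield
    finally:
        numpy.set_string_function(None, use_repr)  # numpy default again

# with temporary_string_function(lambda a: '<array>'):
#     ...  # repr() of any ndarray prints '<array>' inside this block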
# Interactive demo of replacing repr() and str() separately.
import numpy as np


# noinspection PyUnusedLocal
def pprint(arr):
    return 'HA! - What are you going to do now?'


np.set_string_function(pprint)
a = np.arange(10)
a          # repr(a) now returns the pprint message
print a    # str(a) is unaffected because only repr was replaced
np.set_string_function(None)
a          # back to numpy's default repr

x = np.arange(4)
np.set_string_function(lambda x_: 'random', repr=False)
x.__str__()   # 'random'
x.__repr__()  # still the default 'array([0, 1, 2, 3])'
def set_array_layout():
    numpy.set_printoptions(linewidth=300, suppress=True)
    numpy.set_string_function(f=format_d2)
def __init__(self, *args, **kwargs):
    super(AD, self).__init__(*args, **kwargs)
    self.__dict__ = self


# ipshell = InteractiveShellEmbed()
# ipshell.magic('%load_ext autoreload')


# Add array shape to Numpy's repr
def my_array_repr(arr):
    orig_str = array_repr(arr)
    return orig_str + '\n\nShape: ' + str(arr.shape) + '\n\n\n'


np.set_string_function(my_array_repr)


def fromfile(filename):
    return np.fromfile(filename, '>d')


def equal(ax=None):
    ax = plt.gca() if ax is None else ax
    ax.set_aspect('equal', 'box-forced')


def doc(fn):
    oinfo = Inspector().info(fn)
    url = sphinxify.rich_repr(oinfo)
    try:
def get_dummy_data(request, job_id):
    """
        Create a dummy job and plot data
        @param request: request object
        @param job_id: RemoteJob pk
    """
    try:
        remote_job = RemoteJob.objects.get(remote_id=job_id)
    except:
        eqsans = Instrument.objects.get(name='eqsans')
        reduction = ReductionProcess(instrument=eqsans,
                                     name='Dummy job',
                                     owner=request.user,
                                     data_file='/tmp/dummy.nxs')
        reduction.save()
        try:
            transaction = Transaction.objects.get(trans_id=-1)
        except:
            transaction = Transaction(trans_id=-1,
                                      owner=request.user,
                                      directory='/tmp')
            transaction.save()
        remote_job = RemoteJob(reduction=reduction,
                               remote_id='-1',
                               transaction=transaction)
        remote_job.save()

    breadcrumbs = "<a href='%s'>home</a>" % reverse(settings.LANDING_VIEW)
    breadcrumbs += " › <a href='%s'>eqsans reduction</a>" % reverse('eqsans.views.reduction_home')
    breadcrumbs += " › <a href='%s'>jobs</a>" % reverse('eqsans.views.reduction_jobs')
    breadcrumbs += " › dummy job"

    template_values = {'remote_job': remote_job,
                       'parameters': remote_job.reduction.get_data_dict(),
                       'reduction_id': remote_job.reduction.id,
                       'breadcrumbs': breadcrumbs,
                       'back_url': request.path}
    template_values = remote.view_util.fill_job_dictionary(request, job_id, **template_values)
    template_values = users.view_util.fill_template_values(request, **template_values)
    template_values = remote.view_util.fill_template_values(request, **template_values)

    # Go through the files and find data to plot
    f = os.path.join(os.path.split(__file__)[0], '..', 'plotting', 'data', '4065_Iq.txt')

    # Did we read this data already?
    plot_object = remote_job.get_first_plot(filename='4065_Iq.txt', owner=request.user)
    if plot_object is not None and plot_object.first_data_layout() is not None:
        data_str = plot_object.first_data_layout().dataset.data
    else:
        # If we don't have data stored, read it from file
        file_content = open(f, 'r').read()
        data_str = view_util.process_Iq_data(file_content)
        plot_object = Plot1D.objects.create_plot(request.user,
                                                 data=data_str,
                                                 filename='4065_Iq.txt')
        remote_job.plots.add(plot_object)

    template_values['plot_1d'] = data_str
    template_values['plot_object'] = plot_object
    template_values['plot_1d_id'] = plot_object.id if plot_object is not None else None

    # Now the 2D data
    f = os.path.join(os.path.split(__file__)[0], '..', 'plotting', 'data', '4065_Iqxy.nxs')
    plot_object2d = remote_job.get_plot_2d(filename='4065_Iqxy.nxs', owner=request.user)
    if plot_object2d is None:
        numpy.set_printoptions(threshold='nan', nanstr='0', infstr='0')
        fd = h5py.File(f, 'r')
        g = fd['mantid_workspace_1']
        y = g['workspace']['axis1']
        x = g['workspace']['axis2']
        values = g['workspace']['values']
        z_max = numpy.amax(values)
        numpy.set_string_function(
            lambda x: '[' + ','.join(
                map(lambda y: '[' + ','.join(map(lambda z: "%.4g" % z, y)) + ']',
                    x)) + ']')
        data_str_2d = values[:].__repr__()
        numpy.set_string_function(
            lambda x: '[' + ','.join(map(lambda z: "%.4g" % z, x)) + ']')
        y_str = y[:].__repr__()
        x_str = x[:].__repr__()
        plot_object2d = Plot2D.objects.create_plot(user=request.user,
                                                   data=data_str_2d,
                                                   x_axis=x_str,
                                                   y_axis=y_str,
                                                   z_min=0.0,
                                                   z_max=z_max,
                                                   filename='4065_Iqxy.nxs')
        remote_job.plots2d.add(plot_object2d)

    template_values['plot_2d'] = plot_object2d
    return template_values
DIM_NETHER = -1
DIM_END = 1

_zeros = {}


def string_func(array):
    numpy.set_string_function(None)
    string = repr(array)
    string = string[:-1] + ", shape=%s)" % (array.shape,)
    numpy.set_string_function(string_func)
    return string

numpy.set_string_function(string_func)


class WorldEditorChunk(object):
    """
    This is a 16x16xH chunk in a format-independent world.

    The Blocks, Data, SkyLight, and BlockLight arrays are divided into
    vertical sections of 16x16x16, accessed using the `getSection` method.
    """

    def __init__(self, chunkData, editor):
        self.worldEditor = editor
        self.chunkData = chunkData
        self.cx, self.cz = chunkData.cx, chunkData.cz
        self.dimName = chunkData.dimName
        self.dimension = editor.getDimension(self.dimName)

        self.Entities = [
def main():
    parser = OptionParser()
    #parser.add_option("-s", metavar="FILE", help="Reference Structure file", default="topol.tpr")
    parser.add_option("-n", metavar="FILE", help="Index file")
    parser.add_option("-f", metavar="FILE",
                      help="Trajectory with coordinates, velocities, and forces")
    parser.add_option("-x", metavar="FILE", help="Trajectory with coordinates")
    parser.add_option("-v", metavar="FILE", help="Trajectory with velocities")
    parser.add_option("-k", metavar="FILE", help="Trajectory with forces")
    parser.add_option("-o", metavar="FILE", help="Output", default="corr.dat")
    parser.add_option("-r", metavar="FILE", help="Output", default="rot.dat")
    (options, args) = parser.parse_args()

    #open files
    #sf = tpxfile(options.s)

    #read index
    if options.n:
        isize, idx, iname = rd_index(options.n, 1)
        print("Using group %s with %s atoms" % (iname[0], isize[0]))
        idx = idx[0]

    if not options.f and not (options.x and options.v and options.k):
        print("Requires either one trajectory containing all input (-f) "
              "or three (-x, -v, -k)")
        sys.exit(1)
    if options.f and (options.x or options.v or options.k):
        print("-f cannot be used with x, v, or k")
        sys.exit(1)

    if options.f:
        tf = gmxfile(options.f, flags=TRX_NEED_X | TRX_NEED_V | TRX_NEED_F)
        natoms = tf.natoms
    else:
        xf = gmxfile(options.x, flags=TRX_NEED_X)
        vf = gmxfile(options.v, flags=TRX_NEED_X)
        # DCD doesn't have V/F so we expect all 3 files to have an X field
        kf = gmxfile(options.k, flags=TRX_NEED_X)
        print("Warning: NAMD units are expected for input files!")
        natoms = xf.natoms
        tf = izip(xf, vf, kf)
        if natoms != vf.natoms or natoms != kf.natoms:
            print("Number of atoms doesn't match between files!")
            sys.exit(1)

    if natoms % 3 != 0:
        print("This program only supports water. "
              "Number of atoms has to be a multiple of 3.")
        sys.exit(1)

    temp_v = []
    temp_x = []
    temp_f = []
    trn_v = N.empty((nP, natoms / 3, 3, 3), dtype=N_real)
    trn_x = N.empty((nP, natoms / 3, 3, 3), dtype=N_real)

    for i, frm in enumerate(tf):
        if not options.f:
            frm_t = frm

            class frm:  # convert tuple into attributes
                x = frm_t[0].x
                v = frm_t[1].x * 20.45482706
                f = frm_t[2].x * (100 / 4.184)

        if options.n:
            frm_x = frm.x[idx]
            frm_v = frm.v[idx]
            frm_f = frm.f[idx]
        else:
            frm_x = frm.x
            frm_v = frm.v
            frm_f = frm.f

        frm_v = frm_v.reshape(-1, 3, 3)
        frm_x = frm_x.reshape(-1, 3, 3)
        frm_f = frm_f.reshape(-1, 3, 3)
        temp_v += [frm_v.copy()]
        temp_x += [frm_x.copy()]
        temp_f += [frm_f.copy()]
        temp_v = temp_v[-3:]
        temp_x = temp_x[-3:]
        temp_f = temp_f[-3:]

        if i % ds != 2:  # assumes ds >= 3
            continue

        if __debug__:
            a = temp_x[1]
            b = temp_x[0] + temp_v[1] * dt
            #print(temp_v[1]*dt)
            #print(temp_x[0])
            #idx = N.abs(a-b).max(axis=1).max(axis=1) > .0005
            #print(a.shape, temp_v[1].shape)
            #print(idx)
            #print(a[idx])
            #print(b[idx])
            # low accuracy dominated by pressure coupling
            N.testing.assert_almost_equal(a, b, decimal=2, verbose=True)

        i = i // ds
        trn_v[i] = .5 * (temp_v[1] + temp_v[2]) \
            - dt / 16 / mass * (temp_f[2] - temp_f[0])
        #trn_v[i] = .5*(temp_v[1]+temp_v[2])
        ##trn_v2 = 2*(trn_v[1:-1]+trn_v[2:])-3/2/dt*(trn_x[2:]-trn_x[:-2])
        trn_x[i] = temp_x[1]
        if i == nP - 1:
            break

    if __debug__:
        a = (temp_v[0] + temp_v[1]) / 2  # trn_v2[n]
        b = trn_v[i]
        #idx = N.abs(a - b) > .4 + .2 * N.abs(a + b)
        # .6 .4 old values: for 2fs: a few, 1fs: none
        idx = N.abs(a - b) > .6 + .4 * N.abs(a + b)
        # .1 .05: 75% - 1fs: 91%
        idx2 = N.abs(a - b) < .1 + .05 * N.abs(a + b)
        assert N.sum(idx) / N.prod(idx2.shape) < 1 / 400
        #assert N.sum(idx2) / N.prod(idx2.shape) > .9
        assert N.sum(idx2) / N.prod(idx2.shape) > .70

    assert i == nP - 1

    N.set_string_function(N.array_repr, False)

    x = trn_x[0][0]  # 1st frame, 1st molecule
    x = x - N.sum(x * mass, axis=0) / N.sum(mass)
    I = inertia_tensor(x, mass)
    I, evec = LA.eigh(I)
    print()
    print("I", I)
    # initial water coordinates (oriented according to I); orientation
    # x: H1->H2, y: O->(H1+H2)/2, z: perpendicular to plane
    ic = N.array(N.dot(x, evec), dtype=N_real)

    print(trn_x.shape, trn_v.shape)
    trn_w = N.empty((len(trn_v), natoms / 3, 3), dtype=N_real)

    #COM
    # reshape doesn't change the shape but reinserts the summed axis
    trn_x -= N.sum(trn_x * mass, axis=2).reshape(nP, natoms / 3, 1, 3) / N.sum(mass)
    trn_v_com = trn_v - N.sum(trn_v * mass, axis=2).reshape(nP, natoms / 3, 1, 3) / N.sum(mass)

    for n in range(len(trn_v)):
        trn_w[n] = compute_omega(trn_x[n], trn_v_com[n], ic, I, mass.reshape(3))
        #trn_w[n] = compute_omega_py(trn_x[n], trn_v_com[n], ic, I, mass)

    trn_w = trn_w[:nP]
    corr = N.apply_along_axis(
        lambda a: scipy.signal.fftconvolve(a, a[::-1], 'same'),
        axis=0, arr=trn_w)
    corr = corr[len(corr) / 2:]  # remove negative lag
    corr = norm_corr(corr)
    rf = file(options.r, "w")
    map(lambda x: print(x, file=rf), corr)

    #if len(trn_v) % 2 == 1:  # remove last if uneven number of trn-data points
    #    trn_v = trn_v[:nP]

    #TODO: make configurable, switch here for trn/rot
    print(trn_v.shape)
    if bCom:
        #trn_v = trn_v.reshape((-1, natoms/3, 3, 3))  # group as waters
        # average over waters, now shape is: frame, mol, coor
        trn_v = N.mean(trn_v * mass, axis=2)
        print(trn_v.shape)

    corr = N.apply_along_axis(
        lambda a: scipy.signal.fftconvolve(a, a[::-1], 'same'),
        axis=0, arr=trn_v)
    #corr = N.apply_along_axis(lambda a: N.correlate(a, a, 'same'), axis=0, arr=trn_v)  # slower identical alternative
    print(corr.shape)
    corr = corr[len(corr) / 2:]  # remove negative lag

    if bCom:
        if not bNormalize:
            # we multiplied trn with mass and because the correlation is
            # trn*trn the correlation was mass^2
            corr /= N.sum(mass)
    else:
        # frames, molecule, atoms, xyz; so that we can multiply with mass
        corr = corr.reshape(-1, natoms / 3, 3, 3)
        print(corr.shape)
        corr *= mass

    corr = norm_corr(corr)
    print(corr.shape)
    of = file(options.o, "w")
    map(lambda x: print(x, file=of), corr)