def get(b, d, n):
    """Get the results for the given parameters"""
    print("Running with: B = " + str(b) + " D = " + str(d) + " N = " + str(n))
    nn = int(n * (n + 1) / 2)
    cd(b, d, n)
    E, eigenvectors, index, c_max, H = \
        eigensystem.get(return_eigv=True, return_index=True,
                        return_cmax=True, return_H=True)
    # All available colors
    colors = ('black', 'red', 'teal', 'blue', 'orange', 'olive', 'magenta',
              'cyan', 'Brown', 'Goldenrod', 'Green', 'Violet')
    ir_reps, colormap = eigensystem.levels(E, index[c_max], colors=colors)
    # Build eigenvalue string
    # If the colormap element corresponding to the i-th eigenvalue is empty,
    # skip adding \color
    eigenvalues = ', '.join(
        optional_color(format_float(E[i]), colormap[i]) for i in range(E.size))
    return nn, H, E, eigenvalues, eigenvectors, index, c_max, ir_reps, \
        colors, colormap
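# Note: `format_float` and `optional_color` are used above but not defined in this
# collection. The minimal sketch below is a hypothetical reconstruction: it assumes
# `format_float` only rounds the eigenvalue for display and `optional_color` wraps
# the text in a LaTeX \color command when the colormap entry is non-empty, as the
# comment above describes.
def format_float(x, digits=3):
    """Format a float with a fixed number of decimals (hypothetical helper)."""
    return '{0:.{1}f}'.format(x, digits)


def optional_color(text, color):
    """Wrap `text` in \\color{...} when `color` is non-empty (hypothetical helper)."""
    return '{\\color{' + color + '}' + text + '}' if color else text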
def main(B, D, N, max_energy=[0], energy_plot=False, small_plot=False):
    figsize = (5.8, 4) if not small_plot else (5.8, 3.5)
    fig, ax = plt.subplots(figsize=figsize)
    d = D[0]
    # alpha
    msize = 8
    reps = 'reuna', 'reuns', 'rebde'
    for r in reps:
        values = []
        for n_i in N:
            for max_e in max_energy:
                for b_i in B:
                    cd(b_i, d, n_i)
                    rep = np.loadtxt(r + '.dat', usecols=(0,))
                    files = find('alpha*.txt', '.')
                    for f in files:
                        regex = r"""alpha_max_e_([0-9]+)"""
                        f_max_e = re.compile(regex).search(f)
                        if f_max_e:
                            f_max_e = float(f_max_e.group(1))
                        else:
                            f_max_e = 0
                        if max_e == f_max_e:
                            print(max_e, f_max_e, f)
                            values.append(
                                [np.asscalar(np.loadtxt(f)),
                                 compute_eta(rep[rep < max_e] if max_e else rep)])
        ax.scatter(np.array(values)[:, 0], np.array(values)[:, 1])
    ax.set_xlabel('$\\alpha$')
    ax.set_ylabel('$\\eta$')
    fig.savefig('../../Statistics/correlation_B[' +
                ', '.join('{:.2}' for i in B).format(*B) +
                ']_N' + str(N) + '.pdf', dpi=400)
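# Note: `find` is used above to locate the alpha*.txt files but is not defined in
# this collection. A plausible sketch, assuming it is the usual os.walk + fnmatch
# recipe that returns the matching paths below `root`.
import fnmatch
import os


def find(pattern, root):
    """Recursively collect paths under `root` whose filename matches `pattern`
    (hypothetical helper)."""
    matches = []
    for dirpath, _, filenames in os.walk(root):
        for filename in fnmatch.filter(filenames, pattern):
            matches.append(os.path.join(dirpath, filename))
    return matches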
def main(a, b, d, n):
    n = int(n)
    cd(b, d, n)
    nn = int(n * (n + 1) / 2)
    H = np.empty((nn, nn))
    index = [(n1, n2) for n1 in range(n) for n2 in range(n - n1)]
    np.seterr(over='raise')
    start = timer()
    # Compute the Hamiltonian matrix elements
    for i in range(H.shape[0]):
        for j in range(H.shape[1]):
            m1 = index[i][0]
            m2 = index[i][1]
            n1 = index[j][0]
            n2 = index[j][1]
            H[i][j] = a * (elem(m1, n1, 1, 1) * elem(m2, n2, 0, 0) +
                           elem(m1, n1, 0, 0) * elem(m2, n2, 1, 1)) \
                + 0.25 * b * (3 * elem(m1, n1, 1, 0) * elem(m2, n2, 2, 0) +
                              3 * elem(m1, n1, 0, 1) * elem(m2, n2, 0, 2) -
                              elem(m1, n1, 3, 0) * elem(m2, n2, 0, 0) -
                              elem(m1, n1, 0, 3) * elem(m2, n2, 0, 0)) \
                + 0.75 * b * (elem(m1, n1, 0, 1) * elem(m2, n2, 2, 0) +
                              elem(m1, n1, 1, 0) * elem(m2, n2, 0, 2) -
                              elem(m1, n1, 1, 2) * elem(m2, n2, 0, 0) -
                              elem(m1, n1, 2, 1) * elem(m2, n2, 0, 0) +
                              2 * elem(m1, n1, 0, 1) * elem(m2, n2, 1, 1) +
                              2 * elem(m1, n1, 1, 0) * elem(m2, n2, 1, 1)) \
                + 0.375 * d * (elem(m1, n1, 2, 2) * elem(m2, n2, 0, 0) +
                               elem(m1, n1, 0, 0) * elem(m2, n2, 2, 2)) \
                + 0.125 * d * (elem(m1, n1, 2, 0) * elem(m2, n2, 0, 2) +
                               elem(m1, n1, 0, 2) * elem(m2, n2, 2, 0)) \
                + 0.500 * d * elem(m1, n1, 1, 1) * elem(m2, n2, 1, 1) \
                + 0.250 * d * (elem(m1, n1, 1, 3) * elem(m2, n2, 0, 0) +
                               elem(m1, n1, 3, 1) * elem(m2, n2, 0, 0) +
                               elem(m1, n1, 0, 0) * elem(m2, n2, 1, 3) +
                               elem(m1, n1, 0, 0) * elem(m2, n2, 3, 1) +
                               elem(m1, n1, 0, 2) * elem(m2, n2, 1, 1) +
                               elem(m1, n1, 2, 0) * elem(m2, n2, 1, 1) +
                               elem(m1, n1, 1, 1) * elem(m2, n2, 0, 2) +
                               elem(m1, n1, 1, 1) * elem(m2, n2, 2, 0)) \
                + 0.0625 * d * (elem(m1, n1, 4, 0) * elem(m2, n2, 0, 0) +
                                elem(m1, n1, 0, 4) * elem(m2, n2, 0, 0) +
                                elem(m1, n1, 0, 0) * elem(m2, n2, 4, 0) +
                                elem(m1, n1, 0, 0) * elem(m2, n2, 0, 4) +
                                2 * elem(m1, n1, 2, 0) * elem(m2, n2, 2, 0) +
                                2 * elem(m1, n1, 0, 2) * elem(m2, n2, 0, 2))
    end = timer()
    print('hamilt: ', end - start)
    np.savez_compressed('hamilt.npz', H=H)
    return H
def main(b, d, n, delta_n, st_epsilon, lvl_epsilon, cut=0):
    print("Running with: B = " + str(b) + " D = " + str(d) + " N = " + str(n))
    cd(b, d, n)
    start = timer()
    E, ket = eigensystem.get(return_ket=True)
    # Select irreducible representations
    # ir_reps = eigensystem.levels(E, ket)
    # Select only the stable levels
    # stable_levels = diff.stable(E, ir_reps, b, d, n, delta_n)
    stable_levels = diff.stable(E, b, d, n, delta_n, st_epsilon)
    # Reduce the number of stable levels to check the convergence
    stable_levels = int((1 - cut) * stable_levels)
    E = E[:stable_levels]
    # Select irreducible representations
    ir_reps = eigensystem.levels(E, ket, lvl_epsilon)
    stop = timer()
    print('Get data:', stop - start, 'seconds')

    rebde = open('rebde.dat', 'w')
    reuna = open('reuna.dat', 'w')
    reuns = open('reuns.dat', 'w')
    # Write only one level corresponding to the bidimensional representation
    skip_next = False
    for i in range(E.size):
        n1 = ket[i][0]
        n2 = ket[i][1]
        if ir_reps[i] == 2:     # bidimensional representation (rebde)
            if not skip_next:
                rebde.write('{0:.18f}'.format(E[i]) + '\t' + str(n1) + '\t' +
                            str(n2) + '\n')
                skip_next = True
            else:
                skip_next = False
        else:
            if n2 % 2:          # unidimensional anti-symmetric representation
                reuna.write('{0:.18f}'.format(E[i]) + '\t' + str(n1) + '\t' +
                            str(n2) + '\n')
            else:               # unidimensional symmetric representation
                reuns.write('{0:.18f}'.format(E[i]) + '\t' + str(n1) + '\t' +
                            str(n2) + '\n')
    rebde.close()
    reuna.close()
    reuns.close()

    os.chdir("../../Scripts")
    print("Done\n")
def save(self):
    tls.cd(self.name)
    # Save each attribute to its own .npy file, prefixed with the object name
    np.save(self.name + 'shape', self.shape)
    np.save(self.name + 'ul', self.ul)
    np.save(self.name + 'beta', self.beta)
    np.save(self.name + 'gamma', self.gamma)
    np.save(self.name + 'T', self.T)
    np.save(self.name + 'Dt', self.Dt)
    np.save(self.name + 'eps_static', self.eps_static)
    np.save(self.name + 'S', self.S)
    np.save(self.name + 'r', self.r)
    np.save(self.name + 'eps', self.eps)
    np.save(self.name + 'alpha', self.alpha)
    np.save(self.name + 'ND', self.ND)
    np.save(self.name + 'ND0', self.ND0)
    np.save(self.name + 'n', self.n)
def get_zipfile(self, cr, uid, ids, context=None):
    zfilecontent = ''
    if isinstance(ids, (int, long)):
        ids = [ids]
    product = self.browse(cr, uid, ids[0], context)
    zfilename = '%s_%s.zip' % (product.name, uuid.uuid4())
    with cd(path.join(product.repository_id._parent_path,
                      product.repository_id.relpath)):
        dirpath = path.join(os.getcwd(), product.name)
        if path.isdir(dirpath):
            with ZipFile(zfilename, 'w') as zfile:
                zipdir(path.relpath(dirpath), zfile)
            with open(zfilename, 'rb') as zfile:
                zfilecontent = zfile.read().encode('base64')
    product.write({'zipfile': zfilecontent,
                   'zipfilename': '%s.zip' % product.name})
    return {
        'name': _('Download zip'),
        'type': 'ir.actions.act_window',
        'view_type': 'form',
        'view_mode': 'form',
        'res_model': 'product.product',
        'res_id': product.id,
        'view_id': self.pool.get('ir.model.data').get_object_reference(
            cr, uid, 'smile_module_repository',
            'view_product_product_form2')[1],
        'target': 'new',
        'context': context,
    }
def main(b, d, n):
    """Create bar plots for eigenvectors"""
    cd(b, d, n)
    # Get states
    E, eigenvectors, ket, index = \
        eigensystem.get(return_eigv=True, return_ket=True, return_index=True)
    # Get the index array that sorts the eigenvector coefficients
    # such that n1 + n2 is increasing
    sort_idx = np.argsort(index.sum(axis=1))
    # Sort the eigenvector coefficients
    eigenvectors = eigenvectors[:, sort_idx]

    # stable_levels = np.load('cache.npy')  # get cached stable levels
    no_eigv = 40    # number of eigenvectors to plot
    # Select the states corresponding to stable levels
    E = E[:no_eigv]
    eigenvectors = eigenvectors[:no_eigv]
    ket = ket[:no_eigv]

    # Get irreducible representation index
    ir_reps = eigensystem.levels(E, ket)
    # Build irreducible representation string
    reps = 'reuns', 'reuna', 'rebde'
    ir_str = [reps[i] for i in ir_reps]

    # Compute energy plot parameters
    k = index[sort_idx].sum(axis=1)                     # n1 + n2
    w = 1 / (k + 1) - 0.01                              # bar widths
    r = [j for i in range(n + 1) for j in range(i)]
    x = k - 0.5 + r / (k + 1)                           # positions

    d_no = 0    # number of duplicate states
    clean_dir('eigenvectors')
    for i in range(eigenvectors.shape[0]):
        eigv_len = no_signif_el(eigenvectors[i])
        minor_ticks = np.arange(0, eigv_len, 0.5)
        # Plot label
        label = 'E = ' + str(E[i]) + '\n' + '$\\left|' + \
            str(ket[i][0]) + '\\,' + str(ket[i][1]) + '\\right\\rangle$\t' + \
            ir_str[i]
        fname = str(ket[i][0]) + ' ' + str(ket[i][1])   # filename
        index_plot(eigenvectors[i], eigv_len, label, index, sort_idx,
                   fname, d_no)
        energy_plot(eigenvectors[i], eigv_len, x, w, label, index, sort_idx,
                    fname, d_no)
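# Note: `no_signif_el` above determines how many coefficients of an eigenvector
# are worth plotting. Its definition is not included in this collection; the
# sketch below is a guess that counts the leading entries whose magnitude exceeds
# a small threshold (the cutoff value is an assumption).
import numpy as np


def no_signif_el(eigenvector, threshold=1e-3):
    """Number of leading eigenvector coefficients above `threshold` in magnitude
    (hypothetical helper)."""
    significant = np.where(np.abs(eigenvector) > threshold)[0]
    return int(significant[-1]) + 1 if significant.size else 0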
def main(b, d, n):
    cd(b, d, n)
    # Get states
    E, eigenvectors, ket = eigensystem.get(return_eigv=True, return_ket=True)
    # Select stable levels
    st_idx = int(np.loadtxt('stable.txt')[0])
    E, eigenvectors, ket = E[:st_idx], eigenvectors[:st_idx], ket[:st_idx]

    if b:
        # Get irreducible representations
        ir_reps = eigensystem.levels(E, ket)
        rebde = np.loadtxt('rebde.dat', usecols=(0, ))
        reuna = np.loadtxt('reuna.dat', usecols=(0, ))
        reuns = np.loadtxt('reuns.dat', usecols=(0, ))
        # Compute participation ratio for each representation
        P_b = compute_p(eigenvectors, condition=np.where(ir_reps == 2))
        P_a = compute_p(eigenvectors, condition=np.where(ir_reps == 1))
        P_s = compute_p(eigenvectors, condition=np.where(ir_reps == 0))
        # Plot the participation ratio for each representation
        plt.scatter(rebde, P_b[::2], s=1, label='$\Gamma_b$')
        plt.scatter(reuna, P_a, s=1, label='$\Gamma_a$', color='r')
        plt.scatter(reuns, P_s, s=1, label='$\Gamma_s$', color='y')
        plt.xlabel('$E$')
        plt.ylabel('Participation ratio')
        plt.legend()
        plt.savefig('participation_ratio_rep.pdf')
        plt.close()
        # Plot the difference between the states of the bidimensional
        # representation
        plt.plot(rebde, P_b[::2] - P_b[1::2], lw=0.7,
                 label='$\Gamma_{b1} - \Gamma_{b2}$')
        plt.xlabel('$E$')
        plt.ylabel('$\\Delta$Participation ratio')
        plt.legend()
        plt.savefig('participation_ratio_rebde.pdf')
        plt.close()
    else:
        # P = 1 / (eigenvectors[0].size * np.sum(eigenvectors**4, axis=1))
        P = compute_p(eigenvectors)
        plt.scatter(E, P, s=1)
        plt.xlabel('$E$')
        plt.ylabel('Participation ratio')
        plt.savefig('participation_ratio.pdf')
        plt.close()
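# Note: `compute_p` (the participation ratio) is used throughout but not defined
# in this collection. A minimal sketch based on the commented-out formula above,
# P = 1 / (eigenvectors[0].size * np.sum(eigenvectors**4, axis=1)); the
# `condition` keyword is assumed to restrict the computation to a subset of states.
import numpy as np


def compute_p(eigenvectors, condition=None):
    """Participation ratio of each eigenvector (hypothetical reconstruction)."""
    if condition is not None:
        eigenvectors = eigenvectors[condition]
    return 1 / (eigenvectors.shape[1] * np.sum(eigenvectors**4, axis=1))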
def b_plot(B, d, n_i, max_e, ax, msize, marker):
    """Plot eta as a function of B"""
    reps = 'reuna', 'reuns', 'rebde'
    rnames = {
        'reuna': r'$\Gamma_a$',
        'reuns': r'$\Gamma_s$',
        'rebde': r'$\Gamma_b$'
    }
    for r in reps:
        values = []
        for b in B:
            cd(b, d, n_i)
            rep = np.loadtxt(r + '.dat', usecols=(0, ))
            values.append(compute_eta(rep[rep < max_e] if max_e else rep))
            os.chdir('../../Scripts')
        # Plot the results
        ax.plot(B, values, linestyle='', label=rnames[r], markersize=msize,
                marker=marker)
def pull(self, cr, uid, ids, context=None):
    if isinstance(ids, (int, long)):
        ids = [ids]
    for rep in self.browse(cr, uid, ids, context):
        if rep.state == 'draft':
            raise orm.except_orm(_('Error'),
                                 _('You cannot pull a repository not cloned'))
        with cd(path.join(self._parent_path, rep.relpath)):
            vcs = rep.vcs_id
            IrModuleRepository._call([vcs.cmd, vcs.cmd_pull])
    self.extract_modules(cr, uid, ids, context)
    self.write(cr, uid, ids,
               {'last_update': time.strftime('%Y-%m-%d %H:%M:%S')}, context)
    self.message_post(cr, uid, ids, body=_("Repository updated"),
                      context=context)
    return True
def clone(self, cr, uid, ids, context=None):
    if isinstance(ids, (int, long)):
        ids = [ids]
    with cd(self._parent_path):
        for rep in self.browse(cr, uid, ids, context):
            if rep.state != 'draft':
                raise orm.except_orm(
                    _('Error'),
                    _('You cannot clone a repository already cloned'))
            vcs = rep.vcs_id
            IrModuleRepository._call([vcs.cmd, vcs.cmd_clone, rep.directory,
                                      rep.relpath])
    self.extract_modules(cr, uid, ids, context)
    self.write(cr, uid, ids,
               {'active': True, 'state': 'done',
                'last_update': time.strftime('%Y-%m-%d %H:%M:%S')}, context)
    self.message_post(cr, uid, ids, body=_("Repository cloned"),
                      context=context)
    return True
def map_and_analyze(self, eqfil=None):
    if self.mapped is None:
        logger.debug('Mapping disabled.')
    elif self.mapped is True:
        logger.debug('is already mapped (skipping)!')
        return True
    elif self.mapped is False:
        with tools.cd(self.path):
            if eqfil is None:
                self.mapped = analysis.main(self.map_settings, eqfil)
            else:
                analysis.main(self.map_settings, eqfil)
    else:
        # self.mapped should only ever be None, True or False
        raise ValueError('unexpected value for self.mapped')
def e_plot(b_i, d, n_i, max_energy, ax, msize, marker):
    """Plot eta as a function of deltaE"""
    cd(b_i, d, n_i)
    reps = 'reuna', 'reuns', 'rebde'
    rnames = {
        'reuna': r'$\Gamma_a$',
        'reuns': r'$\Gamma_s$',
        'rebde': r'$\Gamma_b$'
    }
    for r in reps:
        values = []
        rep = np.loadtxt(r + '.dat', usecols=(0, ))
        for max_e in max_energy:
            values.append(compute_eta(rep[rep < max_e] if max_e else rep))
        # Plot the results
        max_energy = np.array(max_energy)
        max_energy[max_energy == 0] = rep[-1] - rep[0]
        ax.plot(max_energy, values, linestyle='',
                label=r'$B=' + str(b_i) + r'$, ' + rnames[r],
                markersize=msize, marker=marker)
    os.chdir('../../Scripts')
def install_vim(config_path='/usr/lib/python2.7/config-x86_64-linux-gnu',
                remove_build_dir=True, tag=None):
    with tempdir(remove_build_dir) as build_dir:
        print('Building Vim at folder {}'.format(build_dir))
        vim_src_dir = os.path.join(build_dir, 'vim_src')
        logging.info('Cloning Vim repo')
        run(['hg', 'clone', 'https://vim.googlecode.com/hg/', vim_src_dir])
        with cd(vim_src_dir):
            if tag:
                run(['hg', 'update', tag])  # -rv7-3-1034, -rv7-4b-022
            run(['./configure',
                 '--enable-multibyte',
                 '--with-tlib=ncurses',
                 '--enable-pythoninterp=yes',
                 '--enable-rubyinterp=yes',
                 '--with-features=huge',
                 '--with-python-config-dir={}'.format(config_path)])
            run(['make', vim_src_dir, '-j', '3'])
            run(['make', vim_src_dir, 'install'])
        logging.info('Vim compiled and installed. Linking to /usr/bin/vim')
        create_symlink('/usr/local/bin/vim', '/usr/bin/vim', backup=False)
def main():
    parser = argparse.ArgumentParser(description='Mecacell project generator')
    parser.add_argument("name", help='project name')
    parser.add_argument('-c', '--nocmake', help='don\'t generate CmakeList')
    parser.add_argument('-v', '--noviewer',
                        help='no qt viewer, console code only')
    args = parser.parse_args()

    baseDirectory = os.path.dirname(os.path.abspath(__file__)) + '/templates'
    env = Environment(loader=FileSystemLoader(searchpath=baseDirectory))

    viewerEnabled = True
    if args.noviewer is not None:
        viewerEnabled = False

    if os.path.exists(args.name):
        msg = t.colors.WARNING + 'path ' + args.name + \
            ' already exists. ' + t.colors.ENDC + 'Overwrite?'
        if not t.queryYN(msg, 'no'):
            sys.exit()
    else:
        os.makedirs(args.name)

    def mkdir(pname):
        if not os.path.exists(pname):
            os.makedirs(pname)

    with t.cd(args.name):
        projName = args.name.split('/')[-1]
        print t.colors.HEADER, 'Generating project', args.name, t.colors.ENDC
        sys.stdout.write(' * creating project architecture')
        if args.nocmake is None:
            mkdir('bin')
            mkdir('build')
        mkdir('src')
        mkdir('src/core')
        t.OK()

        tmplScenario = t.queryYN(
            'Do you want your scenario to be a class template '
            '(with cell type as parameter)?', 'no')
        print 'Name of your scenario class? [default = "Scenario"]',
        scName = raw_input()
        if not scName:
            scName = 'Scenario'
        print 'Name of your cell class? [default = "Cell"]',
        cellName = raw_input()
        if not cellName:
            cellName = 'Cell'

        sys.stdout.write(' * generating base source files')
        with open("src/mainconsole.cpp", "wb") as fh:
            fh.write(env.get_template('mainconsole.cpp').render(
                tmplScenario=tmplScenario, Scenario=scName, Cell=cellName))
        if viewerEnabled:
            with open("src/mainviewer.cpp", "wb") as fh:
                fh.write(env.get_template('mainviewer.cpp').render(
                    tmplScenario=tmplScenario, Scenario=scName,
                    Cell=cellName))
        with open("src/core/" + cellName.lower() + ".h", "wb") as fh:
            fh.write(env.get_template('cell.h').render(Cell=cellName))
        with open("src/core/" + cellName.lower() + ".cpp", "wb") as fh:
            fh.write(env.get_template('cell.cpp').render(Cell=cellName))
        if tmplScenario:
            with open("src/core/" + scName.lower() + ".hpp", "wb") as fh:
                fh.write(env.get_template('scenario.hpp').render(
                    Scenario=scName, Cell=cellName))
        else:
            with open("src/core/" + scName.lower() + ".h", "wb") as fh:
                fh.write(env.get_template('scenario.h').render(
                    Scenario=scName, Cell=cellName))
            with open("src/core/" + scName.lower() + ".cpp", "wb") as fh:
                fh.write(env.get_template('scenario.cpp').render(
                    Scenario=scName, Cell=cellName))
        t.OK()

        if args.nocmake is None:
            sys.stdout.write(' * generating CMakeLists.txt')
            with open("CMakeLists.txt", "wb") as fh:
                fh.write(env.get_template('CMakeLists.txt').render(
                    Project=projName, viewerEnabled=viewerEnabled))
            t.OK()
def main(b, d, n, delta_n, st_epsilon, lvl_epsilon, reselect=True, cut=0,
         bin_size=0.25, max_energy=0):
    if reselect:
        select_rep.main(b, d, n, delta_n, st_epsilon, lvl_epsilon, cut)
    reps = 'reuna', 'reuns', 'rebde'
    rnames = {
        'reuna': r'$\Gamma_a$',
        'reuns': r'$\Gamma_s$',
        'rebde': r'$\Gamma_b$'
    }
    cd(b, d, n)
    if max_energy:
        deltaE = max_energy
    else:
        deltaE = np.loadtxt('stable.txt')[1]
    count = int(4 / bin_size) + 1

    rel_sp = []     # relative spacings
    avg_sp = []     # average spacings
    w = []          # weights
    for r in reps:
        rep = np.loadtxt(r + '.dat', usecols=(0, ))
        if max_energy:
            rep = rep[rep <= max_energy]
        rel_sp.append(relSpacing(rep))
        avg_sp.append((rep[-1] - rep[0]) / rep.size)
        # P(s)Δs is the probability, P(s) is the probability density
        # P(s)Δs = Σ 1/3 * N_rep/N_tot
        w.append(np.ones(rel_sp[-1].shape) / (3 * rep.size * bin_size))
        # Don't plot if not all levels are used
        if not max_energy:
            fname = r + '.pdf'
            histogram(rel_sp[-1], bins=np.linspace(0, 4, count), fname=fname,
                      label=rnames[r], xlabel='$s$', figsize=(2.8, 3),
                      ylabel='No. of levels')
            fname = 'bar_' + r + '.pdf'
            bar_plot(rel_sp[-1], label=rnames[r], ylabel='s', xlabel='index',
                     fname=fname, dpi=400, figsize=(2.8, 3),
                     title=r'$\frac{E_n-E_0}{N}=' +
                     '{:.3}'.format(avg_sp[-1]) + '$')

    # Save the average spacings
    fname = 'avg_sp' + \
        ('_max_e_' + str(max_energy) + '.txt' if max_energy else '.txt')
    with open(fname, 'w') as f:
        f.write('\n'.join([str(i) for i in avg_sp]))

    # Relative spacing histogram, P(s)
    # Don't plot if not all levels are used
    if not max_energy:
        fname = 'P(s)' + '_st_' + '{:.0e}'.format(st_epsilon) + '_eps_' + \
            '{:.0e}'.format(lvl_epsilon) + \
            ('_cut_' + '{:.2f}'.format(cut) + '.pdf' if cut else '.pdf')
        histogram(rel_sp, bins=np.linspace(0, 4, count), weights=w,
                  label=[rnames[i] for i in reps], fname=fname, stacked=True,
                  ylabel='$P(s)$', xlabel='$s$', count=count, use_wigner=True,
                  use_poisson=True, figsize=(5.8, 4.5))
    # Fitted P(s)
    fname = 'P(s)_fit_' + '{:.0e}'.format(st_epsilon) + '_eps_' + \
        '{:.0e}'.format(lvl_epsilon) + \
        ('_cut_' + '{:.2f}'.format(cut) if cut else '') + \
        ('_max_e_' + str(max_energy) + '.pdf' if max_energy else '.pdf')
    histogram(rel_sp, bins=np.linspace(0, 4, count), weights=w,
              ylim=(0, 1.05),
              title='$\\Delta E =' + '{:.5}'.format(deltaE) + '$',
              fname=fname, count=count, ylabel='$P(s)$', xlabel='$s$',
              stacked=True, label=[rnames[i] for i in reps], fit=True,
              max_e=max_energy, figsize=(5.8, 3.7))

    # For the cumulative distribution plots, each irreducible representation
    # is plotted individually (*3)
    # I(s) = Σ P(s) Δs = Σ Σ N_rep/N_tot * 1/Δs * Δs = Σ Σ N_rep/N_tot
    # (*bin_size)
    w = [wi * bin_size * 3 for wi in w]
    # Cumulative relative spacing histogram, I(s)
    # Don't plot if not all levels are used
    if not max_energy:
        fname = 'I(s).pdf'
        histogram(rel_sp, cumulative=True, bins=np.linspace(0, 4, count),
                  ylabel=r'$I(s)$', xlabel='$s$', fname=fname, weights=w,
                  label=[rnames[i] for i in reps], count=count,
                  use_wigner=True, use_poisson=True, figsize=(5.8, 4.5))
    # Fitted I(s)
    fname = 'I(s)_fit' + \
        ('_max_e_' + str(max_energy) + '.pdf' if max_energy else '.pdf')
    histogram(rel_sp, cumulative=True, bins=np.linspace(0, 4, count),
              fit=True, ylabel=r'$I(s)$', xlabel='$s$', weights=w,
              count=count, label=[rnames[i] for i in reps], fname=fname,
              ylim=(0, 1.05), figsize=(5.8, 3.7))

    # Version
    with open('version.txt', 'w') as f:
        f.write('1.5.0')

    os.chdir("../../Scripts")
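# Note: `relSpacing` above converts an ordered level sequence into nearest-neighbour
# spacings in units of the mean spacing, so the histograms can be compared with the
# Poisson and Wigner distributions. Its definition is not part of this collection;
# the sketch below assumes the standard definition s_i = (E_{i+1} - E_i) / <ΔE>,
# consistent with the average spacing (rep[-1] - rep[0]) / rep.size used above.
import numpy as np


def relSpacing(levels):
    """Relative nearest-neighbour spacings of a sorted level sequence
    (hypothetical reconstruction)."""
    spacings = np.diff(levels)
    return spacings / spacings.mean()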
def main(b, d, n, delta_n, st_epsilon, lvl_epsilon, stable_only=True):
    start = timer()
    tools.cd(b, d, n)
    # Get data
    E, ket = eigensystem.get(return_ket=True)
    ir_reps = eigensystem.levels(E, ket, lvl_epsilon)
    if stable_only:     # choose all levels or only the stable ones
        stable_levels = int(np.loadtxt('stable.txt')[0])
        E = E[:stable_levels]

    rebde = np.loadtxt('rebde.dat', usecols=(0, ), unpack=True)
    reuna = np.loadtxt('reuna.dat', usecols=(0, ), unpack=True)
    reuns = np.loadtxt('reuns.dat', usecols=(0, ), unpack=True)

    # Bi-directional search
    E_in_rebde = np.in1d(E, rebde, assume_unique=False)
    E_in_reuna = np.in1d(E, reuna, assume_unique=False)
    E_in_reuns = np.in1d(E, reuns, assume_unique=False)
    rebde_in_E = np.in1d(rebde, E, assume_unique=False)
    reuna_in_E = np.in1d(reuna, E, assume_unique=False)
    reuns_in_E = np.in1d(reuns, E, assume_unique=False)

    if E.size < max(rebde.size, reuna.size, reuns.size):
        rebde = rebde[:E.size]
        reuna = reuna[:E.size]
        reuns = reuns[:E.size]
        rebde_in_E = rebde_in_E[:E.size]
        reuna_in_E = reuna_in_E[:E.size]
        reuns_in_E = reuns_in_E[:E.size]

    # Padding
    rebde = np.pad(rebde, pad_width=(0, E.size - rebde.size), mode='constant')
    reuna = np.pad(reuna, pad_width=(0, E.size - reuna.size), mode='constant')
    reuns = np.pad(reuns, pad_width=(0, E.size - reuns.size), mode='constant')
    rebde_in_E = np.pad(rebde_in_E,
                        pad_width=(False, E.size - rebde_in_E.size),
                        mode='constant')
    reuna_in_E = np.pad(reuna_in_E,
                        pad_width=(False, E.size - reuna_in_E.size),
                        mode='constant')
    reuns_in_E = np.pad(reuns_in_E,
                        pad_width=(False, E.size - reuns_in_E.size),
                        mode='constant')

    files = np.array([
        E, rebde, reuna, reuns,
        E_in_rebde, E_in_reuna, E_in_reuns,
        rebde_in_E, reuna_in_E, reuns_in_E
    ])

    with open("table.tex", "w") as f:
        f.write("\\documentclass{article}\n\n")
        f.write("\\usepackage[margin=0.2in]{geometry}\n")
        f.write("\\usepackage{longtable}\n")
        f.write("\\usepackage[table]{xcolor}\n\n")
        f.write("\\begin{document}\n\n")
        f.write("\\begin{longtable}{" + " | ".join(["c"] * 4) + "}\n")
        f.write("energy levels & rebde.dat & reuna.dat & reuns.dat\t\\\\\n")
        f.write("\\hline\n\\endfirsthead\n")
        for row in range(files.shape[1]):
            # Find the representation of the energy level
            c = 0
            for x in range(1, 4):
                if files[x + 3][row]:
                    c = x
            line = color(c, True) + '{:.18f}'.format(files[0][row]) + " & " + \
                " & ".join(color(x, files[x + 6][row]) +
                           '{:.18f}'.format(files[x][row])
                           for x in range(1, 4))
            f.write('\t' + line + " \\\\\n")
        f.write("\\end{longtable}")
        f.write("\n\n\\end{document}")

    os.chdir("../../Scripts")
    end = timer()
    print('total: ', end - start)
def get_file_with_name(self, fname):
    with tools.cd(self.path):
        if os.path.exists(fname):
            return File(fname, read=True)
    raise IOError('File not found', fname, 'in', self.path)
def main(b, d, n):
    cd(b, d, n)
    # Get states
    E, eigenvectors, ket = eigensystem.get(return_eigv=True, return_ket=True)
    # Select stable levels
    st_idx = int(np.loadtxt('stable.txt')[0])
    E, eigenvectors, ket = E[:st_idx], eigenvectors[:st_idx], ket[:st_idx]

    # Get reference states
    cd(0.0, d, n)
    E_ref, eigenvectors_ref, ket_ref = eigensystem.get(return_eigv=True,
                                                       return_ket=True)
    # Select stable reference levels
    st_idx = int(np.loadtxt('stable.txt')[0])
    E_ref, eigenvectors_ref, ket_ref = E_ref[:st_idx], \
        eigenvectors_ref[:st_idx], ket_ref[:st_idx]
    # Reference participation ratio
    P_ref = compute_p(eigenvectors_ref)

    # Find the index of the states with energy closest to the reference ones
    # idx = np.searchsorted(E, E_ref)
    # idx = np.clip(idx, 1, len(E) - 1)
    # left = E[idx - 1]
    # right = E[idx]
    # idx -= E_ref - left < right - E_ref

    # Compute the participation ratio
    P = compute_p(eigenvectors)

    cd(b, d, n)
    ylim = ()
    ylim = plot(E, P, E_ref, P_ref, label='$B=' + str(b) + '$',
                fname='participation_ratio_cmp.pdf')

    # Get irreducible representations
    ir_reps = eigensystem.levels(E, ket)
    rebde = np.loadtxt('rebde.dat', usecols=(0, ))
    reuna = np.loadtxt('reuna.dat', usecols=(0, ))
    reuns = np.loadtxt('reuns.dat', usecols=(0, ))
    # Compute participation ratio for each representation
    P_b = compute_p(eigenvectors, condition=np.where(ir_reps == 2))
    P_a = compute_p(eigenvectors, condition=np.where(ir_reps == 1))
    P_s = compute_p(eigenvectors, condition=np.where(ir_reps == 0))
    # Plot the participation ratio for each representation
    plot(rebde, P_b[::2], E_ref, P_ref, label='$\Gamma_b$', ylim=ylim,
         fname='participation_ratio_cmp_rebde.pdf')
    plot(reuna, P_a, E_ref, P_ref, label='$\Gamma_a$', ylim=ylim,
         fname='participation_ratio_cmp_reuna.pdf')
    plot(reuns, P_s, E_ref, P_ref, label='$\Gamma_s$', ylim=ylim,
         fname='participation_ratio_cmp_reuns.pdf')
def compute(self):
    """
    Run Q: step forward through the inputfile list to the first inputfile
    without a logfile and run it.
    """
    with tools.cd(self.path):
        # Check if we're done already
        if self.is_finished():
            try:
                logger.warning('Nothing to compute. %s %s', self.if_pos,
                               self.inputfiles[self.if_pos])
            except IndexError:
                logger.warning('Nothing to compute. %s', self.if_pos)
            # TODO: 1) automatic mapping
        else:
            # TODO: add restart-capability
            if not self.is_finished():
                if len(self.wus) > self.if_pos:
                    old_input = self.wus[self.if_pos].inputfile[0]
                    new_input = self.inputfiles[self.if_pos]
                    if old_input == new_input:
                        if self.wus[self.if_pos].checklogfile() == 0:
                            # this WorkUnit is finished; we load the next one
                            logger.warning(
                                'this run is already done skipping %s',
                                self.wus[self.if_pos].inputfile[0])
                            self.if_pos += 1
                            self.compute()
                            return

                # Generate new compute units, until one w/o logfile exists
                while True:
                    if self.is_finished():
                        return
                    self.cwu = self.create_next_workunit()
                    if (self.cwu.status is not None and
                            self.cwu.status == 0):
                        logger.debug('skip step %s',
                                     self.inputfiles[self.if_pos][0])
                        self._check_eq_and_map()
                        self.wus.append(self.cwu)
                        self.if_pos += 1
                        continue
                    break

                exe = self.q_dyn5_exe
                self.check_exe()
                if len(self.wus) != self.cwu.unitnumber:
                    raise Exception('discrepancy in input file order')

                if self.cwu.run(exe) == 0:
                    self.wus.append(self.cwu)
                    self._check_eq_and_map()
                else:
                    err = 'There was a problem with step: '
                    err += str(self.if_pos)
                    err += ', in inputfile '
                    err += str(self.inputfiles[self.if_pos][0])
                    err += NLC + 'The status code was: '
                    err += str(self.cwu.status)
                    err += NLC + NLC + 'The error messages were: '
                    err += NLC + NLC + 'Directory ' + os.getcwd()
                    err += str(self.cwu.errMsg) + NLC
                    err += 'Will raise Exception...'
                    logger.warning(err)
                    raise Exception('computation failed')

                # increment for next step
                self.if_pos += 1