def configure_alpine(self):
    @processify(msg=text.get("step_msg", "setting_up_alpine") % self.alpine_version, interval=1.5)
    def configure():
        logger.debug("Extracting %s to %s" % (self.alpine_installer, self.alpine_release_path))
        ret, stdout, stderr = self.executor.run(
            "sh %s --target %s --noexec" % (self.alpine_installer, self.alpine_release_path))
        if ret != 0:
            raise Exception(stderr)

        logger.debug("Preparing Alpine Data Repository")
        alpine_data_repo = os.path.join(self.options.chorus_path, "shared/ALPINE_DATA_REPOSITORY")
        if os.path.exists(alpine_data_repo):
            logger.debug("Alpine Data Repository already exists, skipping full copy")
            # if not os.path.exists(os.path.join(alpine_data_repo, "configuration/hadoop_version.properties")):
            self._cp_f(
                os.path.join(self.alpine_release_path,
                             "ALPINE_DATA_REPOSITORY/configuration/hadoop_version.properties"),
                os.path.join(alpine_data_repo, "configuration/hadoop_version.properties"))
            migrate_alpine_conf(
                os.path.join(alpine_data_repo, "configuration/alpine.conf"),
                os.path.join(self.alpine_release_path,
                             "ALPINE_DATA_REPOSITORY/configuration/alpine.conf"))
        else:
            shutil.copytree(
                os.path.join(self.alpine_release_path, "ALPINE_DATA_REPOSITORY"),
                alpine_data_repo)

    configure()
def main():
    starttime = time.time()  # timer to see how long the generation takes, as it takes a while
    heart_name = get_program_parameters()  # get the arguments
    with open('VascuSynthLocation.txt', 'r') as location:  # get location of VascuSynth
        vascusynthlocation = location.read().splitlines()[0]
    configure(vascusynthlocation, 'config_multi')  # configure VascuSynth
    oxygen_map(heart_name, 2.1)  # make oxygenation map
    # get and format the name of the file to create an appropriate directory for results
    file_name = heart_name.split('/')[2].split(".")[0]
    file_location = heart_name.split(".")[0]
    if not os.path.exists('results/' + file_name):
        os.makedirs('results/' + file_name)

    print("Running VascuSynth")  # run VascuSynth
    try:
        startpoints = []
        startpointsdirs = file_location.split("/")
        startpointlocation = str(startpointsdirs[0] + "/" + startpointsdirs[1] + "/points/")
        for files in os.listdir(startpointlocation):
            if os.path.isfile(os.path.join(startpointlocation, files)):
                findstart(str(startpointlocation + files))
                save_name = str(file_name + '/' + files.split(".")[0])
                # write the file name into imageNames.txt so that VascuSynth knows what to generate
                with open(vascusynthlocation + '/imageNames.txt', "w") as file:
                    file.writelines(files.split(".")[0])
                print(files)
                os.system("cd " + vascusynthlocation +
                          " ; ./VascuSynth paramFiles.txt imageNames.txt 0.005")
                print("Vascular data generated")
                # copy the images into the same directory for easy viewing
                copymultivs(vascusynthlocation, file_name, save_name, file_location)
                # convert the GXL coordinate file to a VTK one
                gxl_to_vtk(str(vascusynthlocation + '/' + save_name.split("/")[1] +
                               '/tree_structure.xml'),
                           str("results/" + save_name))
                print("Vascular data converted from GXL format to VTK")
    except:
        print("Error: VascuSynth not found.\nAborting")
        sys.exit(1)

    print("%f Seconds since start of generation" % (time.time() - starttime))
    print("Generation Complete")
def patch_env(request, monkeypatch):
    test_name = request._pyfuncitem.name
    try:
        import settings
        reload(settings)
        from configure import configure
        settings.KIVY_GRAPHICS_WIDTH = 1
        settings.KIVY_GRAPHICS_HEIGHT = 1
        configure()
        settings.DATABASE_NAME = "test-%s-db.sqlite3" % test_name
        settings.DATABASE_PATH = os.path.join(settings.PROJECT_DIR, settings.DATABASE_NAME)

        # apply settings overrides declared on the test module, if any
        module_overrides = getattr(request.module, "SETTINGS_OVERRIDE", {})
        for option_name, option_value in module_overrides.items():
            setattr(settings, option_name, option_value)

        from kivy.config import ConfigParser
        ConfigParser._named_configs = {}

        # apply settings overrides declared on the test function itself, if any
        if hasattr(request.function, 'settings'):
            function_overrides = request.function.settings
            for option_name, option_value in function_overrides.items():
                setattr(settings, option_name, option_value)
        sys.modules["settings"] = settings

        monkeypatch.setattr('kivy.animation.Animation.start', start)
        monkeypatch.setattr('kivy.clock.Clock.create_trigger', lambda c, t=None: c)
        monkeypatch.setattr('kivy.core.audio.SoundLoader.load', load)

        def fin():
            from managers.database import database_manager
            if database_manager._connection:
                database_manager._connection.close()
                database_manager._connection = None
            if os.path.exists("test-%s-db.sqlite3" % test_name):
                os.remove("test-%s-db.sqlite3" % test_name)
            if os.path.exists("kognitivo-test-%s.ini" % test_name):
                os.remove("kognitivo-test-%s.ini" % test_name)

        request.addfinalizer(fin)
    except:
        # clean up any test artifacts before re-raising
        if os.path.exists("test-%s-db.sqlite3" % test_name):
            os.remove("test-%s-db.sqlite3" % test_name)
        if os.path.exists("kognitivo-test-%s.ini" % test_name):
            os.remove("kognitivo-test-%s.ini" % test_name)
        raise
def connect():
    """ Connect to the PostgreSQL database server """
    conn = None
    try:
        # read connection parameters
        params = configure()

        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)

        # create a cursor
        cur = conn.cursor()

        # execute a statement
        print('PostgreSQL database version:')
        cur.execute('SELECT version()')

        # display the PostgreSQL database server version
        db_version = cur.fetchone()
        print(db_version)

        # close the communication with the PostgreSQL server
        cur.close()
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        if conn is not None:
            conn.close()
            print('Database connection closed.')
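# A minimal sketch of the configure() helper assumed by connect() above,
# reading connection parameters from an ini-style file. The filename
# 'database.ini' and the '[postgresql]' section name are illustrative
# assumptions, not confirmed by this snippet.
from configparser import ConfigParser

def configure(filename='database.ini', section='postgresql'):
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception('Section {} not found in {}'.format(section, filename))
    # return the section as a dict, suitable for psycopg2.connect(**params)
    return dict(parser.items(section))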
def main():
    # Change into our root directory.
    ROOT = os.path.abspath(os.path.dirname(sys.argv[0]))
    os.chdir(ROOT)

    # Parse the arguments.
    ap = argparse.ArgumentParser(description="Build an android package.")
    ap.add_argument("command",
                    help="The command to run. One of installsdk, configure, setconfig, build, logcat, or test.")
    ap.add_argument("argument", nargs='*', help="The arguments to the selected command.")
    args = ap.parse_args()

    iface = interface.Interface()

    def check_args(n):
        if len(args.argument) != n:
            iface.fail("The {} command expects {} arguments.".format(args.command, n))
        return args.argument

    if args.command == "installsdk":
        check_args(0)
        install_sdk.install_sdk(iface)
    elif args.command == "configure":
        directory, = check_args(1)
        configure.configure(iface, directory)
    elif args.command == "setconfig":
        directory, var, value = check_args(3)
        configure.set_config(iface, directory, var, value)
    elif args.command == "build":
        if len(args.argument) < 2:
            iface.fail("The build command expects at least 2 arguments.")
        build.build(iface, args.argument[0], args.argument[1:])
    elif args.command == "logcat":
        subprocess.call([plat.adb, "logcat", "-s", "python:*"] + args.argument)
    elif args.command == "test":
        iface.success("All systems go!")
    else:
        ap.error("Unknown command: " + args.command)
def dbfilename():
    try:
        output = configure(["get", "db_location"])
        if output is None:
            raise ValueError()
        return output
    except:
        # fall back to the default database location next to this module
        return os.path.dirname(__file__) + "/db.json"
def cmd_configure(*argv):
    parser = argparse.ArgumentParser(prog="crom configure")
    parser.add_argument("path", help="path to the project sources")
    parser.add_argument("-f", "--force",
                        help="force re-configuration regardless of timestamps",
                        default=False, action='store_true')
    args = parser.parse_args(*argv)
    return configure.configure(args.path, args.force, cmd=True)
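# Usage sketch (hypothetical): because cmd_configure() forwards *argv to
# parse_args(), it can be driven programmatically as well as from the CLI.
# "./myproject" is an illustrative path, not one taken from the source.
rc = cmd_configure(["./myproject", "--force"])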
def __init__(self, remoteShell, domainAdmin="admin", domain=None):
    self.remoteShell = remoteShell
    self.vastoolPath = "/opt/quest/bin/vastool"
    self.domainAdmin = domainAdmin
    self.defaultDomain = domain

    self.info = info.info(self.run)
    self.flush = flush.flush(self.run)
    self.create = create.create(self.run, self.defaultDomain)
    self.delete = delete.delete(self.run)
    self.timesync = timesync.timesync(self.run)
    self.nss = nss.nss(self.run)
    self.group = group.group(self.run)
    self.isvas = isvas.isvas(self.run)
    self.list = list.list(self.run)
    self.auth = auth.auth(self.run, self.defaultDomain)
    self.cache = cache.cache(self.run)
    self.configure = configure.configure(self.run)
    self.configureVas = configureVas.configureVas(self.run)
    self.schema = schema.schema(self.run)
    self.merge = merge.merge(self.run)
    self.unmerge = unmerge.unmerge(self.run)
    self.user = User.user(self.run)
    self.ktutil = ktutil.ktutil(self.run)
    self.load = load.load(self.run)
    self._license = License.License(self.run)
    self.License = self._license.License
    self.parseLicense = self._license.parseLicense
    self.compareLicenses = self._license.compareLicenses
    #self.vasUtilities = vasUtilities.vasUtilities(self.remoteShell)
    self.unconfigure = unconfigure.unconfigure(self.run)
    self.nssdiag = nssdiag(self.run)

    # The isinstance() calls below are no-ops at runtime; they only give
    # IDEs a type hint for each of the attributes assigned above.
    isinstance(self.info, info.info)
    isinstance(self.flush, flush.flush)
    isinstance(self.create, create.create)
    isinstance(self.delete, delete.delete)
    isinstance(self.timesync, timesync.timesync)
    isinstance(self.nss, nss.nss)
    isinstance(self.group, group.group)
    isinstance(self.isvas, isvas.isvas)
    isinstance(self.list, list.list)
    isinstance(self.auth, auth.auth)
    isinstance(self.cache, cache.cache)
    isinstance(self.configure, configure.configure)
    isinstance(self.configureVas, configureVas.configureVas)
    isinstance(self.schema, schema.schema)
    isinstance(self.merge, merge.merge)
    isinstance(self.unmerge, unmerge.unmerge)
    isinstance(self.user, User.user)
    isinstance(self.ktutil, ktutil.ktutil)
    isinstance(self.load, load.load)
    #isinstance(self.vasUtilities, vasUtilities.vasUtilities)
    isinstance(self.unconfigure, unconfigure.unconfigure)
    isinstance(self.nssdiag, nssdiag)
def cmd_test(*argv):
    parser = argparse.ArgumentParser(prog="crom test")
    parser.add_argument("path", help="path to the project sources")
    args = parser.parse_args(*argv)

    cfg_file = tools.get_project_file(args.path)
    project = tools.load_project(cfg_file)
    test_exe = project.get_test_executable()
    if test_exe is None:
        print("No tests defined for project %s!" % project.name)
        return 1

    # FIXME: we should launch build in all cases, but build shouldn't launch tests...
    return (configure.configure(args.path, project=project)
            or call(os.path.join(os.getcwd(), 'bin', test_exe)))
def __init__(self, name=None, **kwargs):
    from utils import NaiveProfiler
    self.profiler = NaiveProfiler()
    self.initialize_cprofile()
    self.profiler.fix_from("---app-init")

    # lazy attributes
    self._tracker = None
    self._sounds = None
    self.billing = None
    self._storage = None
    self.lang = None
    self.manager = None
    self._google_client = None

    self.profiler.fix_from("super init")
    super(KognitivoApp, self).__init__(**kwargs)
    self.profiler.fix_to("super init")

    self.db_path = None

    self.profiler.fix_from("import-configure")
    from configure import configure
    self.profiler.fix_to("import-configure")

    self.profiler.fix_from("configure")
    configure()
    self.profiler.fix_to("configure")

    from settings import DEVELOPMENT_VERSION
    if name is None:
        KognitivoApp.name = "kognitivo-dev" if DEVELOPMENT_VERSION else "kognitivo"
    else:
        KognitivoApp.name = name
    self.service = None
    self.profiler.fix_to("---app-init")
def getConnection():
    """ Connect to the PostgreSQL database server """
    conn = None
    try:
        # read connection parameters
        params = configure()

        # connect to the PostgreSQL server
        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
    finally:
        return conn
def initialize_options(self):
    """
    Configure the compilation before starting.
    """
    build_ext.initialize_options(self)
    all_found = configure(['-i', '.',
                           '-i', '/usr/lib/dbus-1.0/include',
                           '-i', '/usr/include/dbus-1.0/dbus',
                           '-i', '/usr/lib/glib-2.0/include',
                           '-i', '/usr/include/glib-2.0'])
    if not all_found:
        sys.stderr.write('*** ERROR not all files required for compilation have been found.\n')
        sys.stderr.write('*** ERROR Please make sure you have installed all the requirements.\n')
        sys.stderr.write('*** ERROR Also check that the path list in "setup.py" is correct.\n')
        sys.exit(1)
def main(my_build, target):
    start_time = time.time()
    nuwrf_dir = os.environ.get("NUWRFDIR")
    if target in my_build.utils_exe:
        build_dir = nuwrf_dir + "/utils"
    else:
        build_dir = nuwrf_dir + "/" + my_build.target_dir[target]
    os.chdir(build_dir)
    logger.debug("Entering " + os.getcwd())

    build_options = my_build.options
    # is_wrflis = "wrf" in target or "lis" in target
    clean_wrf = "force_clean_wrf" in my_build.env_vars  # and is_wrflis
    if clean_wrf or "cleanfirst" in build_options:
        rc = clean.clean_it(my_build, target)
        if os.environ.get("DEBUG_BUILD") is None:
            if rc != 0:
                logger.error("Clean failed")
                return

    rc = configure.configure(my_build, target)
    if os.environ.get("DEBUG_BUILD") is None:
        if rc != 0:
            logger.error("Configure failed")
            return

    if target == "utils" or target in my_build.utils_exe:
        compile_utils(my_build, target)
    else:
        compile_it(my_build, target)

    if clean_wrf:
        del my_build.env_vars["force_clean_wrf"]

    # Done
    end_time = time.time() - start_time
    logger.info("[" + target + "] build time taken = %f" % end_time)
def loglist(args):
    argLength = len(args)
    db = DB().Load()
    if len(db.database) == 0:
        print("Database is empty")
        return

    reversed_list = db.database[::-1]
    template = """{0} | {1} | {2} | {3} | {4}"""
    if argLength == 1:
        try:
            display_amount = int(args[0])
        except (ValueError, IndexError):
            if args[0] == "all":
                display_amount = int(10000)  # that's a dumb one, but still
            elif args[0] == "csv":
                display_amount = int(10000)
                template = """{0},{1},{2},{3},{4}"""
            else:
                print("Undefined amount")
                return
        reversed_list = reversed_list[:display_amount]
    else:
        maxentries = configure(["get", "listmax"])
        if maxentries is None:
            maxentries = len(reversed_list)
        reversed_list = reversed_list[:int(maxentries)]

    for entry in reversed_list:
        print(template.format(entry['i'], entry['s'], entry['p'], entry['d'],
                              timeDifference(entry['e'], entry['b'], stripseconds=True)))
log = open(log_file, 'r')
cifProcessed += int(log.readline())
log.close()
remove(log_file)

cifNoFile = open("logs/cif2process.log", 'r')
cifNo = int(cifNoFile.readline())
cifNoFile.close()

progressSummary = open("logs/progressSummary.log", 'w')
# "Przetworzono" is Polish for "Processed"
progressSummary.write("Przetworzono: " + str(cifProcessed) + "/" + str(cifNo) + "\n")
progressSummary.close()

config = configure()
numberOfProcesses = config["N"]
scratch = config["scratch"]
# pool = Pool(numberOfProcesses)

argumentsList = []
argumentsList.append(["logs/anionPi.log", join(scratch, "anionPi*.log")])
argumentsList.append(["logs/cationPi.log", join(scratch, "cationPi*.log")])
argumentsList.append(["logs/piPi.log", join(scratch, "piPi*.log")])
argumentsList.append(["logs/anionCation.log", join(scratch, "anionCation*.log")])
argumentsList.append(["logs/hBonds.log", join(scratch, "hBonds*.log")])
argumentsList.append(
def build(mode=None, optimize=None):
    """
    Build SORD code.
    """
    cf = util.namespace(configure.configure())
    if not optimize:
        optimize = cf.optimize
    if not mode:
        mode = cf.mode
    if not mode:
        mode = 'sm'
    base = (
        'globals.f90', 'diffcn.f90', 'diffnc.f90', 'hourglass.f90',
        'bc.f90', 'surfnormals.f90', 'util.f90', 'frio.f90',
    )
    common = (
        'arrays.f90', 'fieldio.f90', 'stats.f90', 'parameters.f90',
        'setup.f90', 'gridgen.f90', 'attenuation.f90', 'material.f90',
        'source.f90', 'inivolstress.f90', 'rupture.f90', 'resample.f90',
        'checkpoint.f90', 'timestep.f90', 'stress.f90', 'acceleration.f90',
        'sord.f90',
    )
    cwd = os.getcwd()
    path = os.path.realpath(os.path.dirname(__file__))
    f = os.path.join(path, 'bin')
    if not os.path.isdir(f):
        os.mkdir(f)
    new = False
    os.chdir(os.path.join(path, 'src'))
    if 's' in mode:
        source = base + ('serial.f90',) + common
        for opt in optimize:
            object_ = os.path.join('..', 'bin', 'sord-s' + opt)
            compiler = cf.fortran_serial + cf.fortran_flags[opt]
            new |= util.make(compiler, object_, source)
    if 'm' in mode and cf.fortran_mpi:
        source = base + ('mpi.f90',) + common
        for opt in optimize:
            object_ = os.path.join('..', 'bin', 'sord-m' + opt)
            compiler = cf.fortran_mpi + cf.fortran_flags[opt]
            new |= util.make(compiler, object_, source)
    os.chdir(path)
    if new:
        try:
            import bzrlib
        except ImportError:
            print('Warning: bzr not installed. Install bzr if you want to '
                  'save a copy of the source code for posterity with each run.')
        else:
            os.system('bzr export sord.tgz')
    os.chdir(cwd)
    return
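# Usage sketch (illustrative): with no arguments, build() falls back to the
# mode/optimize values from configure.configure(); explicit values force a
# particular variant, e.g. the MPI solver with 'O'-optimized flags. The
# letters 'm' and 'O' are taken from this function's own mode handling and
# must be keys of cf.fortran_flags; this is not a confirmed invocation from
# the source.
build(mode='m', optimize='O')
build()  # rely entirely on the configured defaults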
import sys
import argparse
import mediapipe as mp
import numpy as np
import matplotlib.pyplot as plt  # visualisation
import pandas as pd
import matplotlib.gridspec as gridspec

# insert at 1; 0 is the script path (or '' in REPL)
sys.path.insert(1, '../logging/')
import configure as logs

from scipy.signal import butter, filtfilt
from progress.bar import Bar

# Configure logger
logs.configure()

"""
Used to build synthetic training set
"""

HandLandmarkLabel = [
    "WRIST", "THUMB_CMC", "THUMB_MCP", "THUMB_IP", "THUMB_TIP",
    "INDEX_FINGER_MCP", "INDEX_FINGER_PIP", "INDEX_FINGER_DIP", "INDEX_FINGER_TIP",
    "MIDDLE_FINGER_MCP", "MIDDLE_FINGER_PIP", "MIDDLE_FINGER_DIP", "MIDDLE_FINGER_TIP",
    "RING_FINGER_MCP", "RING_FINGER_PIP", "RING_FINGER_DIP", "RING_FINGER_TIP",
    "PINKY_MCP", "PINKY_PIP", "PINKY_DIP", "PINKY_TIP"
]

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
from configure import configure
from dijkstra import Graph
import numpy as np

if __name__ == '__main__':
    conf = configure()
    graph = Graph(graph=conf.dis)
    num = graph.VertexNum
    table = np.zeros(shape=[num, num], dtype=int)
    for i in range(num):
        for j in range(num):
            dist, path = graph.Dijkstra(i, j)
            Path = []
            for t in range(len(path)):
                Path.append(graph.labels[path[len(path) - 1 - t]])
            if len(Path) > 1:
                table[i][j] = Path[1]
            else:
                table[i][j] = Path[0]
            print('Shortest path from node {} to node {}:\n{}\nShortest path length: {}'.format(i, j, Path, dist))
    # print(table)
    for i in range(num):
        print('Routing table for node {}:'.format(i))
        print('-------------------------------------------------------')
        for j in range(num):
            if i == j:
                continue
            else:
                print('To node {}\tforward to node {}'.format(j, table[i][j]))
        print('-------------------------------------------------------')
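# A hypothetical configure() for the routing demo above: the script only
# requires an object whose .dis field is the adjacency matrix consumed by
# Graph(graph=conf.dis). The 4-node weights here are purely illustrative;
# the real configuration lives in the project's configure module.
class configure:
    def __init__(self):
        import numpy as np
        inf = float('inf')
        # dis[i][j] is the edge weight between nodes i and j; inf = no edge
        self.dis = np.array([[0, 1, 4, inf],
                             [1, 0, 2, 5],
                             [4, 2, 0, 1],
                             [inf, 5, 1, 0]])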
# Omer Khan
# CS461 Project 4
# Brian Hare
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from configure import configure

trainingData = 'ramen-ratings-training.csv'
testData = 'ramen-ratings-test.csv'
validationData = 'ramen-ratings-validation.csv'
oldFile = 'ramen-ratings.csv'

# configure() prepares the training/test/validation CSVs from the original file
configure(oldFile, trainingData, testData, validationData)

df_training = pd.read_csv(trainingData)
df_test = pd.read_csv(testData)
df_validation = pd.read_csv(validationData)

# encode the categorical columns as integer codes
df_training['Brand'] = pd.Categorical(df_training['Brand'])
df_training['Brand'] = df_training.Brand.cat.codes
df_training['Style'] = pd.Categorical(df_training['Style'])
df_training['Style'] = df_training.Style.cat.codes
df_training['Country'] = pd.Categorical(df_training['Country'])
df_training['Country'] = df_training.Country.cat.codes

df_test['Brand'] = pd.Categorical(df_test['Brand'])
df_test['Brand'] = df_test.Brand.cat.codes
df_test['Style'] = pd.Categorical(df_test['Style'])
df_test['Style'] = df_test.Style.cat.codes
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 21 14:04:23 2018

@author: michal
"""
from configure import configure
configure()

from Bio.PDB import Selection, NeighborSearch
from ringDetection import getSubstituents, isFlat, isFlatPrimitive, molecule2graph
from anionTemplateCreator import anionMatcher
import json
from os.path import join
from glob import glob
from networkx.readwrite.json_graph import node_link_graph
from copy import copy
from biopythonUtilities import createResId, createResIdFromAtom
#from collections import defaultdict
#from supramolecularLogging import writeAdditionalInfo
#from time import time


class AnionData:
    def __init__(self, anionType, charged, anionId, properties):
        self.anionType = anionType
        self.charged = charged
        self.anionId = anionId
        self.hBondsAnalyzed = False
        self.properties = properties
def stage(inputs):
    """
    Setup, and optionally launch, a SORD job.
    """
    import glob, time, getopt, shutil
    import setup

    # Save start time
    starttime = time.asctime()
    print('SORD setup')

    # Read defaults
    pm = {}
    f = os.path.join(os.path.dirname(__file__), 'parameters.py')
    exec open(f) in pm
    if 'machine' in inputs:
        cf = configure.configure(machine=inputs['machine'])
    else:
        cf = configure.configure()

    # Merge inputs
    inputs = inputs.copy()
    util.prune(inputs)
    util.prune(pm)
    util.prune(cf, pattern='(^_)|(^.$)')
    for k, v in inputs.iteritems():
        if k in cf:
            cf[k] = v
        elif k in pm:
            pm[k] = v
        else:
            sys.exit('Unknown parameter: %s = %r' % (k, v))
    cf = util.namespace(cf)
    cf.rundir = os.path.expanduser(cf.rundir)
    pm = prepare_param(util.namespace(pm), cf.itbuff)

    # Command line options
    opts = [
        'n', 'dryrun',
        's', 'serial',
        'm', 'mpi',
        'i', 'interactive',
        'q', 'queue',
        'd', 'debug',
        'g', 'debugging',
        't', 'testing',
        'p', 'profiling',
        'O', 'optimized',
        'f', 'force',
    ]
    options = ''.join(opts[::2])
    long_options = opts[1::2]
    opts = getopt.getopt(sys.argv[1:], options, long_options)[0]
    for o, v in opts:
        if o in ('-n', '--dry-run'):
            cf.prepare = False
        elif o in ('-s', '--serial'):
            cf.mode = 's'
        elif o in ('-m', '--mpi'):
            cf.mode = 'm'
        elif o in ('-i', '--interactive'):
            cf.run = 'i'
        elif o in ('-q', '--queue'):
            cf.run = 'q'
        elif o in ('-d', '--debug'):
            cf.optimize = 'g'
            cf.run = 'g'
        elif o in ('-g', '--debugging'):
            cf.optimize = 'g'
        elif o in ('-t', '--testing'):
            cf.optimize = 't'
        elif o in ('-p', '--profiling'):
            cf.optimize = 'p'
        elif o in ('-O', '--optimized'):
            cf.optimize = 'O'
        elif o in ('-f', '--force'):
            if os.path.isdir(cf.rundir):
                shutil.rmtree(cf.rundir)
        else:
            sys.exit('Error: unknown option: ' + o)
    if not cf.prepare:
        cf.run = False

    # Partition for parallelization
    pm.nn = tuple(int(i) for i in pm.nn)
    maxtotalcores = cf.maxnodes * cf.maxcores
    if not cf.mode and maxtotalcores == 1:
        cf.mode = 's'
    np3 = pm.np3[:]
    if cf.mode == 's':
        np3 = [1, 1, 1]
    nl = [(pm.nn[i] - 1) / np3[i] + 1 for i in range(3)]
    i = abs(pm.faultnormal) - 1
    if i >= 0:
        nl[i] = max(nl[i], 2)
    pm.np3 = tuple((pm.nn[i] - 1) / nl[i] + 1 for i in range(3))
    cf.np = pm.np3[0] * pm.np3[1] * pm.np3[2]
    if not cf.mode:
        cf.mode = 's'
        if cf.np > 1:
            cf.mode = 'm'

    # Resources
    if cf.maxcores:
        cf.nodes = min(cf.maxnodes, (cf.np - 1) / cf.maxcores + 1)
        cf.ppn = (cf.np - 1) / cf.nodes + 1
        cf.cores = min(cf.maxcores, cf.ppn)
        cf.totalcores = cf.nodes * cf.maxcores
    else:
        cf.nodes = 1
        cf.ppn = cf.np
        cf.cores = cf.np
        cf.totalcores = cf.np

    # RAM and wall time usage
    if pm.oplevel in (1, 2):
        nvars = 20
    elif pm.oplevel in (3, 4, 5):
        nvars = 23
    else:
        nvars = 44
    nm = (nl[0] + 2) * (nl[1] + 2) * (nl[2] + 2)
    cf.pmem = 32 + int(1.2 * nm * nvars * int(cf.dtype[-1]) / 1024 / 1024)
    cf.ram = cf.pmem * cf.ppn
    ss = (pm.nt + 10) * cf.ppn * nm / cf.cores / cf.rate
    sus = int(ss / 3600 * cf.totalcores + 1)
    mm = ss / 60 * 3.0 + 10
    if cf.maxtime:
        mm = min(mm, 60 * cf.maxtime[0] + cf.maxtime[1])
    mm = mm * 3
    hh = mm / 60
    mm = mm % 60
    cf.walltime = '%d:%02d:00' % (hh, mm)  # v1.1.01: 2 times of original hours
    cf.walltime = '1:00:00'  # used in v1.1
    print('Machine: ' + cf.machine)
    print('Cores: %s of %s' % (cf.np, maxtotalcores))
    print('Nodes: %s of %s' % (cf.nodes, cf.maxnodes))
    print('RAM: %sMb of %sMb per node' % (cf.ram, cf.maxram))
    print('Time limit: ' + cf.walltime)
    print('SUs: %s' % sus)
    if cf.maxcores and cf.ppn > cf.maxcores:
        print('Warning: exceeding available cores per node (%s)' % cf.maxcores)
    if cf.ram and cf.ram > cf.maxram:
        print('Warning: exceeding available RAM per node (%sMb)' % cf.maxram)

    # Compile code
    if not cf.prepare:
        return cf
    setup.build(cf.mode, cf.optimize)

    # Create run directory
    print('Run directory: ' + cf.rundir)
    try:
        os.makedirs(cf.rundir)
    except OSError:
        sys.exit('%r exists or cannot be created. Use --force to overwrite.' % cf.rundir)
    for f in 'in', 'out', 'prof', 'stats', 'debug', 'checkpoint':
        os.mkdir(os.path.join(cf.rundir, f))

    # Copy files to run directory
    cwd = os.path.realpath(os.getcwd())
    cf.rundate = time.asctime()
    cf.name = os.path.basename(cf.rundir)
    cf.rundir = os.path.realpath(cf.rundir)
    os.chdir(os.path.realpath(os.path.dirname(__file__)))
    cf.bin = os.path.join('.', 'sord-' + cf.mode + cf.optimize)
    path = os.path.join('bin', 'sord-' + cf.mode + cf.optimize)
    shutil.copy(path, cf.rundir)
    if os.path.isfile('sord.tgz'):
        shutil.copy('sord.tgz', cf.rundir)
    if cf.optimize == 'g':
        for f in glob.glob(os.path.join('src', '*.f90')):
            shutil.copy(f, cf.rundir)
    f = os.path.join('conf', cf.machine, 'templates')
    if not os.path.isdir(f):
        f = os.path.join('conf', 'default', 'templates')
    for d in os.path.join('conf', 'common', 'templates'), f:
        for f in glob.glob(os.path.join(d, '*')):
            ff = os.path.join(cf.rundir, os.path.basename(f))
            out = open(f).read() % cf.__dict__
            open(ff, 'w').write(out)
            shutil.copymode(f, ff)

    # Combine metadata
    meta = util.namespace(pm.__dict__)
    for k in 'name', 'rundate', 'rundir', 'user', 'os_', 'dtype':
        setattr(meta, k, getattr(cf, k))
    meta.indices = {}
    meta.xi = {}
    for f in meta.fieldio:
        op, filename = f[0], f[8]
        if filename != '-':
            meta.indices[filename] = f[7]
            if 'wi' in op:
                meta.xi[filename] = f[4]
    meta.shape = {}
    for k in meta.indices:
        nn = [(i[1] - i[0]) / i[2] + 1 for i in meta.indices[k]]
        nn = [n for n in nn if n > 1]
        if nn == []:
            nn = [1]
        meta.shape[k] = nn

    # Write files
    os.chdir(cf.rundir)
    log = open('log', 'w')
    log.write(starttime + ': setup started\n')
    util.save('conf.py', cf, prune_pattern='(^_)|(^.$)')
    util.save('parameters.py', pm, expand=['fieldio'])
    util.save('meta.py', meta, expand=['shape', 'xi', 'indices', 'fieldio'])

    # Return to initial directory
    os.chdir(cwd)
    return cf
import psycopg2
from configure import configure

db = configure()
conn = psycopg2.connect(database="cricket",
                        user="******",
                        password="******",
                        host='localhost',
                        port=5432)
a = 3 + 5
conn.close()
import sys
sys.path.append("./python_files/")
import configure

configuration = configure.configure()
print(configuration)
configuration.read()
print(configuration)
configuration.write_all()
def package(self):
    configure.configure(self,
                        os.path.join("win32", "physicaletoys.nsi.in"),
                        os.path.join(self.tmpDir(), "physicaletoys.nsi"))
def main():
    # Dictionaries linking arguments and functions to call
    PREPARE_FUNCTIONS = {
        'all': All,
        'organize': OrganiseAllFiles,
        'filter': FilterAllOrganisedFiles,
        'envelope': ExtractAllEnvelopes,
        'label': GenerateLabelData,
        'input': GenerateInputData
    }
    CNN_FUNCTIONS = {
        'train': TrainAndPlotLoss,
        'eval': EvaluateOneWavFile,      # Applies the CNN to one specified file
        'evalnoise': EvaluateWithNoise,  # Applies the CNN to one specified file, with noise
        'evalrand': EvaluateRandom
    }
    PLOT_FUNCTIONS = {'gtg': PlotEnvelopesAndFormantsFromFile}

    # Help texts for some argument groups
    preparationHelpText = (
        "Data Processing Commands:\n"
        "\torganize:\tOrganizes the files as needed for the rest\n"
        "\t\t\t(Check OrganiseFiles.py documentation)\n"
        "\tfilter:\t\tApplies the GammaTone FilterBank to the organized files.\n"
        "\t\t\tSaves its outputs in .GFB.npy format\n"
        "\tenvelope:\tExtracts the filtered files' envelopes.\n"
        "\t\t\tUsing --cutoff CUTOFF as low pass filter cutoff frequency.\n"
        "\t\t\tSaves them in .ENV1.npy format\n"
        "\tlabel:\t\tGenerates Labeling data for the CNN\n"
        "\tinput:\t\tGenerates Input data for the CNN, requires label first\n"
        "\tall:\t\tDoes all of the above, can take some time.")
    cnnHelpText = (
        "CNN Related Commands:\n"
        "\ttrain:\tTrains the CNN.\n"
        "\t\tUse --file to give the path to an input data numpy matrix.\n"
        "\t\tOtherwise, uses the input_data.npy file in the trainingData/ directory.\n"
        "\teval:\tEvaluates a keras model using one WAV file.\n"
        "\tevalrand:\tEvaluates all the .WAV files in resources/f2cnn/* in a random order.\n"
        "\t\tMay be interrupted whenever, if needed.")
    fileHelpText = "Used to give a file path as an argument to some scripts."
    inputHelpText = "Used to give a path to an input numpy file as an argument to some scripts."
    labelHelpText = "Used to give a path to a label csv file as an argument to some scripts."
    modelHelpText = "Used to give the path to a keras model as an argument to some scripts."

    # ##### PARSING
    parser = argparse.ArgumentParser(
        description="F2CNN Project's entry script.",
        epilog="For additional information, add -h after any positional argument")
    parser.add_argument('--configure', '-c', action='store_true',
                        help='Generates a configuration file for the project')
    subparsers = parser.add_subparsers()

    # Parser for data processing purposes
    parser_prepare = subparsers.add_parser(
        'prepare',
        help='Runs the command given in argument.',
        formatter_class=argparse.RawTextHelpFormatter)
    parser_prepare.add_argument(
        '--cutoff', '-c', action='store', dest='CUTOFF', type=int,
        help="If used, a low pass filter with the given cutoff frequency will be applied")
    parser_prepare.add_argument('prepare_command', choices=PREPARE_FUNCTIONS.keys(),
                                help=preparationHelpText)
    parser_prepare.add_argument('--file', '-f', action='store', dest='file',
                                nargs='?', help=fileHelpText)
    parser_prepare.add_argument('--input', '-i', action='store', dest='inputFile',
                                nargs='?', help=inputHelpText)
    parser_prepare.add_argument('--label', '-l', action='store', dest='labelFile',
                                nargs='?', help=labelHelpText)

    # Parser for plotting purposes
    parser_plot = subparsers.add_parser(
        'plot', help='For plotting spectrogram-like figures from .WAV files.')
    parser_plot.add_argument(
        'plot_type', choices=PLOT_FUNCTIONS.keys(),
        help="gtg: Plots a spectrogram-like figure from the output of a GammaTone "
             "FilterBank applied to the given file, and if a .FB file exists in the "
             "dir, also plots the Formants.")
    parser_plot.add_argument('--file', '-f', action='store', dest='file',
                             nargs='?', help=fileHelpText)

    # Parser for the CNN
    parser_cnn = subparsers.add_parser(
        'cnn', help='Commands related to training, testing and using the CNN.',
        formatter_class=argparse.RawTextHelpFormatter)
    parser_cnn.add_argument('--file', '-f', action='store', dest='file',
                            nargs='?', help=fileHelpText)
    parser_cnn.add_argument('--input', '-i', action='store', dest='inputFile',
                            nargs='?', help=inputHelpText)
    parser_cnn.add_argument('--label', '-l', action='store', dest='labelFile',
                            nargs='?', help=labelHelpText)
    parser_cnn.add_argument('--model', '-m', action='store', dest='model',
                            nargs='?', help=modelHelpText)
    parser_cnn.add_argument('cnn_command', choices=CNN_FUNCTIONS.keys(),
                            help=cnnHelpText)
    parser_cnn.add_argument('--count', '-c', action='store', type=int,
                            help="Number of files to be evaluated")
    parser_cnn.add_argument('--lpf', action='store', type=int, dest='CUTOFF',
                            help="Use Low Pass Filtering on Input Data")
    parser_cnn.add_argument('--noise', '-n', action='store', type=float, dest='SNRdB',
                            help="To use with evalnoise to give a SNR in dB.")

    # Process the input arguments
    args = parser.parse_args()
    # print("Arguments:")
    # for arg in args.__dict__.keys():
    #     print("\t{}: {}".format(arg, args.__dict__[arg]))

    # Call functions according to the arguments
    if 'prepare_command' in args:
        prepare_args = {}
        if args.prepare_command in ['envelope', 'input', 'all']:
            # In case we need to use a low pass filter
            prepare_args['LPF'] = False if args.CUTOFF is None else True
            prepare_args['CUTOFF'] = args.CUTOFF
        if args.prepare_command == 'input':
            if args.labelFile is not None:
                prepare_args['labelFile'] = args.labelFile
            if args.inputFile is not None:
                prepare_args['inputFile'] = args.inputFile
        PREPARE_FUNCTIONS[args.prepare_command](**prepare_args)
    elif 'plot_type' in args:
        if args.file is None:
            print("Please use --file or -f to give input file")
        else:
            print("Plotting for file {}...".format(args.file))
            PlotEnvelopesAndFormantsFromFile(args.file)
    elif 'cnn_command' in args:
        if args.cnn_command == 'train':
            inputFile = args.file or os.path.join('trainingData', 'last_input_data.npy')
            labelFile = args.labelFile or os.path.join('trainingData', 'label_data.csv')
            if not os.path.isfile(inputFile):
                print("Please first generate the input data file with 'prepare input',\n"
                      "or give a path to an input data file with --input")
                print("Reminder: input data files generated with 'prepare input' are stored in\n"
                      "trainingData/ as 'input_data_LPFX.npy' or 'input_data_NOLPF.npy',\n"
                      "depending on Low Pass Filtering used.")
            if not os.path.isfile(labelFile):
                print("Please first generate a label data file with 'prepare label',\n"
                      "or give a path to a label data file with --label")
                print("Reminder: label data files generated with 'prepare label' are stored in\n"
                      "trainingData/ as 'label_data.csv'.")
            CNN_FUNCTIONS[args.cnn_command](labelFile=labelFile, inputFile=inputFile)
            return
        elif 'file' in args and args.file is not None:
            evalArgs = {'file': args.file}
            if 'CUTOFF' in args and args.CUTOFF is not None:
                evalArgs['LPF'] = True
                evalArgs['CUTOFF'] = args.CUTOFF
            if 'model' in args and args.model is not None:
                evalArgs['model'] = args.model
            if args.cnn_command == 'evalnoise' and 'SNRdB' in args and args.SNRdB is not None:
                evalArgs['SNRdB'] = args.SNRdB
            if args.cnn_command == 'evalrand' and 'count' in args and args.count is not None:
                evalArgs['COUNT'] = args.count
            CNN_FUNCTIONS[args.cnn_command](**evalArgs)
    elif args.configure:
        configure()
    else:
        print("No valid command given.")
        print("For help, use python3 f2cnn.py --help or -h, "
              "or check the documentation on github.")
# Basic parameters
kilo = 1000
mega = 1000 * kilo
giga = 1000 * mega
tera = 1000 * giga
peta = 1000 * tera
seconds_per_year = 86400 * 365
seconds_per_month = 86400 * 30
running_time = 7.8E06

modelNames = None
if len(sys.argv) > 1:
    modelNames = []
    for a in sys.argv[1:]:
        modelNames = modelNames + a.split(',')

model = configure(modelNames)

# The very important list of years
YEARS = list(range(model['start_year'], model['end_year'] + 1))

# Get the performance year by year, which includes the software improvement factor
reco_time = {year: performance_by_year(model, year, 'RECO', data_type='data')[0]
             for year in YEARS}
lhc_sim_time = {year: performance_by_year(model, year, 'GENSIM', data_type='mc', kind='2017')[0] +
                      performance_by_year(model, year, 'DIGI', data_type='mc', kind='2017')[0] +
                      performance_by_year(model, year, 'RECO', data_type='mc', kind='2017')[0]
                for year in YEARS}
hllhc_sim_time = {year: performance_by_year(model, year, 'GENSIM',
def __init__(self, parent=None):
    super(MainWindow, self).__init__()
    self.setFixedSize(800, 600)

    # WeChat login handler
    self.wechat_handle = run_wechat()
    # Worker thread that runs the WeChat client
    self.thread = QThread()
    # Connect the login-state signals to their slots
    self.wechat_handle.login_signal.connect(self.login_ui_set)
    self.wechat_handle.logout_signal.connect(self.logout_ui_set)
    self.wechat_handle.get_username_signal.connect(self.get_uername_success)
    self.wechat_handle.msg_withdraw_signal.connect(self.show_withdraw_msg)
    self.wechat_handle.robot_reply_signal.connect(self.show_robot_reply_msg)
    # Move the handler onto the worker thread
    self.wechat_handle.moveToThread(self.thread)
    # Connect the thread-exit signal
    self.wechat_handle.finished.connect(self.thread.quit)
    # Connect the thread start function
    self.thread.started.connect(self.wechat_handle.log_in)

    # Worker thread for friend analysis
    self.analyze = analyze_friends()
    self.analyze_thread = QThread()
    self.analyze.moveToThread(self.analyze_thread)
    self.analyze_thread.started.connect(self.analyze.do_analyze)
    self.analyze.finished.connect(self.analyze_friends_finished)
    self.analyze.finished.connect(self.thread.quit)

    self.ui = Ui_wechat_tools()
    self.ui.setupUi(self)

    # Tray icon
    self.create_actions()
    self.create_tray_icon()
    self.trayIcon.activated.connect(self.icon_activated)

    # Menu bar
    self.ui.file_quit.triggered.connect(self.close)
    self.ui.setting_file_storage_path.triggered.connect(self.setting_cliked)
    self.ui.help_about.triggered.connect(self.help_about_clicked)
    self.ui.help_guide.triggered.connect(self.help_guide_clicked)
    self.ui.help_contact.triggered.connect(self.help_contact_clicked)

    # Buttons
    self.ui.open_file_folder.clicked.connect(self.open_file_folder)
    self.ui.button_background.clicked.connect(self.run_in_background)
    self.ui.clear_display.clicked.connect(self.ui_show_clear)
    self.ui.button_login.clicked.connect(self.button_loggin_cliked)
    self.ui.button_analyze.clicked.connect(self.button_analyze_cliked)
    self.ui.button_delete_detection.clicked.connect(self.button_detection_cliked)
    self.ui.button_withdraw.clicked.connect(self.button_withdraw_message)
    self.ui.button_robot.clicked.connect(self.button_robot_cliked)

    # Disable all function buttons; they become usable after login
    self.disable_function_buttons(True)

    # Read settings from the configuration file
    self.my_config = configure()
    self.read_config_file()