def SvcDoRun(self):
    config.init([])
    config.init_logging()
    p = os.path.dirname(__file__)
    f = open(os.path.join(p, 'log.txt'), 'w')
    sys.stdout = f
    sys.stderr = f

    port = config.getPort()

    httpd = httpserver.TivoHTTPServer(('', int(port)), httpserver.TivoHTTPHandler)
    for section, settings in config.getShares():
        httpd.add_container(section, settings)

    b = beacon.Beacon()
    b.add_service('TiVoMediaServer:%s/http' % port)
    b.start()
    if 'listen' in config.getBeaconAddresses():
        b.listen()

    httpd.set_beacon(b)

    while 1:
        sys.stdout.flush()
        (rx, tx, er) = select.select((httpd,), (), (), 5)
        for sck in rx:
            sck.handle_request()
        rc = win32event.WaitForSingleObject(self.stop_event, 5)
        if rc == win32event.WAIT_OBJECT_0:
            b.stop()
            break
def run():
    try:
        start_time = time()
        init()
        log.info("Start time %s" % (start_time))
        log.debug("Checking for new data")
        if not is_new_data_available():
            return
        log.debug("New data detected getting results from API")
        # New data detected, run the API consumption process
        get_results_API(tmp_storage)
        log.debug("Finished retrieving the API data")
        # Transform the data
        t_results_API(tmp_storage, final_dictionaries)
        log.debug("Finished transforming the API data")
        write_API_data(final_dictionaries)
        log.debug("Finished writing the JSON results")
        # QuienEsQuien functionality
        log.debug("Start transforming candidates file")
        candidatesQeQ = t_candidates_percentage(tmp_storage)
        log.debug("Finished transforming candidates file")
        write_JSON_file(JSON_DATA_PATH, "quienesquien", candidatesQeQ)
        log.debug("Finished generating QeQ JSON file")
        # FrontPage Ranking visualization
        log.debug("Start transforming front page file")
        front_page_ranking = t_ranking(final_dictionaries)
        log.debug("Finished transforming ranking file")
        write_JSON_file(JSON_DATA_PATH, "anexo", front_page_ranking)
        log.debug("Finished generating ranking JSON file")
        log.info("Execution time: %s seconds ---" % (time() - start_time))
    except Paso2015, e:
        log.error("Exit with exception in %s module" % (str(e)))
def register():
    print 'registering what.cd plugin'
    sb_dir = qc.query('scatterbrainz_dir')
    what_dir = os.path.join(sb_dir, 'external/dbs/what')
    if not os.path.isdir(what_dir):
        os.mkdir(what_dir)
    print "Register what.cd for which SB user?"
    sb_user = raw_input('username: ')
    # NOTE: the published source was redacted here ('******'); the check below
    # is reconstructed from the pc.register() call at the end and the printed
    # messages, so the pc.is_registered name is a guess.
    if pc.is_registered('what', sb_user):
        print "Plugin already registered for " + sb_user
        print "... exiting"
        exit(1)
    print "\nWhat.cd username?"
    what_user = raw_input('username: ')
    what_pass = getpass.getpass('password: ')
    print '...configuring'
    wc.configure(what_user, what_pass, what_dir, sb_user)
    print '...initializing'
    wc.init(sb_user)
    import dbs.config.prefs as prefs
    w_user = prefs.readPref('what_user', sb_user)
    print 'Success! set up what.cd plugin for ' + sb_user + ' with what.cd account: ' + w_user
    pc.register('what', sb_user)
    exit(0)
def setup(in_service=False):
    config.init(sys.argv[1:])
    config.init_logging()
    sys.excepthook = exceptionLogger

    port = config.getPort()

    httpd = httpserver.TivoHTTPServer(('', int(port)), httpserver.TivoHTTPHandler)

    logger = logging.getLogger('pyTivo')
    logger.info('Last modified: ' + last_date())
    logger.info('Python: ' + platform.python_version())
    logger.info('System: ' + platform.platform())

    for section, settings in config.getShares():
        httpd.add_container(section, settings)

    b = beacon.Beacon()
    b.add_service('TiVoMediaServer:%s/http' % port)
    b.start()
    if 'listen' in config.getBeaconAddresses():
        b.listen()

    httpd.set_beacon(b)
    httpd.set_service_status(in_service)

    logger.info('pyTivo is ready.')
    return httpd
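# The pyTivo snippets above (SvcDoRun and setup) assume a config module with
# roughly this shape. A minimal sketch for illustration only: the function
# names come from the calls above, but the ConfigParser-backed bodies are
# assumptions, not pyTivo's actual implementation.
import ConfigParser
import logging

_config = ConfigParser.ConfigParser()

def init(argv):
    # A real version would also process argv; only the file read is shown.
    _config.read('pyTivo.conf')

def init_logging():
    logging.basicConfig(level=logging.INFO)

def getPort():
    return _config.get('Server', 'port')

def getBeaconAddresses():
    return _config.get('Server', 'beacon')

def getShares():
    # Every non-Server section is treated as a share: (name, settings dict).
    return [(s, dict(_config.items(s)))
            for s in _config.sections() if s != 'Server']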
def do_build(args):
    #load config
    config_file = os.path.join(args.src_dir, "_config.py")
    try:
        config.init(config_file)
    except config.ConfigNotFoundException:
        print >>sys.stderr, ("No configuration found: %s" % config_file)
        parser.exit(1, "Want to make a new site? Try `blogofile init`\n")
    logger.info("Running user's pre_build() function..")
    writer = Writer(output_dir="_site")
    if config.blog_enabled == True:
        config.pre_build()
        posts = post.parse_posts("_posts")
        if args.include_drafts:
            drafts = post.parse_posts("_drafts", config)
            for p in drafts:
                p.draft = True
        else:
            drafts = None
        writer.write_blog(posts, drafts)
    else:
        #Build the site without a blog
        writer.write_site()
    logger.info("Running user's post_build() function..")
    config.post_build()
def start(server):
    config.init(server.upper())
    from bottle_mysql import MySQLPlugin
    import controllers
    install(MySQLPlugin(**config.db_conf))
    run(host="0.0.0.0", port=config.port, debug=True, reloader=True, server='twisted')
def config_init(args):
    try:
        # Always load the _config.py from the current directory.
        # We already changed to the directory specified with --src-dir
        config.init("_config.py")
    except config.ConfigNotFoundException:  # pragma: no cover
        print >>sys.stderr, "No configuration found in source dir: {0}".format(args.src_dir)
        parser.exit(1, "Want to make a new site? Try `blogofile init`\n")
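# A minimal sketch of the config.init()/ConfigNotFoundException contract the
# blogofile snippets above rely on. The exception name comes from the calls
# above; the body is an assumption for illustration (blogofile's _config.py
# is plain Python, so executing it is a plausible implementation).
import os

class ConfigNotFoundException(Exception):
    pass

def init(config_file):
    if not os.path.isfile(config_file):
        raise ConfigNotFoundException("no such file: %s" % config_file)
    execfile(config_file, globals())  # evaluate the user's _config.py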
def startup(self, *args):
    reload(config)
    config.init(self.directory)
    config.path_root = os.path.dirname(self.directory)
    #changing path_states could cause errors
    config.path_states = "%s/states/" % config.path_root
    self.tempLabel.set_text(str(config.main_temperature))
    self.changeImage()
    self.energy_changed()
    self.processtable()
def run():
    config.init()
    from game import Game
    level = logging.WARNING
    if config.config["debug"]:
        level = logging.DEBUG
    fname = config.config.get("debug_out", None)
    logging.basicConfig(level=level, filename=fname)
    g = Game()
    g.loop()
def setValues(self):
    from ConfigParser import NoSectionError
    import config
    try:
        config.init()
        self.domainBox.setText(config.domain)
        self.usernameBox.setText(config.username)
        self.passBox.setText(config.password)
        self.portBox.setText(config.bindport)
    except NoSectionError, e:
        print e
def main():
    # Read config file
    config.init()

    # base_icon = gtk.gdk.pixbuf_new_from_file('ui/icon.svg')
    # icon = base_icon.scale_simple(128, 128, gtk.gdk.INTERP_BILINEAR)
    # gtk.window_set_default_icon(icon)

    go_obj = Pygo()

    # Let's see if we can avoid using threads in this implementation
    # gtk.gdk.threads_init()
    # gtk.gdk.threads_enter()
    gtk.main()
def init():
    global _config_profile
    global _base_dir

    _config_profile = os.environ.get('BUILDTOOL_PROFILE', config.DEFAULT_PROFILE)
    if _config_profile and (not packageinfo.valid_name(_config_profile)):
        raise ValueError('Invalid profile name: ' + _config_profile)

    _base_dir = os.environ.get('BUILDTOOL_BASE_DIR', os.getcwd())

    config.init(_config_profile)
    toolchain.init()
    packageinfo.init(_base_dir, _config_profile)
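# Hypothetical use of the buildtool init() above: profile and base directory
# come from the environment, so a caller can select them before initializing.
# The values here are made up for illustration.
import os
os.environ['BUILDTOOL_PROFILE'] = 'release'
os.environ['BUILDTOOL_BASE_DIR'] = '/tmp/build'
init()  # reads both variables, validates the profile name, then initializes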
def main(argv):
    config.init()
    confFile = None
    try:
        opts, args = getopt.getopt(argv, "hc:i", "config=")
    except getopt.GetoptError:
        print 'server.py -c <configfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'server.py -c <configfile> [-i]'
            print 'To initialize a configuration file: use switch -i'
            sys.exit()
        elif opt in ("-c", "--config"):
            confFile = arg
    Config = ConfigParser.ConfigParser()
    for opt, arg in opts:
        if opt == '-i':
            if confFile != None:
                cfgfile = open(confFile, 'w')
                Config.add_section('Server')
                Config.add_section('Database')
                Config.set('Server', 'Port', 8080)
                Config.set('Database', 'File', 'rcb.db')
                Config.set('Database', 'Driver', 'sqlite')
                Config.write(cfgfile)
                cfgfile.close()
                print 'New configuration file has been created. Change the parameters as needed and start the program with -c flag.'
                sys.exit()
            else:
                print 'Please specify the configuration file name. Use -c flag when using -i option.'
                sys.exit(2)
    if confFile == None:
        Config.read("/Users/harh/Codes/ZHAW/Eclipse/workspace/icclab-rcb/config.ini")
    else:
        Config.read(confFile)
    dbPath = Config.get("Database", "File")
    config.globals.append(dbPath)
    #Now performing sanitation checks on the database
    dbtest = checkdbconsistency(dbPath)
    dbinitstatus = True
    if dbtest != True:
        dbinitstatus = initializedb(dbPath)
    serverPort = Config.get("Server", "Port")
    if dbinitstatus != False:
        print "Starting the ICCLab RCB Online Service\n"
        startServer('localhost', serverPort)
    else:
        print 'DB is not consistent, and it could not be initialized properly!'
def main(debug=False):
    """ Read the configuration file """
    config.init(CONFIGFILE)
    ltime = config.get('Run', 'time')
    ntime = now()
    if ltime is not None:
        print 'Last time was {}.'.format(ltime)
        otime = asc2time(ltime)
    else:
        print 'Never run before.'
        otime = ntime - 30*24*60*60
    print 'Current time is {}.'.format(time2asc(ntime))
    if ntime - otime < 86400:
        otime = (int(ntime/3600) - 24)*3600
    print 'Reading since {}.'.format(time2asc(otime))

    """ Read the database """
    d = {'time': [ntime, otime], 'data': {}, 'keys': {}}
    with getdb() as db:
        d = db.run(d)

    """ Form the document """
    docfn = "{}_{}.pdf".format(
        config.get('Report', 'namebase', 'report'),
        time2fmt('%Y-%m-%d(%H)', ntime))
    print docfn
    #try:
    report(docfn, d)
    #except Exception, exc:
    #    config.close()
    #    raise exc

    """ Email the document """
    txt = EMAILMSG.format(
        config.get('Site', 'name'),
        time2esk(ntime), time2hms(ntime),
        time2esk(conf2time('Run', 'from', otime)),
        time2hms(conf2time('Run', 'from', otime)),
        time2esk(conf2time('Run', 'to', ntime)),
        time2hms(conf2time('Run', 'to', ntime)))
    if debug:
        emailer.nsend(txt)
    else:
        emailer.send(txt, [docfn])

    """ Saving configuration """
    config.set('Run', 'time', time2asc(ntime))
    config.close()
def main():
    config.init()
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'gfteio')
        # executes program, based on options, executes training or filtering
        args_hash = {}
        for opt in opts:
            args_hash[opt[0]] = _split_args(opt[1])
        # TODO, handle arguments, assert no clashes
        print "Filtering..."
        print "Done!"
    except getopt.GetoptError as e:
        print e.args[0]
def init():
    config.init()
    cp = config.load()
    if cp.has_option('global', 'source_manager'):
        cla = cp.get('global', 'source_manager')
        logger.debug(cla)
        cla = resolve(cla)
    else:
        cla = source.SimpleSourceManager
    sm = cla(cp)
    global player
    player = Player(sm)
    player.start()
    saveCookieThread = config.SaveCookie()
    saveCookieThread.start()
def setup(in_service=False):
    config.init(sys.argv[1:])
    config.init_logging()
    sys.excepthook = exceptionLogger

    port = config.getPort()

    httpd = httpserver.TivoHTTPServer(('', int(port)), httpserver.TivoHTTPHandler)

    logger = logging.getLogger('pyTivo')
    logger.info('Last modified: ' + last_date())
    logger.info('Python: ' + platform.python_version())
    logger.info('System: ' + platform.platform())

    for section, settings in config.getShares():
        httpd.add_container(section, settings)

        # Precaching of files: does a recursive list of base path
        if settings.get('precache', 'False').lower() == 'true':
            plugin = GetPlugin(settings.get('type'))
            if hasattr(plugin, 'pre_cache'):
                logger.info('Pre-caching the ' + section + ' share.')
                pre_cache_filter = getattr(plugin, 'pre_cache')

                def build_recursive_list(path):
                    try:
                        for f in os.listdir(path):
                            f = os.path.join(path, f)
                            if os.path.isdir(f):
                                build_recursive_list(f)
                            else:
                                pre_cache_filter(f)
                    except:
                        pass

                build_recursive_list(settings.get('path'))

    b = beacon.Beacon()
    b.add_service('TiVoMediaServer:%s/http' % port)
    b.start()
    if 'listen' in config.getBeaconAddresses():
        b.listen()

    httpd.set_beacon(b)
    httpd.set_service_status(in_service)

    logger.info('pyTivo is ready.')
    return httpd
def main():
    myQuestionLib = QuestionLib()
    db = config.init()
    cursor = db.cursor()
    '''get topic from database'''
    cursor.execute("SELECT * FROM ZHIHUHOT_FULL_DATA.QUESTION")
    results = cursor.fetchall()
    for result in results:
        if result[4] == 0 and result[5] == 0 and result[8] == 0:
            continue
        myQuestionLib.questions.append((result[1], result[2], result[3], result[4], result[5], result[8]))
        if result[1] not in myQuestionLib.topics:
            myQuestionLib.topics.append(result[1])
    while 1:
        print "Hot Question of Zhihu"
        print "Select Actions"
        print "1. Topics List"
        print "2. Hottest question from a topic"
        print "3. Hottest question from all topics"
        print "4. Exit"
        response = raw_input()
        if response == "1":
            listTopic(myQuestionLib)
        elif response == "2":
            hotQuestionOfTopic(myQuestionLib)
        elif response == "3":
            hotQuestionOfAllTopics(myQuestionLib)
        elif response == "4":
            db.close()
            exit()
        else:
            print "Incorrect Input, Please Enter Again"
def do_build(args):
    #load config
    try:
        # Always load the _config.py from the current directory.
        # We already changed to the directory specified with --src-dir
        config.init("_config.py")
    except config.ConfigNotFoundException:
        print >>sys.stderr, ("No configuration found in source dir: %s" % args.src_dir)
        parser.exit(1, "Want to make a new site? Try `blogofile init`\n")
    writer = Writer(output_dir="_site")
    logger.debug("Running user's pre_build() function..")
    config.pre_build()
    writer.write_site()
    logger.debug("Running user's post_build() function..")
    config.post_build()
def __init__(self, globalconfig, localconfig):
    self.globalconfig = globalconfig
    self.localconfig = localconfig
    self.server = Rest()
    thread = threading.Thread(target=self.serverthread)
    thread.start()
    # Grab the config and output our values
    self.myconfig = str(config.init()).replace(',', ',\n')
def init(config_file=None):
    cfg = config.init(config_file)
    lib = cfg.get_path('library')
    if not os.path.isdir(lib):
        os.makedirs(lib)
    model.init(os.path.join(lib, HIGURASHI_DB_NAME))
def setup(in_service=False):
    config.init(sys.argv[1:])
    config.init_logging()
    sys.excepthook = exceptionLogger

    port = config.getPort()

    httpd = httpserver.TivoHTTPServer(("", int(port)), httpserver.TivoHTTPHandler)
    logger = logging.getLogger("pyTivo")

    for section, settings in config.getShares():
        httpd.add_container(section, settings)

        # Precaching of files: does a recursive list of base path
        if settings.get("precache", "False").lower() == "true":
            plugin = GetPlugin(settings.get("type"))
            if hasattr(plugin, "pre_cache"):
                logger.info("Pre-caching the " + section + " share.")
                pre_cache_filter = getattr(plugin, "pre_cache")

                def build_recursive_list(path):
                    try:
                        for f in os.listdir(path):
                            f = os.path.join(path, f)
                            if os.path.isdir(f):
                                build_recursive_list(f)
                            else:
                                pre_cache_filter(f)
                    except:
                        pass

                build_recursive_list(settings.get("path"))

    b = beacon.Beacon()
    b.add_service("TiVoMediaServer:%s/http" % port)
    b.start()
    if "listen" in config.getBeaconAddresses():
        b.listen()

    httpd.set_beacon(b)
    httpd.set_service_status(in_service)

    logger.info("pyTivo is ready.")
    return httpd
def main():
    config.init()
    # Should we have some kind of sanity-check module/function somewhere?
    fnames = [os.path.basename(f) for f in glob.glob(os.path.join(config.path_pot, '*'))]
    if 'pos.con' in fnames:
        print "WARNING: pos.con found in potfiles path. Are you sure you want this? It will overwrite the pos.con in the calculation directory when your jobs are being run."
    job = config.main_job.lower()
    if job == 'akmc':
        akmc.main()
    elif job == 'parallel_replica' or job == 'unbiased_parallel_replica':
        parallelreplica.main()
    elif job == 'basin_hopping':
        basinhopping.main()
    elif job == 'escape_rate':
        escaperate.main()
    else:
        import communicator
        import shutil
        config.path_scratch = config.path_root
        comm = communicator.get_communicator()
        invariants = {}
        # Merge potential files into invariants
        invariants = dict(invariants, **io.load_potfiles(config.path_pot))
        job = {}
        files = [f for f in os.listdir(".") if os.path.isfile(f)]
        for f in files:
            fh = open(f)
            if len(f.split('.')) > 1:
                #f_passed = f.split('.')[0] + "_passed." + f.split('.')[1]
                f_passed = f.split('.')[0] + "." + f.split('.')[1]
                job[f_passed] = StringIO(fh.read())
            fh.close()
        job["id"] = "output"
        if os.path.isdir("output_old"):
            shutil.rmtree("output_old")
        if os.path.isdir("output"):
            shutil.move("output", "output_old")
        comm.submit_jobs([job], invariants)
def main():
    config.init()  # load configuration
    pygame.init()
    eventmanager.init()
    battlecontroller.init()
    #guicontroller.init()
    try:
        #
        # Build world
        #
        cfg = config.config
        pygame.mouse.set_visible(1)
        em = eventmanager.evManager
        cpu = cpucontroller.CPUController()
        kyb = keyboardcontroller.KeyboardController()
        cur = cursorcontroller.CursorController()
        bat = battlecontroller.battle
        #gui = guicontroller.gui

        #testing
        mech = mechsprite.Mech()
        bat.AddSprite(mech)
        bat.Start()

        em.RegisterListener(cpu)
        em.RegisterListener(kyb)
        em.RegisterListener(cur)
        em.RegisterListener(bat)
        #em.RegisterListener(gui)

        cpu.Run()
    except:
        log.exception("Main loop caught fatal exception")
    finally:
        log.info("Finalizing...")
        pygame.quit()
def init():
    if os.name == 'posix':
        # We need to force stereo in many cases.
        try:
            mixer.pre_init(44100, -16, 2)
        except pygame.error:
            pass
    pygame.init()
    config.init(rc_file)
    os.chdir(angrydd_path)
    pygame.display.set_caption("Angry, Drunken Programmers")
    try:
        pygame.display.set_icon(pygame.image.load("angrydd.png"))
    except pygame.error:
        pass
    pygame.mouse.set_visible(False)
    if config.getboolean("settings", "fullscreen"):
        flags = FULLSCREEN
    else:
        flags = 0
    pygame.display.set_mode([800, 600], flags)

    import game
    import menu
    import boxes
    import characters

    boxes.TickBox.sound = load.sound("tick.wav")
    boxes.TickBox.sound.set_volume(0.3)
    boxes.BreakBox.sound = load.sound("break.wav")
    boxes.BreakBox.sound.set_volume(0.3)
    boxes.Special.sound = load.sound("select-confirm.wav")
    boxes.Special.sound.set_volume(0.5)
    game.FallingBoxes.rotate_sound = load.sound("rotate.wav")
    game.FallingBoxes.rotate_sound.set_volume(0.2)
    menu.Menu.bg = load.image("menu-bg.png").convert()
    characters.init()

    mixer.music.load(os.path.join("music", "intro.ogg"))
    mixer.music.play(-1)
def main():
    """
    The main function for starting the Giant Multiplayer Robot client for
    Linux, which creates the account and then does some fancy stuff with it.
    Should call the user interface later.
    """
    config.init()
    userAccount = account.Account(config)
    filemanager.FileManager.saveDirectory = config.config.get("GMR", "SaveDir")
    print 'Your current games are:'
    print userAccount.games
    print 'You have turns for these games:'
    print userAccount.getCurrentTurns()
    for current_turn in userAccount.getCurrentTurns():
        current_turn.playTurn()
        # Probably? waits for the file to change before moving turn. Untested.
    print 'Goodbye.'
def main():
    for opt in sys.argv[1:]:
        if opt in ('-h', '--help'):
            usage()
            return
        elif opt in ('-d', '--dev'):
            config.ENABLE_INSPECTOR = True
        else:
            print "hotot: unrecognized option '%s'" % opt
            usage()
            sys.exit(1)

    try:
        import i18n
    except:
        from gettext import gettext as _

    try:
        import prctl
        prctl.set_name('hotot')
    except:
        pass

    #g_thread_init has been deprecated since version 2.32
    if GLib.check_version(2, 32, 0):
        GObject.threads_init()
    Gdk.threads_init()
    Gtk.init(None)

    config.init()
    agent.app = Hotot()
    Gdk.threads_enter()
    Gtk.main()
    Gdk.threads_leave()
def run():
    utils.init_logging()
    logger.info('Monobox fetcher starting up')
    config.init()
    database.init(config.get('common', 'database_uri'))

    max_age = config.get('fetcher', 'max_age')
    if max_age:
        timedelta = utils.str_to_timedelta(max_age)
        if timedelta is None:
            logger.error('Cannot convert configuration parameter '
                         'max_age (%s) to timedelta' % max_age)
            sys.exit(1)
    else:
        timedelta = None

    urls = config.get('fetcher', 'sc_urls').split(' ')
    listeners_min = config.getint('fetcher', 'listeners_min')
    for url in urls:
        merge_sc_stations(url, listeners_min)

    if timedelta is not None:
        purge_old_stations(timedelta)
def __init__(self):
    self.manager = mp.Manager()
    self.ns = self.manager.Namespace()

    self.config = config.init()
    self.default_midi_device = self.config.get('device', None)

    self.ns.device = 'default'
    self.ns.grid = False

    self.divcc = self.config.get('divcc', None)
    self.divs = [24, 12, 6, 8, 3]  # Defines the order for MIDI control
    self.params = ParamManager(self.ns)
    self.ticks = {
        24: mp.Event(),  # Quarter note
        12: mp.Event(),  # Eighth note
        8: mp.Event(),   # Eighth note triplets
        6: mp.Event(),   # Sixteenth note
        3: mp.Event(),   # 32nd note
    }

    self.generators = self.findGenerators()
    self.looping = {}
    self.armed = []
    self.patterns = {}
    self.osc_servers = {}
import sys
sys.path.append('../')
sys.path.append('/root/')

from loglizer.models import PCA
from loglizer import preprocessing
from workflow.BGL_workflow.data_generator import load_BGL
from workflow import dataloader
import config

datasets = ['BGL', 'HDFS', 'OpenStack']

if __name__ == '__main__':
    for dataset in datasets:
        print('########### Start Invariant Mining on Dataset ' + dataset + ' ###########')
        config.init('PCA_' + dataset)

        if dataset == 'BGL':
            data_instances = config.BGL_data
            (x_train, y_train), (x_test, y_test), (_, _) = load_BGL(data_instances, 0.3, 0.6)

        if dataset == 'HDFS':
            data_instances = config.HDFS_data
            (x_train, y_train), (x_test, y_test), (_, _) = dataloader.load_HDFS(
                data_instances,
                train_ratio=0.3,
                split_type='uniform',
                test_ratio=0.6,
from db import Database
from query import Query
from config import init
import sys
from random import choice, randint, sample, seed

init()

db = Database()
db.open('~/ECS165')

# Student Id and 4 grades
grades_table = db.create_table('Grades', 5, 0)
query = Query(grades_table)
records = {}

seed(3562901)

for i in range(0, 1000):
    key = 92106429 + i
    records[key] = [key, randint(0, 20), randint(0, 20), randint(0, 20), randint(0, 20)]
    query.insert(*records[key])
keys = sorted(list(records.keys()))
print("Insert finished")

# for key in keys:
def main():
    optpar = optparse.OptionParser(usage="usage: %prog [options] config.ini")
    optpar.add_option("-q", "--quiet", action="store_true", dest="quiet",
                      default=False, help="only write to the log file")
    optpar.add_option("-n", "--no-submit", action="store_true", dest="no_submit",
                      default=False,
                      help="don't submit searches; only register finished results")
    optpar.add_option("-R", "--reset", action="store_true", dest="reset",
                      default=False, help="reset the simulation, discarding all data")
    options, args = optpar.parse_args()

    if len(args) > 1:
        print "takes only one positional argument"
    sys.argv = sys.argv[0:1]
    if len(args) == 1:
        sys.argv += args
    if len(sys.argv) > 1:
        config.init(sys.argv[-1])
    else:
        config.init()

    # set options.path_root to be where the config file is if given as an arg
    if config.path_root.strip() == '.' and len(args) == 1:
        config.path_root = os.path.abspath(os.path.dirname(args[0]))
        os.chdir(config.path_root)

    if config.comm_job_bundle_size != 1:
        print "error: Parallel Replica only supports a bundle size of 1"
        sys.exit(1)

    if options.no_submit:
        config.comm_job_buffer_size = 0

    if options.reset:
        res = raw_input("Are you sure you want to reset (all data files will be lost)? (y/N) ").lower()
        if len(res) > 0 and res[0] == 'y':
            rmdirs = [config.path_jobs_out, config.path_jobs_in,
                      config.path_states, config.path_scratch]
            if config.debug_keep_all_results:
                rmdirs.append(os.path.join(config.path_root, "old_searches"))
            for i in rmdirs:
                if os.path.isdir(i):
                    shutil.rmtree(i)
                    #XXX: ugly way to remove all empty directories containing this one
                    os.mkdir(i)
                    os.removedirs(i)
            dynamics_path = os.path.join(config.path_results, "dynamics.txt")
            info_path = os.path.join(config.path_results, "info.txt")
            log_path = os.path.join(config.path_results, "pr.log")
            prng_path = os.path.join(config.path_results, "prng.pkl")
            for i in [info_path, dynamics_path, log_path, prng_path]:
                if os.path.isfile(i):
                    os.remove(i)
            print "Reset"
        sys.exit(0)

    # setup logging
    logging.basicConfig(level=logging.DEBUG,
                        filename=os.path.join(config.path_results, "pr.log"),
                        format="%(asctime)s %(levelname)s:%(name)s: %(message)s",
                        datefmt="%F %T")
    logging.raiseExceptions = False

    if not options.quiet:
        rootlogger = logging.getLogger('')
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        formatter = logging.Formatter("%(message)s")
        console.setFormatter(formatter)
        rootlogger.addHandler(console)

    lock = locking.LockFile(os.path.join(config.path_results, "lockfile"))
    if lock.aquirelock():
        if config.comm_type == 'mpi':
            from mpiwait import mpiwait
            while True:
                mpiwait()
                parallelreplica()
        parallelreplica()
    else:
        logger.warning("Couldn't get lock")
        sys.exit(1)
def train(dataLoader, validate_after=5, resume=False, perform_training=True,
          save_best=False, model_='cnn'):
    """
    Perform training and validation of model.
    Args:
        dataLoader : DataLoader object
        validate_after : Number of epochs after which validation is performed.
                         The model is also saved after this.
        resume : If True, a previously saved model file is loaded.
        perform_training : If False, training step is skipped, and final
                           testing is done.
        save_best : If True, save session for epoch with minimum validation loss.
        model_ : String denoting the neural network model to use (RNN or CNN)
    """
    model = None
    if model_ == 'cnn':
        model = models.cnn_model()
    elif model_ == 'rnn':
        model = models.rnn_model()

    sess = tf.Session()
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    if resume:
        try:
            prev_session = config.resume_ckpt
            saver.restore(sess, prev_session)
            print("Using previous session: {}".format(prev_session))
        except Exception as exp:
            print(exp)
            print("Creating a new session.")

    if save_best:
        MIN_VAL_LOSS = 100000000000

    if perform_training:
        config.init()
        train_writer = tf.summary.FileWriter(os.path.join(config.logdir, "train"), sess.graph)
        valid_writer = tf.summary.FileWriter(os.path.join(config.logdir, "validation"), sess.graph)

        for e in range(config.EPOCHS):
            epoch_loss = 0.0
            for sensor, label in dataLoader.next_train():
                # Run the graph.
                loss, _, tb = sess.run(
                    [model['loss'], model['train'], model['summary']],
                    feed_dict={
                        model['sensor_data']: sensor,
                        model['label']: label,
                        model['training']: True
                    })
                epoch_loss += loss

            avg_loss = epoch_loss / dataLoader.train_batches
            print("Average loss for epoch {} = {}".format(e, avg_loss))

            if e % validate_after == 0:
                val_loss = validation(sess, model, dataLoader, valid_writer, e)
                if save_best:
                    if val_loss < MIN_VAL_LOSS:
                        path = saver.save(sess, config.ckpt, global_step=e)
                        print("Saved model to {}".format(path))
                        MIN_VAL_LOSS = val_loss
                else:
                    path = saver.save(sess, config.ckpt, global_step=e)
                    print("Saved model to {}".format(path))

            train_writer.add_summary(tb, e)

    print("===========================================")
    print("Calculating validation accuracy...")
    accuracies = []
    positives = negatives = 0
    true_positives = true_negatives = false_positives = false_negatives = 0
    for sensor, label in dataLoader.next_validation():
        # Run the graph.
        pred = sess.run(model['prediction'],
                        feed_dict={
                            model['sensor_data']: sensor,
                            model['label']: label,
                            model['training']: False
                        })

        label = np.argmax(label, axis=1)
        positives += np.count_nonzero(label == 1)
        negatives += np.count_nonzero(label == 0)

        # detects the condition when the condition is present.
        true_positives += np.count_nonzero(pred + label == 2)
        # does not detect the condition when the condition is absent.
        true_negatives += np.count_nonzero(pred + label == 0)
        # wrongly indicates that a particular condition or attribute is present.
        false_positives += np.count_nonzero(pred > label)
        # wrongly indicates that a particular condition or attribute is absent.
        false_negatives += np.count_nonzero(pred < label)

        accuracies.append(np.count_nonzero(pred == label) / pred.shape[0] * 100)

    accuracies = np.array(accuracies)
    # print(positives, negatives)
    # print("True positives : {}".format(true_positives))
    # print("False negatives: {}".format(false_negatives))
    # print("False positives: {}".format(false_positives))
    # print("True negatives: {}".format(true_negatives))
    print("Sensitivity: {}".format(true_positives / positives))
    print("Specificity: {}".format(true_negatives / negatives))
    print("Precision: {}".format(true_positives / (true_positives + false_positives)))
    print("Min Validation set accuracy: {} %".format(accuracies.min()))
    print("Max Validation set accuracy: {} %".format(accuracies.max()))
    print("Average Validation set accuracy: {} %".format(accuracies.mean()))

    sess.close()
# Module: main.py
# Created on: 4/25/17
# Author: Jake Sacks
import config
import ddp
import dynamics
import network
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt

# global and MPC variables
config.init()  # initialize global variables
x_0 = np.matrix([0, 0, np.pi, 0]).T  # initial state
x_f = np.matrix([0, 0, 0, 0]).T  # final state
mpc_N = 200
mpc_mode = False
online_training = False

# vectors to keep track of results
x_vect = np.matrix(np.zeros([config.num_states, mpc_N + 1]))
x_vect[:, 0] = x_0
x_pred = x_vect.copy()
u_vect = np.matrix(np.zeros([config.num_inputs, mpc_N]))
dx_vect = np.matrix(np.zeros([config.num_states, mpc_N]))
cost_vect = np.zeros([mpc_N])

# load the neural network
def __init__(self):
    config.init()
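# Most snippets in this collection treat config as module-level global state:
# init() parses a file once, and later reads go straight to module attributes
# (config.port, config.path_root, ...). A minimal sketch of that pattern; the
# attribute names and defaults below are made up for illustration.
import sys

def init(path='app.ini'):
    mod = sys.modules[__name__]
    # A real version would parse `path`; hard-coded defaults shown instead.
    mod.port = 8080
    mod.debug = False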
from log import Log, err


def allowed_upload_file(filename=None):
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in config.conf["global"]["data_extensions"]


def allowed_conf_file(filename=None):
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in config.conf["global"]["recipe_extensions"]


config.init()
config.read_conf()

auth = LoginManager()
app = Flask(__name__)
try:
    app.config['LOGIN_DISABLED'] = config.conf["global"]["api"]["no_auth"]
except:
    pass
app.wsgi_app = ProxyFix(app.wsgi_app)
app.secret_key = config.conf["global"]["api"]["secret_key"]
auth.session_protection = "strong"
auth.init_app(app)

api = Api(app, version="0.1", title="matchID API",
def dump(output, metadata):
    for key in metadata:
        value = metadata[key]
        if type(value) == list:
            for item in value:
                output.write('%s: %s\n' % (key, item.encode('utf-8')))
        else:
            if key in HUMAN and value in HUMAN[key]:
                output.write('%s: %s\n' % (key, HUMAN[key][value]))
            else:
                output.write('%s: %s\n' % (key, value.encode('utf-8')))

if __name__ == '__main__':
    if len(sys.argv) > 1:
        metadata = {}
        config.init([])
        logging.basicConfig()
        fname = force_utf8(sys.argv[1])
        ext = os.path.splitext(fname)[1].lower()
        if ext == '.tivo':
            metadata.update(from_tivo(fname))
        elif ext in ['.mp4', '.m4v', '.mov']:
            metadata.update(from_moov(fname))
        elif ext in ['.dvr-ms', '.asf', '.wmv']:
            metadata.update(from_dvrms(fname))
        elif ext == '.wtv':
            vInfo = plugins.video.transcode.video_info(fname)
            metadata.update(from_mscore(vInfo['rawmeta']))
        dump(sys.stdout, metadata)
def main(args):
    config.init()
    feat_net = 'resnet50'
    print("")
    print("")
    print("Running experiment on net: " + feat_net)

    trainset_name = config.DATASET + '_so'
    trainset = common.feat_dataset(trainset_name, feat_net)
    validset = common.feat_dataset(config.DATASET + '_so_test', feat_net)
    valid_data = validset.data, validset.getLabelsVec()
    valid_split = 0

    in_shape = config.feat_shape_dict[feat_net]
    out_shape = trainset.labelsize

    def addestra(model, name, optimizer, epochs, callbacks, chk_period=-1, loss_in_name=False):
        shallow_path = common.shallow_path(name, trainset_name, feat_net, ext=False)
        if chk_period > 0:
            name = shallow_path + '.weights.{epoch:02d}' + ('-{val_loss:.2f}.h5' if loss_in_name else '.h5')
            checkpoint = ModelCheckpoint(name, monitor='val_acc', save_weights_only=True, period=chk_period)
            callbacks.append(checkpoint)

        bestpoint = ModelCheckpoint(shallow_path + '.weights.best.h5', monitor='val_loss',
                                    save_best_only=True, save_weights_only=True)
        callbacks.append(bestpoint)

        model.compile(optimizer=optimizer, loss=LOSS, metrics=METRIC)
        #model.summary()
        #print("Valid split: " + str(valid_split))
        model.fit(trainset.data, trainset.getLabelsVec(),
                  nb_epoch=epochs,
                  batch_size=BATCH,
                  callbacks=callbacks,
                  shuffle=True,
                  validation_data=valid_data,
                  validation_split=valid_split)
        save_model_json(model, shallow_path + '.json')
        model.save_weights(shallow_path + '.weights.last.h5')

    def for_resnet50():
        early_stopping = EarlyStopping('val_loss', min_delta=0.01, patience=7, verbose=1)
        reduceLR = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2, verbose=1,
                                     epsilon=0.01, cooldown=0, min_lr=0)
        callbacks = [early_stopping, reduceLR]
        A = new_model(in_shape, out_shape)
        optimizer = SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
        addestra(A, "A_5ep", optimizer, 100, callbacks, chk_period=1, loss_in_name=True)

        shallow_path = common.shallow_path("A_5ep", trainset_name, feat_net, ext=False)

        early_stopping = EarlyStopping('val_loss', min_delta=0.001, patience=10, verbose=1)
        reduceLR = ReduceLROnPlateau('val_loss', factor=0.1, patience=4, verbose=1, epsilon=0.0001)
        callbacks = [early_stopping, reduceLR]
        LF = new_model(in_shape, out_shape, lf=True, lf_decay=0.03)
        LF.load_weights(shallow_path + '.weights.best.h5', by_name=True)
        optimizer = SGD(lr=0.001, momentum=0.9, decay=1e-6, nesterov=True)
        addestra(LF, "LF_FT_A", optimizer, epochs=100, callbacks=callbacks, chk_period=1)

    def for_vgg():
        pass
        # m = new_model(in_shape, out_shape, hiddens=[Hidden(4096, 0.5), Hidden(4096, 0.5)])
        # addestra(m, "H4K_H4K", SGD(lr=0.0001, momentum=0.9, decay=1e-6, nesterov=True), epochs=100, callbacks=callbacks)
        #
        # m = new_model(in_shape, out_shape, hiddens=[Hidden(4096, 0.5)])
        # addestra(m, "H4K", SGD(lr=0.0001, momentum=0.9, decay=1e-6, nesterov=True), epochs=100, callbacks=callbacks)

    if feat_net == 'resnet50':
        for_resnet50()
    if feat_net.startswith('vgg'):
        for_vgg()
def put(self, dataset, action):
    '''action = validation : configure the frontend to point to this dataset'''
    import config
    config.init()
    config.read_conf()
    if (action == "validation"):
        if (not (dataset in config.conf["datasets"].keys())):
            return api.abort(404, {"dataset": dataset, "status": "dataset not found"})
        if not ("validation" in config.conf["datasets"][dataset].keys()):
            return api.abort(403, {"dataset": dataset, "status": "validation not allowed"})
        if ((config.conf["datasets"][dataset]["validation"] == True) |
                (isinstance(config.conf["datasets"][dataset]["validation"], OrderedDict))):
            try:
                props = {}
                try:
                    cfg = deepupdate(config.conf["global"]["validation"],
                                     config.conf["datasets"][dataset]["validation"])
                except:
                    cfg = config.conf["global"]["validation"]
                for conf in cfg.keys():
                    configfile = os.path.join(
                        config.conf["global"]["paths"]["validation"],
                        secure_filename(conf + ".json"))
                    dic = {
                        "prefix": config.conf["global"]["api"]["prefix"],
                        "domain": config.conf["global"]["api"]["domain"],
                        "dataset": dataset
                    }
                    props[conf] = replace_dict(cfg[conf], dic)
                    # with open(configfile, 'w') as outfile:
                    #     json.dump(props[conf], outfile, indent=2)
                return {"dataset": dataset, "status": "to validation", "props": props}
            except:
                return api.abort(500, {"dataset": dataset, "status": "error: " + err()})
        else:
            return api.abort(403, {"dataset": dataset, "status": "validation not allowed"})
    elif (action == "search"):
        if (not (dataset in config.conf["datasets"].keys())):
            return api.abort(404, {"dataset": dataset, "status": "dataset not found"})
        if not ("search" in config.conf["datasets"][dataset].keys()):
            return api.abort(403, {"dataset": dataset, "status": "search not allowed"})
        if ((config.conf["datasets"][dataset]["search"] == True) |
                (isinstance(config.conf["datasets"][dataset]["search"], OrderedDict))):
            try:
                props = {}
                try:
                    cfg = deepupdate(config.conf["global"]["search"],
                                     config.conf["datasets"][dataset]["search"])
                except:
                    cfg = config.conf["global"]["search"]
                # NB: the loop variable here is renamed from `config` to `conf`;
                # the original shadowed the imported config module and would
                # have broken the config.conf lookups below.
                for conf in cfg.keys():
                    configfile = os.path.join(
                        config.conf["global"]["paths"]["search"],
                        secure_filename(conf + ".json"))
                    dic = {
                        "prefix": config.conf["global"]["api"]["prefix"],
                        "domain": config.conf["global"]["api"]["domain"],
                        "dataset": dataset
                    }
                    props[conf] = replace_dict(cfg[conf], dic)
                    # with open(configfile, 'w') as outfile:
                    #     json.dump(props[conf], outfile, indent=2)
                return {"dataset": dataset, "status": "to search", "props": props}
            except:
                return api.abort(500, {"dataset": dataset, "status": "error: " + err()})
        else:
            return api.abort(403, {"dataset": dataset, "status": "search not allowed"})
    else:
        api.abort(404)
def main():
    locally_seen_macs = set()  # Set that contains unique locally seen beacons
    locally_seen_uids = set()  # Set that contains unique locally seen beacons

    authorized = set()
    authorized.add('b0b448fba565')
    authorized.add('123456781234123412341234567890ab')
    unauthorized = set(['b0b448c87401'])

    config.init()
    buzzer.init()

    log_level = config.getint('BASE', 'log_level')
    init_logger(log_level)
    __logger.info('Started with log level: ' + logging.getLevelName(log_level))

    #autoupdate.check()
    last_update_check = datetime.now()

    # need to try catch and retry this as it some times fails...
    subprocess.call([config.HCICONFIG_FILE_PATH, 'hci0', 'up'])
    init_ble_advertiser()

    ble_queue = Queue.Queue()
    ble_thread = threading.Thread(target=ble_scanner, args=(ble_queue,))
    ble_thread.daemon = True
    ble_thread.start()
    __logger.info('BLE scanner thread started')

    last_respawn_date = datetime.strptime(config.get('DEVICE', 'last_respawn_date'), '%Y-%m-%d').date()

    print "Going into the main loop..."
    print "Authorized: ", authorized
    print "Unauthorized: ", unauthorized
    try:
        while True:
            now = datetime.now()
            now_timestamp = int(time.time() * 1000)

            # if configured daily_respawn_hour, stop the ble_thread and respawn the process
            # if now.date() > last_respawn_date and now.hour == config.getint('BASE', 'daily_respawn_hour'):
            #     autoupdate.respawn_script(ble_thread)
            #     autoupdate.restart_pi()
            # elif now > last_update_check + timedelta(minutes=5):
            #     autoupdate.check(ble_thread)
            #     last_update_check = datetime.now()

            # Take new sightings from queue
            sightings = []
            for i in range(100):
                if ble_queue.empty():
                    break
                else:
                    sighting = ble_queue.get()
                    sighting_key = sighting['beacon_mac'] + sighting['beacon_uid']
                    ignore_sighting = now_timestamp - __invalid_detector_check_timestamp < IGNORE_INTERVAL
                    if not ignore_sighting:
                        __ignore_sightings_lock.acquire()
                        ignore_sighting_timestamp = __ignore_sightings.get(sighting_key, 0)
                        if ignore_sighting_timestamp > 0 and \
                                now_timestamp - ignore_sighting_timestamp < IGNORE_INTERVAL:
                            ignore_sighting = True
                        elif sighting_key in __ignore_sightings:
                            del __ignore_sightings[sighting_key]
                        __ignore_sightings_lock.release()
                    if not ignore_sighting:
                        sightings.append(sighting)
                        # TODO Only add this beacon to the list if we have "events" for it
                        ## Probably join all unauthorized lists into one and see if this new exists there or not
                        if sighting['beacon_mac'] != '':
                            locally_seen_macs.add(sighting['beacon_mac'])  # Append the beacon_mac of the latest sighting
                        if sighting['beacon_uid'] != '':
                            locally_seen_uids.add(sighting['beacon_uid'])  # Append the beacon_uid of the latest sighting
                        # Launch threading.timer here
                    else:
                        print 'sighting ignored: ' + sighting_key

            # print locally_seen

            # Local events handling
            if not locally_seen_macs.isdisjoint(unauthorized) or \
                    not locally_seen_uids.isdisjoint(unauthorized):
                # Rogue beacon is trying to escape!!
                # TODO Add delay to checking authorized sightings
                print "oh oh"
                if (len(locally_seen_macs) == 0 or locally_seen_macs.isdisjoint(authorized)) and \
                        (len(locally_seen_uids) == 0 or locally_seen_uids.isdisjoint(authorized)):
                    # no authorized beacon in sight
                    buzzer.play_alert(3)
                    print "All your base are belong to us."
                locally_seen_macs.clear()
                locally_seen_uids.clear()
            # else:
            #     print "What? Nothing to do..."

            # if new sightings, send them to the server
            if len(sightings) > 0:
                send_sightings(sightings)

            time.sleep(1)
    except Exception:
        buzzer.end()  # Ensure we leave everything nice and clean
        __logger.exception('main() loop failed with error:')
        autoupdate.respawn_script(ble_thread)
import codecs
import config
import sqlite3

config.init('data/bot_config.txt')

## Make the new master database, with tables set up as we want them
def make_new_database():
    db_conn = sqlite3.connect(config.DB_FILENAME)
    db_conn.execute("""CREATE TABLE user_data
                    (racer_id integer,
                    discord_id bigint UNIQUE ON CONFLICT REPLACE,
                    discord_name text,
                    twitch_name text UNIQUE ON CONFLICT ABORT,
                    steam_id int,
                    timezone text,
                    PRIMARY KEY (racer_id))""")
    db_conn.execute("""CREATE TABLE match_data
                    (racer_1_id int REFERENCES user_data (racer_id),
                    racer_2_id int REFERENCES user_data (racer_id),
                    week_number int,
                    timestamp bigint DEFAULT 0,
                    racer_1_wins int DEFAULT 0,
                    racer_2_wins int DEFAULT 0,
                    draws int DEFAULT 0,
                    noplays int DEFAULT 0,
                    cancels int DEFAULT 0,
                    flags int DEFAULT 0,
                    number_of_races int DEFAULT 0,
                    cawmentator_id int DEFAULT 0,
                    PRIMARY KEY (racer_1_id, racer_2_id, week_number) ON CONFLICT REPLACE)""")
if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--class_num", default=3, type=int, help="class number") args = parser.parse_args() if args.class_num == 2: cfg = config.ConfigBinaryClassification() elif args.class_num == 3: cfg = config.ConfigTripleClassification() else: raise ValueError("class number should be 2 or 3") config.init(cfg) # process train and test data x = [] y = [] for file_name in cfg.RAW_DATA_FILES: data_file_path = os.path.join(cfg.RAW_DATA_PATH, file_name) book = xlrd.open_workbook(data_file_path) sheet = book.sheet_by_index(0) rawdata = sheet.col_values(0) tmp_x = [utils.clean_text(line) for line in rawdata] tmp_y = [cfg.label_map[file_name.replace(".xlsx", "")]] * len(tmp_x) x += tmp_x y += tmp_y
import media
import fresh_tomatoes
import httplib
import json
import requests
import config

# Apply API_KEY
API_KEY = config.init()

conn = httplib.HTTPSConnection("api.themoviedb.org")
payload = "{}"

# Get the upcoming movie's id, title, poster_path, through request url
conn.request("GET", "/3/movie/upcoming?page=1&language=en-US&api_key=" + API_KEY, payload)
res = conn.getresponse()
data = res.read()

# JSON the results of the data of all movies
informations = data.decode("utf-8")
json_informations = json.loads(informations)
# print(json_informations)
# print(json_informations["results"][1]["poster_path"])
# print(len(json_informations["results"]))

# iterate data of all movies to obtain their id, title, poster_image_url
movies = []
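# In the snippet above config.init() returns a single secret (the TMDb API
# key) rather than populating module globals. A minimal sketch of such a
# module, assuming the key lives in a plain text file next to the code (the
# file name is made up for illustration):
def init():
    with open('api_key.txt') as f:
        return f.read().strip()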
def main():
    config.init()
    global WORLD_SEQ, AMZ_SEQ, TRUCK_NUM, WORLD_ID

    #Connect the world
    world_socket, WORLD_ID = connectWorld(TRUCK_NUM)
    print('Successfully connected to world with id ' + str(WORLD_ID))
    speedMsg = world_ups_pb2.UCommands()
    speedMsg.simspeed = 100000
    sender(world_socket, speedMsg)

    #Accept connection from the amazon
    amz_socket = createAmzSocket()
    print('Created amazon socket')

    #Send worldid to Amazon
    sendWorldID(amz_socket, WORLD_ID, AMZ_SEQ)
    print('Sent world id to amazon')
    AMZ_SEQ += 1

    #clear database
    con = connectDB()
    clearDB(con)
    print('Truncated database!')

    #add trucks to database
    initTrucks(con, TRUCK_NUM)
    print('Successfully created trucks!')

    try:
        #Select and read the messages from world/amazon
        while True:
            socket_list = [world_socket, amz_socket]
            read_sockets, write_sockets, error_sockets = select.select(socket_list, [], [])
            for rs in read_sockets:
                if not read_sockets:
                    print("Timed out, retry...")
                    continue
                if rs == world_socket:
                    world_response = world_ups_pb2.UResponses()
                    message = receiver(world_socket)
                    world_response.ParseFromString(message)
                    print("RECV from world ====================\n" + str(world_response))
                    world_t = threading.Thread(
                        target=process_wTask,
                        args=(con, world_response, world_socket, amz_socket, AMZ_SEQ))
                    AMZ_SEQ += len(world_response.completions) + len(world_response.delivered)
                    world_t.start()
                elif rs == amz_socket:
                    amz_msg = IG1_pb2.AMsg()
                    message = receiver(amz_socket)
                    amz_msg.ParseFromString(message)
                    print("RECV from amazon ====================\n" + str(amz_msg))
                    amazon_t = threading.Thread(
                        target=process_aTask,
                        args=(con, amz_msg, world_socket, amz_socket, AMZ_SEQ, WORLD_SEQ))
                    AMZ_SEQ += len(amz_msg.asendtruck)
                    WORLD_SEQ += len(amz_msg.asendtruck) + len(amz_msg.afinishloading)
                    amazon_t.start()
            for es in error_sockets:
                print('Error from ', es.getpeername())
    except KeyboardInterrupt:
        print("WORLD_RECV_SEQS ==========\n" + config.WORLD_RECV_SEQS)
        print("SEQ_TO_WORLD ==========\n" + config.SEQ_TO_WORLD)

    #Close the sockets
    world_socket.close()
    amz_socket.close()

    #Disconnect database
    disconnectDB(con)
def main(args):
    config.init()
    feat_net = 'resnet50'
    print("")
    print("")
    print("Running experiment on net: " + feat_net)

    # if config.USE_TOY_DATASET:
    #     trainset_name = config.DATASET + '_train'
    # else:
    #     trainset_name = config.DATASET + '_train_ds'
    testset_name = config.DATASET + '_test'
    testset = common.feat_dataset(testset_name, feat_net)

    in_shape = config.feat_shape_dict[feat_net]
    out_shape = testset.labelsize

    def for_resnet50():
        #shallow_path = common.shallow_path("LF_FT_A", trainset_name, feat_net, ext=False)
        LF = new_model(in_shape, out_shape)

        shallow_path = config.SHALLOW_PATH + "shallow_AB__feat_dbp3120_train_ds__resnet50__avg_pool.weights.best.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "AB best", testset_name, "test_results.csv", detailed_csv=True)

        shallow_path = config.SHALLOW_PATH + "shallow_AB__feat_dbp3120_train_ds__resnet50__avg_pool.weights.last.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "AB last", testset_name, "test_results.csv", detailed_csv=True)

        shallow_path = config.SHALLOW_PATH + "shallow_A__feat_dbp3120_train_ds__resnet50__avg_pool.weights.best.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "A best (5ep)", testset_name, "test_results.csv", detailed_csv=True)

        shallow_path = config.SHALLOW_PATH + "shallow_A__feat_dbp3120_train_ds__resnet50__avg_pool.weights.last.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "A last (5ep)", testset_name, "test_results.csv", detailed_csv=True)

        shallow_path = config.SHALLOW_PATH + "shallow_LF_FT_A__feat_dbp3120_train_ds__resnet50__avg_pool.weights.best.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "LF A best", testset_name, "test_results.csv", detailed_csv=True)

        shallow_path = config.SHALLOW_PATH + "shallow_LF_FT_A__feat_dbp3120_train_ds__resnet50__avg_pool.weights.00.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "LF A 0", testset_name, "test_results.csv", detailed_csv=True)

        shallow_path = config.SHALLOW_PATH + "shallow_LF_FT_A__feat_dbp3120_train_ds__resnet50__avg_pool.weights.17.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "LF A 17", testset_name, "test_results.csv", detailed_csv=True)

        shallow_path = config.SHALLOW_PATH + "shallow_LF_FT_A__feat_dbp3120_train_ds__resnet50__avg_pool.weights.41.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "LF A 41", testset_name, "test_results.csv", detailed_csv=True)

        shallow_path = config.SHALLOW_PATH + "shallow_LF_FT_A__feat_dbp3120_train_ds__resnet50__avg_pool.weights.60.h5"
        LF.load_weights(shallow_path, by_name=True)
        score = test_net(LF, testset)
        write_net_score(score, "LF A 60", testset_name, "test_results.csv", detailed_csv=True)

    if feat_net == 'resnet50':
        for_resnet50()
if output == config.LOGGING_OUTPUT_SYSLOG:
    facility = logging_facility(syslog_facility)
    handler = logging.handlers.SysLogHandler(address=syslog_address, facility=facility)
else:
    handler = logging.StreamHandler(sys.stdout)

if _format == config.LOGGING_FORMAT_JSON:
    json_logging.ENABLE_JSON_LOGGING = True
    json_logging.init_non_web()
else:
    handler.setFormatter(logging.Formatter(format_string))

logger.addHandler(handler)


if __name__ == "__main__":
    args = config.init()

    logging_config = config.Config["Logging"]
    logging_init(level=logging_config["level"],
                 output=logging_config["output"],
                 _format=logging_config["format"],
                 format_string=logging_config["format_string"],
                 syslog_address=logging_config["syslog_address"],
                 syslog_facility=logging_config["syslog_facility"])

    spam_filter = None
    spam_filter_path = config.Config["Learning"]["filter_path"]
    if os.path.isfile(spam_filter_path):
        logger.info(f"loading spam filter from a file {spam_filter_path}")
        try:
            with open(spam_filter_path, 'rb') as f:
                spam_filter = pickle.load(f)
        except Exception as e:
import logging
import os

from flask import Flask

import config
import facenet
import align.detect_face
import error

# flask app
app = Flask(__name__)

# logging init
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
_logger = logging.getLogger(__name__)

# load project config
config.init(config_file='_config.yml')
if config.has('restful') and config.has('templates'):
    restful_conf = config.get('restful')
    tem_conf = config.get('templates')
    _logger.info("loaded profile successfully")
else:
    _logger.info("load profile failed, exiting...")

app.config['UPLOAD_FOLDER'] = restful_conf['upload_dir']
app.config['CROP_IMG_FOLDER'] = restful_conf['crop_face_dir']

# create if not exists dir
basedir = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(basedir, app.config['UPLOAD_FOLDER'])):
    os.makedirs(os.path.join(basedir, app.config['UPLOAD_FOLDER']))
from twisted.internet import reactor, protocol, defer
from twisted.manhole.telnet import ShellFactory

import sys, os
reload(sys)
sys.setdefaultencoding('utf-8')

from os.path import abspath, dirname, join, normpath
PREFIX = normpath(dirname(abspath(__file__)))
for path in (PREFIX, normpath(join(PREFIX, '../lib'))):
    if path not in sys.path:
        sys.path = [path] + sys.path

import config
SERVER_NAME = 'GAMESERVER'
config.init(SERVER_NAME)

import log
log.init(config.conf)

from server import Server

@defer.inlineCallbacks
def test():
    from gscharacter import GSCharacter
    from gsfellow import GSFellowManager
    from gsuser import g_UserMgr
    c = GSCharacter(9527)
    c.syncCharacterToCS()
    _shrine = c.getShrine(1)
import collection
import config
import dataset
import db_management
import developer
import order
import schema
import user
import utils

# avoid pylint warnings for using slots on g and session
# pylint: disable=assigning-non-slot

app = flask.Flask(__name__)  # pylint: disable=invalid-name
appconf = config.init()
db_management.check_db(appconf)
app.config.update(appconf)

if app.config["dev_mode"]["api"]:
    app.register_blueprint(developer.blueprint, url_prefix="/api/v1/developer")
app.register_blueprint(dataset.blueprint, url_prefix="/api/v1/dataset")
app.register_blueprint(order.blueprint, url_prefix="/api/v1/order")
app.register_blueprint(collection.blueprint, url_prefix="/api/v1/collection")
app.register_blueprint(user.blueprint, url_prefix="/api/v1/user")
app.register_blueprint(schema.blueprint, url_prefix="/api/v1/schema")

oauth = OAuth(app)
for oidc_name in app.config.get("oidc_names"):
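# In the Flask app above config.init() returns a plain dict that is fed
# straight into app.config.update(). A minimal sketch of a dict-returning
# init(), assuming a JSON settings file (the file name and keys here are
# illustrative, not the project's actual layout):
import json

def init():
    with open('settings.json') as f:
        conf = json.load(f)
    # Guarantee the key the app checks before registering dev blueprints.
    conf.setdefault('dev_mode', {'api': False})
    return conf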
if __name__== "__main__": vM = 13 # Maximum velocity vm = 2.78 # Minimum velocity aM = 2.5 # Maximum acceleration am = -2.9 # Minimum acceleration v0 = 3.69 # Initial velocity TG = 10 # Green light interval TR = 10 # Red light interval D0 = 0.5 # Fraction of time remaining of starting light interval SGR = 1 # Starting light (green=1, red=0) L = 70 # Road segment length t0 = 10 # Optimization start parameter tau a0 = 0 # Optimization start parameter a b0 = 1.5 # Optimization start parameter b # Initialize global variables cfg.init(thresh=None, road_len=L, ctrl_env_start=None, ctrl_env_end=None, max_vel=vM, min_vel=vm, max_acc=aM, min_acc=am, green=TG, red=TR, start_frac=D0, start_len=None, start_light=SGR) # Create instance of optimization problem class obj = OptimalAcc(v0, TG, TR, D0, SGR, L, t0, a0, b0) # Solve problem and return optimal acceleration profile u_t = obj.solve().x print(u_t[0]) print(u_t[1]) print(u_t[2])
#import external module
import os
import sys
sys.path.append("geopy/")
sys.path.append("geographiclib/")
import dijsktra
import config
import time

nodes, edges = config.init()
g = dijsktra.Graph()
g.get_node(nodes)
g.get_edge(edges)

print("Please refer to the coordinate names in 'all_spot.txt' and enter a start and end point")
time.sleep(1)
os.startfile('all_spot.txt')

while True:
    _from = input("Enter start point: ")
    _to = input("Enter end point: ")
    distance, paths = dijsktra.shortest_path(g, _from, _to)
    ans = ""
    for i, path in enumerate(paths):
        if (i != len(paths) - 1):
            ans += path + ","
        else:
            ans += path
    print("Shortest path:", ans)
import datetime
import logging

import utils
import requests

import config

logger = logging.getLogger("SONARR")
logger.setLevel(logging.DEBUG)

cfg = config.init()


def wanted(title, download_link, indexer):
    global cfg
    approved = False

    logger.debug("Notifying Sonarr of release from %s: %s @ %s", indexer, title, download_link)

    headers = {'X-Api-Key': cfg['sonarr.apikey']}
    params = {
        'title': utils.replace_spaces(title, '.'),
        'downloadUrl': download_link,
        'protocol': 'Torrent',
        'publishDate': datetime.datetime.now().isoformat(),
        'indexer': indexer
    }

    requestUrl = f"{cfg['sonarr.url']}/api/release/push"
    logger.debug(f"REQ_HEADERS: {headers}")
    logger.debug(f"REQ_PARAMS: {params}")
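# The Sonarr helper above reads flat, dot-separated keys from the dict that
# config.init() returned ('sonarr.apikey', 'sonarr.url'). A sketch of how
# nested sections could be flattened into that shape; the input layout is an
# assumption for illustration, not the project's actual parser.
def flatten(conf, prefix=''):
    flat = {}
    for key, value in conf.items():
        name = f"{prefix}{key}"
        if isinstance(value, dict):
            flat.update(flatten(value, f"{name}."))
        else:
            flat[name] = value
    return flat

# e.g. flatten({'sonarr': {'apikey': 'xyz'}}) -> {'sonarr.apikey': 'xyz'}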
def main():
    argv = terminal.convert_command_line_to_utf8()
    __run__ = Runner()

    try:
        __opts__, __args__ = optval.gnu_getopt(argv[1:], "cf:F:hHlLmrTwx:",
                                               ["checkout-missing:true", "exclude=", "file-types=", "format=",
                                                "hard:true", "help", "list-file-types:true", "localize-output:true",
                                                "metrics:true", "responsibilities:true", "since=", "grading:true",
                                                "timeline:true", "until=", "version", "weeks:true"])
        for arg in __args__:
            __run__.repo = arg

        #We need the repo above to be set before we read the git config.
        config.init(__run__)
        clear_x_on_next_pass = True

        for o, a in __opts__:
            if o == "-c":
                missing.set_checkout_missing(True)
            elif o == "--checkout-missing":
                missing.set_checkout_missing(optval.get_boolean_argument(a))
            elif o in ("-h", "--help"):
                help.output()
                sys.exit(0)
            elif o in ("-f", "--file-types"):
                extensions.define(a)
            elif o in ("-F", "--format"):
                if not format.select(a):
                    raise format.InvalidFormatError(_("specified output format not supported."))
            elif o == "-H":
                __run__.hard = True
            elif o == "--hard":
                __run__.hard = optval.get_boolean_argument(a)
            elif o == "-l":
                __run__.list_file_types = True
            elif o == "--list-file-types":
                __run__.list_file_types = optval.get_boolean_argument(a)
            elif o == "-L":
                __run__.localize_output = True
            elif o == "--localize-output":
                __run__.localize_output = optval.get_boolean_argument(a)
            elif o == "-m":
                __run__.include_metrics = True
            elif o == "--metrics":
                __run__.include_metrics = optval.get_boolean_argument(a)
            elif o == "-r":
                __run__.responsibilities = True
            elif o == "--responsibilities":
                __run__.responsibilities = optval.get_boolean_argument(a)
            elif o == "--since":
                interval.set_since(a)
            elif o == "--version":
                version.output()
                sys.exit(0)
            elif o == "--grading":
                grading = optval.get_boolean_argument(a)
                __run__.include_metrics = grading
                __run__.list_file_types = grading
                __run__.responsibilities = grading
                __run__.grading = grading
                __run__.hard = grading
                __run__.timeline = grading
                __run__.useweeks = grading
            elif o == "-T":
                __run__.timeline = True
            elif o == "--timeline":
                __run__.timeline = optval.get_boolean_argument(a)
            elif o == "--until":
                interval.set_until(a)
            elif o == "-w":
                __run__.useweeks = True
            elif o == "--weeks":
                __run__.useweeks = optval.get_boolean_argument(a)
            elif o in ("-x", "--exclude"):
                if clear_x_on_next_pass:
                    clear_x_on_next_pass = False
                    filtering.clear()
                filtering.add(a)

        __check_python_version__()
        __run__.output()

    except (filtering.InvalidRegExpError, format.InvalidFormatError,
            optval.InvalidOptionArgument, getopt.error) as exception:
        print(sys.argv[0], "\b:", exception.msg, file=sys.stderr)
        print(_("Try `{0} --help' for more information.").format(sys.argv[0]), file=sys.stderr)
        sys.exit(2)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ..
import logging, argparse
# ...
import yask, config, urls, models

app = yask.App()
# ...
config.init(app)
app.init_logger()
urls.init(app)
models.init(app)

# ...
if __name__ == "__main__":
    # ...
    parser = argparse.ArgumentParser(description='Setup defaults')
    # ..
    parser.add_argument('--host', dest='host',
                        default=app.config.HOST,
                        help='Default hostname')
    # ...
    parser.add_argument('--port', dest='port', type=int,
                        default=app.config.PORT,
                        help='Default port')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_cache_path', help='model cache path')
    parser.add_argument('--type', help='train or inference', default='inference')
    parser.add_argument('--input', help='input file')
    parser.add_argument('--style', help='use style')
    parser.add_argument('--condition_count', type=int, default=100)
    parser.add_argument('--beamwidth', type=int, default=50)
    args = parser.parse_args()
    print args

    if args.type == 'inference':
        config.init(args.model_cache_path)
        z = generate.load_all()
        if args.style:
            s = generate.story(z, args.input, args.condition_count, args.beamwidth, lyric=True)
        else:
            s = generate.story(z, args.input, args.condition_count, args.beamwidth)
        #s = generate.story(z, args.input)
        output_file = '/data/output/{}.txt'.format(str(int(time.time())))
        with open(output_file, "w") as f:
            f.write('{}'.format(s))
# USERS sub-commands
users_parser = subparsers.add_parser('users')
users_subparsers = users_parser.add_subparsers(dest='users_command', help='Sub-commands')

# USERS ADD
add_parser = users_subparsers.add_parser('add')
add_parser.add_argument('user_name')
add_parser.set_defaults(func=add_user)

# USERS REMOVE
remove_parser = users_subparsers.add_parser('remove')
remove_parser.add_argument('user_name')
remove_parser.set_defaults(func=remove_user)

# USERS CLEARALL
clear_parser = users_subparsers.add_parser('clearall')
clear_parser.set_defaults(func=clear_users)

# KEYS subcommands
keys_parser = subparsers.add_parser('keys')
keys_parser.set_defaults(func=gen_key)

args = parent_parser.parse_args()
if hasattr(args, 'func'):
    config.init(server.persistent_path)
    args.func(args)
else:
    parent_parser.print_help()
import coloredlogs, logging

import config
import resources
import database
import antiflood

from telegram.ext import Filters

import transcriberbot
from transcriberbot import TranscriberBot

coloredlogs.install(
    level='DEBUG',
    fmt='%(asctime)s - %(name)s - %(levelname)s - %(filename)s [%(funcName)s:%(lineno)d] - %(message)s'
)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    config.init('../config')
    resources.init("../values")
    antiflood.init()
    transcriberbot.init()
    database.init_schema(config.get_config_prop("app")["database"])
    TranscriberBot.get().start(config.get_config_prop("telegram")["token"])
def main_init():
    if cfg.init() != 0:
        return 1
    return 0
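# cfg.init() above follows a C-style convention: 0 on success, non-zero on
# failure, instead of raising. A minimal sketch of an init() honoring that
# contract (the config file name is made up for illustration):
def init(path='app.conf'):
    try:
        open(path).close()  # a real version would parse the file here
        return 0
    except IOError:
        return 1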
def main():
    terminal.check_terminal_encoding()
    terminal.set_stdin_encoding()
    argv = terminal.convert_command_line_to_utf8()
    __run__ = Runner()

    try:
        __opts__, __args__ = optval.gnu_getopt(argv[1:], "f:F:hHlLmrTwx:",
                                               ["exclude=", "file-types=", "format=", "hard:true", "help",
                                                "list-file-types:true", "localize-output:true", "metrics:true",
                                                "responsibilities:true", "since=", "grading:true", "timeline:true",
                                                "until=", "version", "weeks:true"])
        for arg in __args__:
            __run__.repo = arg

        #We need the repo above to be set before we read the git config.
        config.init(__run__)
        clear_x_on_next_pass = True

        for o, a in __opts__:
            if o in ("-h", "--help"):
                help.output()
                sys.exit(0)
            elif o in ("-f", "--file-types"):
                extensions.define(a)
            elif o in ("-F", "--format"):
                if not format.select(a):
                    raise format.InvalidFormatError(_("specified output format not supported."))
            elif o == "-H":
                __run__.hard = True
            elif o == "--hard":
                __run__.hard = optval.get_boolean_argument(a)
            elif o == "-l":
                __run__.list_file_types = True
            elif o == "--list-file-types":
                __run__.list_file_types = optval.get_boolean_argument(a)
            elif o == "-L":
                __run__.localize_output = True
            elif o == "--localize-output":
                __run__.localize_output = optval.get_boolean_argument(a)
            elif o == "-m":
                __run__.include_metrics = True
            elif o == "--metrics":
                __run__.include_metrics = optval.get_boolean_argument(a)
            elif o == "-r":
                __run__.responsibilities = True
            elif o == "--responsibilities":
                __run__.responsibilities = optval.get_boolean_argument(a)
            elif o == "--since":
                interval.set_since(a)
            elif o == "--version":
                version.output()
                sys.exit(0)
            elif o == "--grading":
                grading = optval.get_boolean_argument(a)
                __run__.include_metrics = grading
                __run__.list_file_types = grading
                __run__.responsibilities = grading
                __run__.grading = grading
                __run__.hard = grading
                __run__.timeline = grading
                __run__.useweeks = grading
            elif o == "-T":
                __run__.timeline = True
            elif o == "--timeline":
                __run__.timeline = optval.get_boolean_argument(a)
            elif o == "--until":
                interval.set_until(a)
            elif o == "-w":
                __run__.useweeks = True
            elif o == "--weeks":
                __run__.useweeks = optval.get_boolean_argument(a)
            elif o in ("-x", "--exclude"):
                if clear_x_on_next_pass:
                    clear_x_on_next_pass = False
                    filtering.clear()
                filtering.add(a)

        __check_python_version__()
        __run__.output()

    except (filtering.InvalidRegExpError, format.InvalidFormatError,
            optval.InvalidOptionArgument, getopt.error) as exception:
        print(sys.argv[0], "\b:", exception.msg, file=sys.stderr)
        print(_("Try `{0} --help' for more information.").format(sys.argv[0]), file=sys.stderr)
        sys.exit(2)
def set_behavior_parents(behavior_path):
    ''' Sets the "parent" behavior for each behavior in a list of behaviors.
    The parent is the behavior that is next in sequence. This is used so that
    a child behavior can set some info for a parent (since
    find_actions_leading_to_goals works backwards, sometimes a parent behavior
    won't have information dependent on a child behavior to set) '''
    last_behavior_index = len(behavior_path) - 1

    for i, behavior in enumerate(behavior_path):
        if i < last_behavior_index:
            # Set parent to the behavior that comes before it
            behavior.parent = behavior_path[i + 1]


if __name__ == '__main__':
    from it import BasicWorldBrain
    g.init()

    # No traits
    test_entity_normal = TestEntity()
    # Honest trait
    test_entity_moral = TestEntity()
    test_entity_moral.creature.traits['honest'] = 2
    # Dishonest trait
    test_entity_amoral = TestEntity()
    test_entity_amoral.creature.traits['dishonest'] = 2

    begin = time()
    best_path = test_entity_normal.world_brain.set_goal(
        goal_state=HaveItem(item_name=GOAL_ITEM, entity=test_entity_normal),
        reason='because')
    print 'done in {0}'.format(time() - begin)
    #print [b.behavior for b in best_path]