def main():
    """Parse command-line arguments and launch the social inspector.

    All options are optional and fall back to sensible defaults.
    """
    # define arguments
    parser = argparse.ArgumentParser(
        description="o'Reilly Author Social Inspector")
    parser.add_argument('-s', '--server', type=str, default='127.0.0.1',
                        required=False)
    # BUG FIX: port was declared type=str with an int default (8080), so the
    # value was an int when defaulted but a str when supplied on the CLI.
    parser.add_argument('-p', '--port', type=int, default=8080,
                        required=False)
    parser.add_argument('-q', '--query', type=str, default='',
                        required=False)
    parser.add_argument('-l', '--page_limit', type=int, default=0,
                        required=False)
    parser.add_argument('-d', '--delay', type=int, default=0,
                        required=False)
    args = parser.parse_args()

    # We can't set a boolean default for type int argument:
    # a page_limit of 0 means "no limit", which init() expects as False.
    max_pages = args.page_limit if args.page_limit else False

    init(hostname=args.server, port=args.port, query=args.query,
         max_pages=max_pages, delay=args.delay)
def cmd_init(overwrite):
    """Initialise the data store; overwrite existing data only when
    the caller passed the literal string "yes"."""
    should_overwrite = (overwrite == "yes")
    try:
        core.init(should_overwrite)
    except core.WontOverwriteError:
        warning = ("Won't overwrite existing data file. "
                   "Call as 'init yes' to overwrite.")
        print(warning)
def on_load_CSV_click(self):
    """Read the CSV path from the 'Path' text box, initialise the network,
    then run 50 training epochs of learn/backpropagate over the input data.

    Prints progress to stdout; prints 'error' if the file cannot be read.
    """
    tb = self.builder.get_object('Path')
    path = tb.get()
    try:
        core.init(path)
        print 'Initialised !'
        print 'Base weights : '
        print core.hiddenLayerWeights
        # Fixed 50-epoch training loop over every input row.
        for epoch in range(50):
            for i in range(len(core.inputData)):
                data = core.inputData[i]
                #print 'Load line ', i
                tmpFinalPrediction, tmpFinalError, tmpFinalX, tmpPredictions, tmpErrors, tmpFinalXs = core.learnOne(
                    data)
                # presumably the last column of each row is the target value — TODO confirm
                print "prediction : ", tmpFinalPrediction, " | true : ", data[
                    -1]
                #print "error : ", tmpFinalError
                core.retropropagation(tmpErrors, tmpFinalError, data)
                #print 'Final weights : '
                #print core.hiddenLayerWeights
    except IOError:
        print 'error'
def main(opts):
    """Parse command-line options, then initialise and run the game.

    Returns error.NO_IMAGE when a required image cannot be found.
    """
    # NOTE(review): the `opts` parameter is immediately shadowed by
    # parse_args() below, so the argument value is never used.
    log_level = logging.CRITICAL
    parser = optparse.OptionParser("%prog [options]", version="%prog 0.01")
    parser.add_option("-W", "--screen-width", action = "store", type = "int",
                      default = "800", dest = "width",
                      help = "set custom screen width")
    parser.add_option("-H", "--screen-height", action = "store", type = "int",
                      default = "600", dest = "height",
                      help = "set custom screen height")
    group = optparse.OptionGroup(parser, "Dev Options")
    group.add_option("-d", "--debug", action = "store_true", dest = "debug",
                     help = "enables debug mode")
    parser.add_option_group(group)
    (opts, args) = parser.parse_args()
    if opts.debug:
        log_level = logging.DEBUG
    try:
        core.init(log_level)
        core.run(opts.width, opts.height)
    except KeyboardInterrupt:
        g_logger.info("Keyboard interrupt received, shutting down...")
    except SystemExit:
        # Let deliberate exits propagate untouched.
        raise
    except error.NoImageFound, e:
        g_logger.critical("Failed to find image: %s" % e)
        return error.NO_IMAGE
def init_config():
    """Load and unpack configuration, wire up logging, then bring up
    the API and core subsystems."""
    raw_config = _load_config()
    const.config = _unpack(raw_config)
    const.log = const.logzero.setup_logger(
        formatter=const._formatter,
        level=const.config.log_level,
    )
    api.init()
    core.init()
def launch():
    """Boot the core, connect the plugin-backed database, and open the
    main application window."""
    core.init()
    database.init(core.get_plugin_store())
    renderer = GraphRenderer()
    window = LavenderWindow(renderer, width=1000, height=600)
    pyglet.app.run()
def main():
    """Build default display options, apply CLI overrides, and start
    the selected game."""
    opts = {
        'width': 640,
        'height': 480,
        'debug': 0,
        'fullscreen': False,
    }
    game = process_args(opts)
    core.init(opts)
    core.game = games.load(game)
    core.game.run()
def main():
    """Parse the CLI, initialise core and logging, then run the service."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-c', '--config', required=True,
                            help="Configuration path")
    parsed = arg_parser.parse_args()

    # Initialize all necessary objects.
    core.init(parsed.config)
    logger.init()

    # Run service.
    service.run()
def __init__(self, dummyAggregate=True):
    """Set up state and the light rig; optionally seed the colour
    aggregate with six random colours for testing."""
    #from lumiversepython import Rig
    #self.rig = Rig('/home/teacher/Lumiverse/PBridge.rig.json')
    #self.rig.init()
    #self.rig.run()
    self.ticks = 0
    self.aggregate = []
    if dummyAggregate:
        # self.aggregate = [[1,0,0], [.2,.2,.7], [.5, 0, .5]]
        self.aggregate = [core.rand_color() for i in xrange(6)]
        print len(self.aggregate)
        #print self.aggregate
    # presumably the two False flags disable upload/run — confirm against core.init
    self.rig = core.init(False, False)
def start_lift(file_path=None, pallet_arg=None):
    """Run a full forklift lift: update crates, process pallets, copy data,
    run post-copy processing, and email a report.

    file_path/pallet_arg narrow which pallets are lifted.
    Returns the formatted report string.
    """
    log.info('starting forklift')
    git_errors = git_update()
    start_seconds = clock()
    pallets_to_lift, all_pallets = _sort_pallets(file_path, pallet_arg)
    start_process = clock()
    core.init(log)
    lift.process_crates_for(pallets_to_lift, core.update,
                            config.get_config_prop('configuration'))
    log.info('process_crates time: %s',
             seat.format_time(clock() - start_process))
    start_process = clock()
    lift.process_pallets(pallets_to_lift)
    log.info('process_pallets time: %s',
             seat.format_time(clock() - start_process))
    start_copy = clock()
    copy_results = lift.copy_data(pallets_to_lift, all_pallets,
                                  config.get_config_prop('copyDestinations'))
    log.info('copy_data time: %s', seat.format_time(clock() - start_copy))
    start_post_copy_process = clock()
    # Second pallet pass runs the post-copy hooks.
    lift.process_pallets(pallets_to_lift, is_post_copy=True)
    log.info('post_copy_process time: %s',
             seat.format_time(clock() - start_post_copy_process))
    elapsed_time = seat.format_time(clock() - start_seconds)
    report_object = lift.create_report_object(pallets_to_lift, elapsed_time,
                                              copy_results, git_errors)
    _send_report_email(report_object)
    log.info('Finished in {}.'.format(elapsed_time))
    report = _format_dictionary(report_object)
    log.info('%s', report)
    return report
def train_ubm(X, n_components, weight=None, means=None, covars=None, epslion=1e-6, max_iters=500, save_step=1, save_path=None, save_name=None): m, d = np.shape(X) if weight.any() == None: weight, means, covars = init(X, n_components) pre_llh = sys.float_info.min n_iter = 0 print "instance size {}, demensions {}, number of clusters {}".format( m, d, n_components) start = time.time() pp, llh = expectation(X, weight, means, covars) llh = np.sum(llh) while n_iter < max_iters and np.abs(llh - pre_llh) >= epslion: pre_llh = llh middle = time.time() weight, means, covars = maximization(X, pp) end = time.time() print "iteratrion {}:".format(n_iter) print "log likehood: {}, expectation take: {}s, maximization take: {}s".format( llh, middle - start, end - middle) start = time.time() pp, llh = expectation(X, weight, means, covars) llh = np.sum(llh) n_iter = n_iter + 1 if save_step >= 1 and n_iter % save_step == 0 and save_path != None: model = {'w': w, 'mean': mean, 'cov': cov} full_path = os.path.join( save_path, "".join((save_name, '_', str(n_iter), '.yml'))) with open(full_path, 'wb') as f: yaml.dump(model, f, default_flow_style=False) return w, mean, cov
def main():
    """Solve the n-puzzle described by core.init() and report search stats."""
    size, start, end, heuristic, force = core.init()

    # Bail out early on unsolvable configurations.
    if not isSolvable(start, size, end):
        print("This puzzle is not solvable")
        sys.exit()

    nSelectedStates, nMaxStates, solution = solve.solve(
        size, start, end, heuristic, force)
    nStates, solution = solution

    for state in solution:
        core.display(state)

    selected_msg = "Total number of states ever selected in open set: {:d}"
    print(selected_msg.format(nSelectedStates))
    memory_msg = ("Maximum number of states ever represented in memory "
                  "at the same time: {:d}")
    print(memory_msg.format(nMaxStates))
    moves_msg = "Number of moves required to solve the puzzle: {:d}"
    print(moves_msg.format(nStates))
def init():
    """Thin wrapper that delegates initialisation to core.init()."""
    core.init()
import core

# Initialise the full ("All") dataset from data/ and print its disk overview.
print(core.getOverview("disk", core.init("All", "data/")))
def main() -> int:
    """Entry point: parse options, set up pygame and the board, run the
    event loop until the game finishes, then dump cache statistics.

    Returns 0 on normal exit.
    """
    core.init()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-l",
        "--level",
        help="AI player Level. Default is 0 (Easy). Higher is harder)",
        default=cfg.LEVEL,
        type=int)
    parser.add_argument('-d', '--debug', help="Debug mode",
                        action='store_true')
    parser.add_argument('-C', '--cache', help="Enable persistent memoize cache",
                        action='store_true')
    options = parser.parse_args()
    cfg.LEVEL = options.level
    cfg.__DEBUG__ = options.debug
    cfg.CACHE_ENABLED = options.cache

    log('Quoridor AI game, (C) 2009 by Jose Rodriguez (a.k.a. Boriel)')
    log('This program is Free')
    log('Initializing system...')

    pygame.init()
    clock = pygame.time.Clock()
    pygame.display.set_mode((800, 600))
    pygame.display.set_caption(cfg.GAME_TITLE)
    screen = pygame.display.get_surface()
    screen.fill(Color(255, 255, 255))
    board = core.BOARD = Board(screen)
    board.draw()
    log('System initialized OK')

    # Persistent memoize cache lives on disk; create its directory on demand.
    if cfg.CACHE_ENABLED:
        if not os.path.exists(cfg.CACHE_DIR):
            log('Cache directory {} not found. Creating it...'.format(
                cfg.CACHE_DIR))
            os.makedirs(cfg.CACHE_DIR, exist_ok=True)
        if not os.path.isdir(cfg.CACHE_DIR):
            log(
                'Could not create cache directory {}. Caching disabled'.format(
                    cfg.CACHE_DIR), LogLevel.ERROR)
            cfg.CACHE_ENABLED = False

    cont = True
    while cont:
        clock.tick(cfg.FRAMERATE)
        pygame.display.flip()
        if not board.computing and not board.finished:
            if board.current_player.AI:
                board.computing = True
                # AI moves run on a worker thread so the UI keeps refreshing.
                thread = threading.Thread(target=board.computer_move)
                thread.start()
        cont = dispatch(pygame.event.get(), board)

    del board.rows
    pygame.quit()
    if cfg.NETWORK_ENABLED:
        board.server.terminate()

    if cfg.CACHE_ENABLED:
        for pawn in board.pawns:
            if pawn.AI is not None:
                pawn.AI.flush_cache()

    log('Memoized nodes: %i' % core.MEMOIZED_NODES)
    log('Memoized nodes hits: %i' % core.MEMOIZED_NODES_HITS)
    for pawn in board.pawns:
        log('Memoized distances for [%i]: %i' %
            (pawn.id, pawn.distances.MEMO_COUNT))
        log('Memoized distances hits for [%i]: %i' %
            (pawn.id, pawn.distances.MEMO_HITS))

    # NOTE(review): this literal was split across a line break by extraction;
    # reconstructed here with an explicit newline.
    log('Exiting. \nBye!')
    return 0
from core import init

# Initialise the core as a module-import side effect.
init()
import core as cuda

# Initialise the driver, grab device 0 and create a context on it.
# presumably a PyCUDA-style binding — confirm against the core module
cuda.init()
dev = cuda.get_device(id = 0)
ctx = dev.ctx_create()
# """ # return Communicator(self.split(color,key)) # def comm_dup(self): # """ # """ # return Communicator(self.dup()) # def comm_create(self, group ): # """ # """ # return Communicator(self.create(group)) # Request Objects from request import Request ### Python MPI Initialization and Finalization """ Specifically, importing the module should initialize MPI. Finalization should happen automatically when the process exits. Essentially, these are the PyMPI rules for init and finalize. """ rank,size = core.init( len( sys.argv ), sys.argv ) # Overloaded MPI_COMM_WORLD COMM_WORLD=Communicator( _mpi.MPI_COMM_WORLD ) atexit.register( core.finalize )
# Python 2 compatibility: make input() behave like raw_input().
try:
    input = raw_input
except NameError:
    pass

# Try to fetch email and username from git config
author_name = (os.popen('git config --global user.name').read()).rstrip()
author_email = (os.popen('git config --global user.email').read()).rstrip()

# Fallback for email and username if not found in git config
if not (author_email and author_name):
    print("Enter your details, to avoid this set your email to git config.")
    if not author_email:
        author_email = str(input("Email: "))
    if not author_name:
        # NOTE(review): the next statement is corrupted in the source (a
        # scrubber replaced text with "******" and fused two statements);
        # reproduced verbatim — needs manual reconstruction.
        author_name = str(input("Username: "******"Enter the message you want to display in your Github profile: (This will be trimmed to 8 chars)" ) message = str(input())

# Trigger the build
git_dir = core.init(message, author_name, author_email)

# Print the response
print(
    "\nGit commit history generated, now:\ncd " + git_dir +
    "\ngit remote add origin https://github.com/example/yourrepo.git\ngit push origin master"
)
def init(self):
    """Create and store the light rig handle."""
    # presumably upload/run/wipe/fire toggle rig start-up behaviours —
    # confirm against core.init
    self.rig = core.init(upload=True, run=False, wipe=True, fire=True)
import matplotlib.pyplot as plt
import numpy as np
from core import expectation, expectation_old, maximization, adaption, init
from scipy.stats import multivariate_normal

# EM smoke test on samples drawn from three 2-D Gaussians.
a = np.array([1, 2, 3])
X1 = np.random.multivariate_normal([10, 1], [[2, 1], [1, 2]], 10)
X2 = np.random.multivariate_normal([1, 10], [[2, 1], [1, 2]], 10)
X3 = np.random.multivariate_normal([5, 10], [[2, 1], [1, 2]], 10)
X = np.concatenate((X1, X2))
w, m, c = init(X, 2)
# 20 EM iterations on the first two clusters, printing the log-likelihood.
for i in range(20):
    pp, llh = expectation(X, w, m, c)
    print llh
    w, m, c = maximization(X, pp)
X = np.concatenate((X, X3))
# NOTE(review): the triple-quoted block below is unterminated in this view;
# contents reproduced as-is.
''' for i in range(10): pp, llh = expectation(X, w, m, c) print llh pp, llh = expectation_old(X, w, m, c) print llh w, m, c = adaption(X, pp, w, m, c) print w
import core
from core import locale
from objects import Job
import numpy as np
from scipy.stats import ttest_ind
from typing import List, cast, Tuple
import matplotlib.pyplot as plt
import ujson
import zlib
import filters

# Load all jobs and collect the distinct stage names seen in the data.
dataFile: str = core.init("All", "data")
data: List[bytes] = core.loadJobsFromFile(dataFile)
stageList: List[str] = []
for line in data:
    job: Job = Job(**ujson.loads(zlib.decompress(line)))
    if job.stage is not None:
        if not (getattr(job.stage.name, locale) in stageList):
            stageList.append(getattr(job.stage.name, locale))
# Partition jobs per stage into (matching, non-matching) pairs.
listOfData: List[Tuple[List[bytes], List[bytes]]] = []
for stage in stageList:
    listOfData.append(
        cast(Tuple[List[bytes], List[bytes]],
             filters.onStages("mem", data, [stage]))
    )
with open("reports/stages.txt", "w", encoding="utf-8") as writer:
    i: int = 1
    for stageFiles in listOfData:
        plt.figure(i)
        withVal: List[bytes] = stageFiles[0]
        withoutVal: List[bytes] = stageFiles[1]
        withValClearWaves: List[float] = []
        # NOTE(review): snippet is truncated here in the source.
import core
from objects import Job
import ujson
import gzip

# Pull the per-user dataset using the stat.ink API key from keys.json.
data = core.init("User", "data/", ujson.load(open("keys.json"))["statink_key"])
# Scores keyed by event name, then by a low/normal/high bucket.
scoresDict: dict = {
    "All": None,
    "All (no night)": None,
    "Princess": None,
    "None": {"low": None, "normal": None, "high": None},
    "mothership": {"low": None, "normal": None, "high": None},
    "fog": {"low": None, "normal": None, "high": None},
    "rush": {"low": None, "normal": None, "high": None},
    "cohock_charge": {"low": None, "normal": None, "high": None},
    "griller": {"low": None, "normal": None, "high": None},
    "goldie_seeking": {"low": None, "normal": None, "high": None},
}
# Replace each leaf bucket with per-category counters.
for keyA in scoresDict:
    if isinstance(scoresDict[keyA], dict):
        for keyB in scoresDict[keyA]:
            scoresDict[keyA][keyB] = {
                "dam": {
                    "total": 0,
                    "url": None,
                },
                "shaketoba": {
                    "total": 0,
                    "url": None,
                },
                "tokishirazu": {
                # NOTE(review): snippet truncated mid-dict in the source.
async def on_ready():
    """Ready hook (presumably a discord.py event handler — confirm):
    start the background processes, hand the client to core, and log in."""
    await core.start_process(bundes_program)
    await core.start_process(message_transfer.program)
    core.init(client)
    print('We have logged in as {0.user}'.format(client))
def post(self, url):
    """Handle an incoming XMPP POST: route subscription events and chat
    commands ('plus', friends, last, on/off, owner system commands)."""
    # Bare JID (strip the /resource suffix).
    jid = self.request.get('from').split('/')[0]
    logging.debug("Message from jid: '"+jid+"'")
    logging.debug("Message at url: '"+url+"'")
    if url == 'subscription/unsubscribe/':
        dm.del_user(jid)
        return
    # Commented out to avoid multiple welcome messages, some clients send
    # several subscribe events
    # if url == 'subscription/subscribe/':
    #     self.welcome(jid)
    #     return
    if url != 'message/chat/':
        return
    body = self.request.get('body')
    if body is None:
        logging.debug("Request w/o body. Stopped.")
        return
    message = xmpp.Message(self.request.POST)
    logging.debug("Message body: '"+message.body+"'")
    # 'plus <id>' subscribes the sender to the given Plus id.
    if message.body[0:5].lower() == 'plus ':
        core.init(jid, message.body[5:])
        message.reply("Subscribed!")
        return
    user = dm.get_user(jid)
    if user is None:
        self.welcome(jid)
        return
    if message.body.lower() == 'f' or message.body.lower() == 'friends':
        # friends user follows
        message.reply(core.get_friends(user.plus_id)['message'], raw_xml=False)
        return
    if message.body.lower() == 'last' or message.body.lower() == 'latest':
        # latest (cached) posts
        last = memcache.get("last_" + jid)
        if last is None:
            message.reply("No last posts message found.", raw_xml=False)
        else:
            message.reply(last, raw_xml=False)
        return
    if message.body.lower() == 'on':
        # make user active
        if dm.enable_user(jid):
            message.reply("Delivery enabled!", raw_xml=False)
        return
    if message.body.lower() == 'off':
        # make user inactive
        if dm.disable_user(jid):
            message.reply("Delivery disabled!", raw_xml=False)
        return
    if message.body[0:2].lower() == 's:' and jid == OWNER_JID:
        # system commands (owner only)
        if message.body[2:].lower() == 'cache_reset':
            result = "Success!" if memcache.flush_all() else "Failed!"
            message.reply(result, raw_xml=False)
            return
        if message.body[2:].lower() == 'cache_stats':
            result = str(memcache.get_stats())
            message.reply(result, raw_xml=False)
            return
        if message.body[2:].lower() == 'test':
            message.reply(str(time.time()), raw_xml=False)
            return
    # Anything unrecognised gets the help text.
    self.help(message)
import sys

sys.path.insert(0, ".")
import core
from objects import Job
import ujson
import gzip
from typing import Dict

# Load the full dataset; the dict below accumulates a count per event.
data = core.init("All", "data/")
eventDict: Dict[str, dict] = {
    "None": {
        "key": "none",
        "count": 0.0
    },
    "mothership": {
        "key": "mothership",
        "count": 0.0
    },
    "fog": {
        "key": "fog",
        "count": 0.0
    },
    "rush": {
        "key": "rush",
        "count": 0.0
    },
    "cohock_charge": {
        "key": "cohock_charge",
        "count": 0.0
    },
    # NOTE(review): snippet truncated mid-dict in the source.
auto_spin_config, pass_args=True, allow_edited=True, pass_job_queue=True))
# NOTE(review): fragment begins mid-call above; statements below are complete.
dp.add_handler(CommandHandler('stat', top, pass_args=True))
dp.add_handler(feedback_handler)
dp.add_handler(MessageHandler(Filters.status_update, svc_handler))
dp.add_handler(
    CallbackQueryHandler(pages_handler, pattern=r"^top:page_[1-9]+[0-9]*$"))
dp.add_handler(CallbackQueryHandler(help_button_handler, pattern=r"^help:.+$"))
# group=-1 runs the cache updater before the regular handlers.
dp.add_handler(MessageHandler(Filters.all, update_cache, edited_updates=True),
               group=-1)
dp.add_error_handler(handle_error)
core.init(bot=updater.bot, job_queue=updater.job_queue, callback=auto_spin)
if config.TELESOCKET_TOKEN:
    # Receive updates through a TeleSocket tunnel instead of polling.
    from TeleSocketClient import TeleSocket
    updater.bot.set_webhook()
    sock = TeleSocket()
    sock.login(config.TELESOCKET_TOKEN)
    sock.add_telegram_handler(lambda update: core.read_update(updater, update))
    webhook = sock.set_webhook(updater.bot.username)
    updater._clean_updates()
    updater.bot.set_webhook(url=webhook.url, allowed_updates=ALLOWED_UPDATES)
    updater.job_queue.start()
    updater._init_thread(updater.dispatcher.start, "dispatcher")
    updater.running = True
elif config.USE_WEBHOOKS:
    updater.start_webhook(listen='0.0.0.0',
                          # NOTE(review): fragment truncated here in the source.
#! /usr/bin/python import sys, pygame import core import data from scenario import Scenario pygame.mixer.pre_init(22050, -16, 2, 2048) pygame.init() core.width = 640 core.height = 480 core.appName = "Depths of Mars" core.init() data.init() framerate = 40 clock = pygame.time.Clock() pygame.time.set_timer(pygame.USEREVENT+1, 1000/framerate) #snd = pygame.mixer.Sound("snd/31855__HardPCM__Chip015.wav") scenario = Scenario() while not scenario.quit: for event in pygame.event.get(): if event.type == pygame.QUIT: scenario.quit = True if event.type == pygame.KEYDOWN: core.controls.onKeyDn(event.key) if event.type == pygame.KEYUP: core.controls.onKeyUp(event.key) if event.type == pygame.USEREVENT+1: scenario.behave()
] = {}
# NOTE(review): fragment begins mid-statement above (inside a loop body);
# indentation below is reconstructed.
filterPaths: Tuple[List[bytes], List[bytes]] = cast(
    Tuple[List[bytes], List[bytes]],
    filters.duringRotationInts("mem", data, [rotation]),
)
withVal: List[bytes] = filterPaths[0]
withoutVal: List[bytes] = filterPaths[1]
if hasJobs("mem", withVal):
    if (hasJobs("mem", withVal)) and (hasJobs("mem", withoutVal)):
        result["name"] = rotation
        result["data"] = core.findWeaponsAndStageByRotation(
            "mem", withVal, rotation
        )
        # Difference of the summary stat with vs. without the rotation.
        result["value"] = (
            statSummary("mem", withVal, stat)[0]
            - statSummary("mem", withoutVal, stat)[0]
        )
        rotationResultsList.append(result)
pprint.pprint(
    sorted(rotationResultsList, key=lambda val: cast(float, val["value"]))
)

if __name__ == "__main__":
    # fullPath: str = core.init("User", "disk", ujson.load(open("keys.json", "r"))["statink_key"])
    fullPath: str = cast(str, core.init("All", "disk"))
    # sortStages(core.loadJobsFromFile(fullPath), "clear_waves")
    sortWeapons(core.loadJobsFromFile(fullPath), "clear_waves")
    # sortSpecial(core.loadJobsFromFile(fullPath), "clear_waves")
    # sortRotation(core.loadJobsFromFile(fullPath), "clear_waves")
#!/usr/bin/env python import core core.init() # this has to be run before anything else can be imported import sys, os, traceback import UTdebug import pose import percepts import lights import classBvr import util import cfgkick import commands behavior = classBvr def init(): global firstFrame firstFrame = True initMemory() initNonMemory() print "Python initialized" def initMemory(): core.initMemory() def initNonMemory(): pass def processFrame(): try: global firstFrame
import sys
import core
from objects import Job
import filters
from scipy.stats import ttest_ind
import numpy as np
import matplotlib.pyplot as plt
from typing import List, Union, Tuple, cast
import ujson
import zlib

# Load the per-user dataset using the stat.ink key, then pick a stat
# dimension interactively.
dataFile: str = core.init(
    "User", "data/", ujson.load(open("keys.json", "r"))["statink_key"]
)
data: List[bytes] = core.loadJobsFromFile(dataFile)
print("Rotation")
print("Player")
print("Weapon")
print("Stage")
stat: str = input("Choose a stat to run analysis on: ")
if stat == "Player":
    playerId: List[str] = core.findPlayerIdByName(
        "mem", data, input("Enter a player name to run analysis on: ")
    )
    print(playerId)
    val: str = playerId[int(input("Pick the player id by index: "))]
    result: Tuple[List[bytes], List[bytes]] = cast(
        Tuple[List[bytes], List[bytes]], filters.hasPlayers("mem", data, [val])
    )
elif stat == "Rotation":
    # NOTE(review): snippet truncated here in the source.
import zlib
import ujson
import time
import psutil
from typing import List
from objects import Job
import core

# Time a full load of the dataset and report process memory pressure.
load_start = time.perf_counter()
jobs: List[bytes] = core.loadJobsFromFile(core.init("All", "data/"))
elapsed = time.perf_counter() - load_start
print("Time taken: {}".format(elapsed))
print(psutil.virtual_memory().percent)
        printJobs(dataList)
    # NOTE(review): fragment begins mid if/elif chain; indentation is
    # reconstructed (this appears to be a menu loop inside a function).
    elif mode == "HypothesisTesting":
        hypothesisTesting(dataList)
    elif mode == "SortAttributeByStat":
        sortAttributeByStat(dataList)
    elif mode == "WaveClearPercentageWithWeapon":
        waveClearPercentageWithWeapon(dataList)
    else:
        sys.exit()
    print()
    print("PrintOverview")
    print("PrintJobs")
    print("HypothesisTesting")
    print("SortAttributeByStat")
    print("WaveClearPercentageWithWeapon")
    print("Quit")
    mode = input("What would you like to do: ")

if __name__ == "__main__":
    print("All")
    print("User")
    scope: str = input("Pick an analysis scope: ")
    dataFile: str = core.init(
        scope, ujson.load(open("keys.json", "r"))["statink_key"])
    data: List[List[bytes]] = [core.loadJobsFromFile(dataFile)]
    # Apply user-selected filters until the user declines.
    while input("Add a filter [Y/N]: ") == "Y":
        data = filterBy(data)
    processData(data)
#!/usr/bin/python2 # clear.py # nroberts 4/16/2017 # clears bridge of all lights import core if __name__ == "__main__": core.init(upload=False, run=False, wipe=True, fire=False)
import core as cuda

# Initialise the driver, grab device 0 and create a context on it.
# presumably a PyCUDA-style binding — confirm against the core module
cuda.init()
dev = cuda.get_device(id=0)
ctx = dev.ctx_create()
"""Functions used for initializing and running vision thread.""" from __future__ import print_function from __future__ import division from __future__ import absolute_import import os import sys import traceback import core core.init() # this has to be run before anything else can be imported import memory import mem_objects # import sys, os, traceback import logging logging.disable(logging.ERROR) import lights import primary_bvr as behavior import cfgwalk import cfgmap import UTdebug import cfglocalization def init(): """Initialize global information.""" global firstFrame firstFrame = True initMemory() initNonMemory() print("Python initialized")
import core
import sys
from core import getValMultiDimensional
from objects import Job
import gzip
import os
import ujson
import numpy as np
from typing import List, Dict, Union, cast

"""
Get the overviews for the top 100 most prolific players and write it to
reports/players.txt.
"""
startFile: str = core.init("All", "data/")
usersDetails: dict = {}
# Key paths of the stats to aggregate per player.
stats: List[List[Union[str, int]]] = [
    ["clear_waves"],
    ["my_data", "golden_egg_delivered"],
    ["my_data", "power_egg_collected"],
    ["my_data", "rescue"],
    ["my_data", "death"],
    ["danger_rate"],
]
with gzip.open(startFile) as reader:
    for line in reader:
        job = Job(**ujson.loads(line))
        userId = job.my_data.splatnet_id
        if userId not in usersDetails:
            usersDetails[userId] = {
                "id": userId,
                # NOTE(review): snippet truncated mid-dict in the source.