Example #1
    def lastPlayedCallback(self, selection):
        self.subComponent = None
        if selection is not None:
            if selection["type"] == "native":
                Runner.runNative(selection)
            elif selection["type"] == "emulator":
                Runner.runEmu(selection, selection["rom"])
Example #2
    def main_wrapper():
        """Wrapper around the main call - converts input arguments"""
        if "-h" in sys.argv[1:]:
            print_help(sys.argv)
            sys.exit( 0 )
        elif len(sys.argv) != 7:
            print "Invalid number of arguments"
            print_help(sys.argv)
            sys.exit( 1 )
        else:
            epoch_budget = convert( sys.argv[1] )
            count = convert( sys.argv[2] )

            gen_str = sys.argv[3].split(":")
            gen_args = map( convert, gen_str[1:] )
            gen_type = gen_str[0]

            agent_str = sys.argv[4].split(":")
            agent_args = map( convert, agent_str[1:] )
            agent_type = Runner.load_agent( agent_str[0] )

            env_str = sys.argv[5].split(":")
            env_args = map( convert, env_str[1:] )
            env_type = Runner.load_env( env_str[0] )

            file_prefix = sys.argv[ 6 ]

            main( epoch_budget, count, gen_type, gen_args, agent_type, agent_args, env_type, env_args, file_prefix )
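For context, the wrapper above expects six positional arguments, with the generator, agent, and environment each encoded as a colon-separated "type:arg1:arg2" token. A minimal sketch of the convert() helper it leans on, plus an illustrative invocation; the helper's real definition is not shown in these examples, so both are assumptions:

def convert(token):
    # Hypothetical stand-in for the convert() used above: try int, then
    # float, then fall back to the raw string.
    for cast in (int, float):
        try:
            return cast(token)
        except ValueError:
            pass
    return token

# Illustrative invocation (script and component names are assumptions):
#   python testbed.py 1000 50 point:3 egreedy:0.1 gridworld:10 results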
Example #3
def ingestSheet(sheet, enum_gender):
    runners = []
    number_of_rows = sheet.nrows
    number_of_columns = sheet.ncols
    invalidRunnersCount = 0
    for row in range(1, number_of_rows):
        parseError = False
        values = []
        for col in range(number_of_columns):
            value = sheet.cell(row, col).value
            try:
                sanitized_value = Runner.col_sanitizer(col, value)
                values.append(sanitized_value)
            except Exception:
                parseError = True
                values.append(value)
        values.append(enum_gender)
        if parseError:
            parseError_Log.critical(Runner.Runner(*values))
            invalidRunnersCount += 1
        else:
            runners.append(Runner.Runner(*values))

    parseError_Log.critical("Invalid %s Runners Count: %d" %
                            (enum_gender, invalidRunnersCount))
    return runners
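The sheet argument above follows the xlrd worksheet API (nrows, ncols, cell(row, col).value). A minimal usage sketch, assuming xlrd and illustrative file and gender values:

import xlrd  # assumed backend, based on the nrows/ncols/cell API used above

book = xlrd.open_workbook("race_results.xls")  # illustrative file name
male_runners = ingestSheet(book.sheet_by_index(0), "Male")  # "Male" is an assumed enum_gender value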
Example #4
def handle(msg):
    chat_id = msg['chat']['id']
    command = msg['text']

    print('Received:')
    print(command)

    # The following if statement checks for the triggering command; the block
    # then carries out the corresponding actions.
    if command == '/get':
        _bot.sendMessage(chat_id, str('Processing Your Request...'))

        Runner.run()  # triggering the main sequence here

        _bot.sendMessage(
            chat_id,
            str('Your Command has been Processed! Check your Mailbox!'))

    # To avoid a security breach, this crude elif block shuts the Pi down
    # entirely. It requires a passcode, _emer, known only to the user.
    # Attempts to stop the telebot thread cleanly have so far been
    # unsuccessful; this method should be recast.

    elif command == _emer:  # Requires more Security
        _bot.sendMessage(chat_id, str('Halting the Code'))
        _stop_and_shutdown()
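The msg['chat']['id'] / msg['text'] access pattern matches the telepot library. A sketch of how handle() is typically wired up, assuming telepot; the token is a placeholder:

import telepot
from telepot.loop import MessageLoop

_bot = telepot.Bot("<BOT-TOKEN>")          # placeholder token
MessageLoop(_bot, handle).run_as_thread()  # route incoming messages to handle()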
Example #5
    def main_wrapper():
        """Wrapper around the main call - converts input arguments"""
        if "-h" in sys.argv[1:]:
            print_help(sys.argv)
            sys.exit(0)
        elif len(sys.argv) != 7:
            print "Invalid number of arguments"
            print_help(sys.argv)
            sys.exit(1)
        else:
            iterations = convert(sys.argv[1])
            ensembles = convert(sys.argv[2])
            episodes = convert(sys.argv[3])

            agent_str = sys.argv[4].split(":")
            agent_args = map(convert, agent_str[1:])
            agent_type = Runner.load_agent(agent_str[0])

            env_str = sys.argv[5].split(":")
            env_args = map(convert, env_str[1:])
            env_type = Runner.load_env(env_str[0])

            file_prefix = sys.argv[6]

            main(iterations, ensembles, episodes, agent_type, agent_args,
                 env_type, env_args, file_prefix)
Example #6
    def main_wrapper():
        """Wrapper around the main call - converts input arguments"""
        if "-h" in sys.argv[1:]:
            print_help(sys.argv)
            sys.exit( 0 )
        elif len(sys.argv) != 7:
            print "Invalid number of arguments"
            print_help(sys.argv)
            sys.exit( 1 )
        else:
            iterations = convert( sys.argv[1] )
            ensembles = convert( sys.argv[2] )
            episodes = convert( sys.argv[3] )

            agent_str = sys.argv[4].split(":")
            agent_args = map( convert, agent_str[1:] )
            agent_type = Runner.load_agent( agent_str[0] )

            env_str = sys.argv[5].split(":")
            env_args = map( convert, env_str[1:] )
            env_type = Runner.load_env( env_str[0] )

            file_prefix = sys.argv[ 6 ]

            main( iterations, ensembles, episodes, agent_type, agent_args, env_type, env_args, file_prefix )
Example #7
    def customListCallback(self, selection):
        self.subComponent = None
        print(json.dumps(selection, indent=2))
        if selection is not None:
            if selection["type"] == "native":
                Runner.runNative(selection)
            elif selection["type"] == "emulator":
                Runner.runEmu(selection, selection["rom"])
Example #8
def main(cfg: DictConfig) -> None:

    mlflow.set_tracking_uri(cfg.params.tracking_uri)
    mlflow.set_experiment(cfg.params.experiment_name)
    mlflow.start_run(run_name=cfg.params.run_name)
    mlflow.log_params(cfg.params)
    mlflow.log_param("cwd", os.getcwd())
    mlflow.log_artifacts(".hydra", "hydra")

    runner = Runner(cfg)
    runner.run()
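The DictConfig annotation and the ".hydra" artifact directory suggest this is a Hydra entry point. A sketch of the usual wiring, assuming an illustrative config location (config_path and config_name are not taken from the source):

import hydra
from omegaconf import DictConfig

@hydra.main(config_path="conf", config_name="config")  # illustrative values
def main(cfg: DictConfig) -> None:
    ...  # body as in the example above

if __name__ == "__main__":
    main()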
Example #9
    def run(self):
        """
        call the QC code & confirm the running is ok. if not throw error messages.
        """
        # collect input file list
        filelist = tools.load_data("filelist.dat")
        # Run gaussian
        g = Runner()
        g.caller(filename=filelist)

        return
Example #10
    def grabRunnerJobs():
        print("")
        print("Sending Jobs to the runner...")

        # Sample data before the api starts feeding the client
        runnerJobs_JSON = {}
        runnerJobs_JSON['numJobs'] = 4
        runnerJobs_JSON['0'] = r"C:\Temp\DocumentA.rtf"
        runnerJobs_JSON['1'] = r"C:\Temp\DocumentB.docx"
        runnerJobs_JSON['2'] = r"C:\Temp\DocumentC.pdf"
        runnerJobs_JSON['3'] = r"C:\Temp\DocumentD.txt"
        runnerJobs_JSON['checksum'] = "MD5"
        Runner.runJobs(runnerJobs_JSON)
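The payload above indexes documents by stringified position. An equivalent construction from a plain list, sketched with the same sample paths:

paths = [r"C:\Temp\DocumentA.rtf", r"C:\Temp\DocumentB.docx",
         r"C:\Temp\DocumentC.pdf", r"C:\Temp\DocumentD.txt"]
runnerJobs_JSON = {str(i): p for i, p in enumerate(paths)}
runnerJobs_JSON['numJobs'] = len(paths)
runnerJobs_JSON['checksum'] = "MD5"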
Example #11
    def __init__(self):
        self.runner = Runner()  # Runner game we will be using
        # Hyperparameters
        self.input = 3
        self.middle = 3
        self.output = 3
        self.learning_rate = 0  # Will be set by the user
        # In the output the player can go up or down
        # Weights for each layer, based on the hyperparameters.
        # Weights are initialized randomly rather than to 0.
        self.weights_1 = np.random.randn(self.input, self.middle)
        self.weights_2 = np.random.randn(self.middle, self.output)
        # In case you want to look at the weights and compare them
        print(self.weights_1)
        print(self.weights_2)
Example #12
def learn_point_option( env, s, epochs, agent_type, agent_args ):
    """Learn an option to a state"""
    # Reset the rewards of the environment to reward reaching s
    env.R = {}
    for s_ in xrange( env.S ):
        env.R[ (s_,s) ] = env.domain.REWARD_SUCCESS - env.domain.REWARD_BIAS
    
    agent = agent_type( env.Q, *agent_args )
    Runner.run( env, agent, epochs )
    pi = agent.greedy_policy()

    I = set( pi.keys() )
    I.remove( s )
    B = { s: 1.0 }

    return Option( I, pi, B )
Example #13
    def run(self):
        r = Runner.DynamicRunner(self.net, self.runFile)

        #start simulation and log simTime
        startTime = time.time()
        r.createRandomTrips(self.pop)
        rc = RouteChanger.RouteChanger()
        rc.getFile()
        rc.writeFile()
        r.runNoGui()
        simTime = time.time() - startTime

        #Analyse for traffic density fitness
        densityFitness = getFitness.main()

        #Create FileEditor object
        f = FileEditor.FileEditor(simTime, densityFitness)
        f.getOutput()
        f.getTimeStamps()
        #f.setBreakpoints()
        #f.writeFile()

        if f.getTeleports() > 0:
            fitness = round((densityFitness / f.getTeleports()), 2)
        else:
            fitness = densityFitness
        print("Fitness: " + str(fitness))
        print("Teleports: " + str(f.getTeleports()))

        return fitness
Example #14
    def compile(self):
        debug('build: compile called')
        self.flush()
        self.generator = Runner.Parallel(self, Options.options.jobs)

        def dw(on=True):
            if Options.options.progress_bar:
                if on: sys.stderr.write(Logs.colors.cursor_on)
                else: sys.stderr.write(Logs.colors.cursor_off)

        debug('build: executor starting')
        back = os.getcwd()
        os.chdir(self.bldnode.abspath())
        try:
            try:
                dw(on=False)
                self.generator.start()
            except KeyboardInterrupt:
                dw()
                self.save()
                raise
            except Exception:
                dw()
                raise
            else:
                dw()
                self.save()
            if self.generator.error:
                raise BuildError(self, self.task_manager.tasks_done)
        finally:
            os.chdir(back)
Example #15
    def run(self):
        r = Runner.Runner(self.net, self.runFile)
        i = 0
        while i < self.simNum:
            #start simulation and log simTime
            print("Count: " + str(i))
            startTime = time.time()
            r.createRandomTrips()
            r.runNoGui()
            simTime = time.time() - startTime

            #Analyse for traffic density fitness
            densityFitness = getFitness.main()

            #Create FileEditor object
            f = FileEditor.FileEditor(simTime, densityFitness)
            f.getOutput()
            f.getTimeStamps()
            f.setBreakpoints()
            f.writeFile()

            if f.getTeleports() > 0:
                fitness = round((densityFitness / f.getTeleports()), 2)
            else:
                fitness = densityFitness
            f.setFitness(fitness)

            print("SimRun F: " + str(fitness))
            #if collisions occur copy route data, warning file and breakpoint file
            print("Collision Num: " + str(f.getCollisionWarnings()))
            if f.getWarnings() > 0 and f.getTeleports() < 10:
                f.copyDir()
                f.copyFile()
                f.writeBreakPointFile()
            i += 1
Example #16
def train(config_filename, resume):
    """
    Entry point to start training run(s).
    """
    configs = [load_config(f) for f in config_filename]
    for config in configs:
        Runner(config).train(resume)
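config_filename is evidently an iterable of paths, one config per training run. A hedged sketch of a minimal load_config, assuming YAML configs; the project's real loader is not shown in these examples:

import yaml

def load_config(path):
    # Illustrative stand-in for the project's config loader.
    with open(path) as f:
        return yaml.safe_load(f)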
Example #17
def learn_point_option(env, s, epochs, agent_type, agent_args):
    """Learn an option to a state"""
    # Reset the rewards of the environment to reward reaching s
    env.R = {}
    for s_ in xrange(env.S):
        env.R[(s_, s)] = env.domain.REWARD_SUCCESS - env.domain.REWARD_BIAS

    agent = agent_type(env.Q, *agent_args)
    Runner.run(env, agent, epochs)
    pi = agent.greedy_policy()

    I = set(pi.keys())
    I.remove(s)
    B = {s: 1.0}

    return Option(I, pi, B)
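Option(I, pi, B) bundles an initiation set, an option policy, and per-state termination probabilities. The class itself is not shown in these examples; a compatible illustrative container:

from collections import namedtuple

# I: initiation set, pi: option policy, B: termination probabilities.
Option = namedtuple("Option", ["I", "pi", "B"])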
Example #18
def main(iterations, ensembles, episodes, agent_type, agent_args, env_type,
         env_args, file_prefix):
    """RL Testbed.
    @arg iterations: Number of environments to average over
    @arg ensembles: Number of bots to average over
    @arg episodes: Number of episodes to run for
    @arg agent_type: String name of agent
    @arg agent_args: Arguments to the agent constructor
    @arg env_type: String name of environment
    @arg env_args: Arguments to the environment constructor
    """
    # Load agent and environment

    progress = ProgressBar(0, ensembles * iterations, mode='fixed')
    # Needed to prevent glitches
    oldprog = str(progress)

    # Counters
    ret = np.zeros(episodes, dtype=float)
    min_ = np.inf * np.ones(episodes, dtype=float)
    max_ = -np.inf * np.ones(episodes, dtype=float)
    var = np.zeros(episodes, dtype=float)

    env = env_type.create(*env_args)
    for i in xrange(1, iterations + 1):
        env = env.domain.reset_rewards(env, *env_args)

        ret_ = np.zeros(episodes, dtype=float)
        # Initialise environment and agent
        for j in xrange(1, ensembles + 1):
            agent = agent_type(env.Q, *agent_args)
            ret__ = Runner.run(env, agent, episodes)
            ret__ = np.cumsum(ret__)
            # Add to ret_
            ret_ += (ret__ - ret_) / j

            # print progress
            progress.increment_amount()
            if oldprog != str(progress):
                print progress, "\r",
                sys.stdout.flush()
                oldprog = str(progress)

        ret += (ret_ - ret) / i
        min_ = np.min(np.vstack((min_, ret_)), axis=0)
        max_ = np.max(np.vstack((max_, ret_)), axis=0)

        var_ = np.power(ret_, 2)
        var += (var_ - var) / i
    print "\n"

    var = np.sqrt(var - np.power(ret, 2))

    f = open("%s-return.dat" % (file_prefix), "w")
    # Print ret
    for i in xrange(len(ret)):
        f.write("%d %f %f %f %f\n" % (i + 1, ret[i], min_[i], max_[i], var[i]))
    f.close()
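Each line of the "<file_prefix>-return.dat" file written above holds the episode index, mean cumulative return, minimum, maximum, and standard deviation. A small sketch of loading it back, with an assumed prefix:

import numpy as np

# Columns match the "%d %f %f %f %f" format written above.
episode, mean_ret, lo, hi, std = np.loadtxt("results-return.dat", unpack=True)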
Example #19
	def __init__(self, runner = None, listeners = None):

		if not runner:
			runner = Runner.DefaultRunner()
		self.Runner = runner
		self.Listeners = []
		if listeners:
			self.Listeners.extend(listeners)
Example #20
def main( iterations, ensembles, episodes, agent_type, agent_args, env_type, env_args, file_prefix ):
    """RL Testbed.
    @arg iterations: Number of environments to average over
    @arg ensembles: Number of bots to average over
    @arg episodes: Number of episodes to run for
    @arg agent_type: String name of agent
    @arg agent_args: Arguments to the agent constructor
    @arg env_type: String name of environment
    @arg env_args: Arguments to the environment constructor
    """
    # Load agent and environment

    progress = ProgressBar( 0, ensembles*iterations, mode='fixed' )
    # Needed to prevent glitches
    oldprog = str(progress)

    # Counters
    ret = np.zeros( episodes, dtype=float )
    min_, max_ = np.inf * np.ones( episodes, dtype=float) , -np.inf * np.ones( episodes, dtype=float)
    var = np.zeros( episodes, dtype=float )

    env = env_type.create( *env_args )
    for i in xrange( 1, iterations+1 ):
        env = env.domain.reset_rewards( env, *env_args )

        ret_ = np.zeros( episodes, dtype=float )
        # Initialise environment and agent
        for j in xrange( 1, ensembles+1 ):
            print "iter, ens", i, j
            agent = agent_type( env.Q, *agent_args )
            ret__ = Runner.run( env, agent, episodes )
            ret__ = np.cumsum( ret__ )
            # Add to ret_
            ret_ += (ret__ - ret_) / j

            # print progress
            progress.increment_amount()
            if oldprog != str(progress):
                print progress, "\r",
                sys.stdout.flush()
                oldprog=str(progress)

        ret += (ret_ - ret) / i
        min_ = np.min( np.vstack( ( min_, ret_ ) ), axis=0 )
        max_ = np.max( np.vstack( ( max_, ret_ ) ), axis=0 )

        var_ = np.power( ret_, 2 )
        var += (var_ - var) / i
    print "\n"

    var = np.sqrt( var - np.power( ret, 2 ) )

    f = open("%s-return.dat"%( file_prefix ), "w")
    # Print ret
    for i in xrange( len( ret ) ):
        f.write( "%d %f %f %f %f\n"%( i+1, ret[ i ], min_[i], max_[i], var[ i ] ) )
    f.close()
Example #21
def learn_path_option( env, start, dest, epochs, agent_type, agent_args ):
    """Learn an option from a start state to a destination state"""
    # Reset the rewards of the environment to reward reaching dest
    env.R = {}
    env.start_set = set([start])
    env.end_set = set([dest])
    for s_ in xrange( env.S ):
        env.R[ (s_,dest) ] = env.domain.REWARD_SUCCESS - env.domain.REWARD_BIAS
    
    agent = agent_type( env.Q, *agent_args )
    Runner.run( env, agent, epochs )
    pi = agent.greedy_policy()

    I = set( [start] )
    I.discard( dest )   # guard against start == dest
    B = { dest: 1.0 }

    return Option( I, pi, B )
Example #22
def learn_path_option(env, start, dest, epochs, agent_type, agent_args):
    """Learn an option to a state"""
    # Reset the rewards of the environment to reward the
    env.R = {}
    env.start_set = set([start])
    env.end_set = set([dest])
    for s_ in xrange(env.S):
        env.R[(s_, dest)] = env.domain.REWARD_SUCCESS - env.domain.REWARD_BIAS

    agent = agent_type(env.Q, *agent_args)
    Runner.run(env, agent, epochs)
    pi = agent.greedy_policy()

    I = set([start])
    I.discard(dest)  # guard against start == dest
    B = {dest: 1.0}

    return Option(I, pi, B)
Example #23
def rageltaskfun(task):
	env = task.env
	ragelbin = env.get_flat('RAGEL')
	if ragelbin:
		cmd = '%s -s -o %s -C -T1 %s' % (ragelbin, task.outputs[0].bldpath(env), task.inputs[0].srcpath(env))
	else:
		src = task.inputs[0].srcpath(env)
		src = src[:src.rfind('.')] + '.c'
		cmd = 'cp %s %s' % (src, task.outputs[0].bldpath(env))
	return Runner.exec_command(cmd)
Example #24
def load_runner(ID,
                data_path='/local/scratch/public/va304/dagan/runners/data'):
    """
    Loades a runner object. 
    
    This function should be updated
    """
    fname = 'runner_%d.npz' % (ID)
    fname = join(data_path, fname)
    data = np.load(fname)

    lr = float(data['lr'])
    momentum = float(data['momentum'])
    smoothing_eps = float(data['smoothing_eps'])
    la = float(data['la'])
    v = data['v']
    r = data['r']
    x0 = data['x0']
    mask = data['mask']
    optimizer = str(data['optimizer'])
    backlog = str(data['backlog'])
    max_itr = int(data['max_itr'])
    max_r_norm = float(data['max_r_norm'])
    max_diff_norm = float(data['max_diff_norm'])
    ps = str(data['ps'])
    psf = float(data['psf'])
    ws = str(data['ws'])
    wsf = float(data['wsf'])

    length_r = r.shape[0]
    r_list = []
    v_list = []
    for i in range(length_r):
        r_list.append(r[i])
        v_list.append(v[i])

    runner = Runner.Runner(
        max_itr,
        max_r_norm,
        max_diff_norm,
        la=la,
        warm_start=ws,  # ||f(x+r) - f(x) + p||_{2}^{2} 
        warm_start_factor=wsf,
        perp_start=ps,
        perp_start_factor=psf,
        optimizer=optimizer,
        momentum=momentum,
        smoothing_eps=smoothing_eps,
        learning_rate=lr)
    runner.backlog = backlog
    runner.v = v_list
    runner.r = r_list
    runner.x0 = [x0]
    runner.mask = [mask]
    return runner
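A usage sketch, assuming a runner with the given ID was previously saved under the default data_path (both values are illustrative):

runner = load_runner(7)  # illustrative ID
print(runner.backlog)
print(len(runner.r), "perturbation iterates loaded")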
Example #25
def PlatformInitialize():
	Context.Runner = Runner.DefaultRunner()
	if os.name == "posix":
		pass
	elif os.name == "nt":
		print "WARNING: Win32 support is experimental."
		import signal
		signal.signal(signal.SIGBREAK, signal.default_int_handler)
	else:
		print "Unsupported system %s" % os.name
		raise ValueError
Example #26
    def __init__(self):
        self.runner = Runner()
        self.input = 3
        self.middle = 3
        self.output = 3
        # In the output the player can go up or down
        self.weights_1 = np.random.randn(self.middle, self.input)
        self.weights_2 = np.random.randn(self.middle, self.output)
        weight_fileA = open("weights_1.txt", "r")
        weight_fileB = open("weights_2.txt", "r")
        '''
        weightA = float(weight_fileA.readline())
        weightB = float(weight_fileB.readline())
        if weightA != None:
            self.weights_1 = np.array([weightA,weightA,weightA])
        if weightB != None:
            self.weights_2 = np.array([weightB,weightB,weightB])
        '''
        weight_fileA.close()  # close the handles; the read above is disabled
        weight_fileB.close()
        print(self.weights_1)
        print(self.weights_2)
Example #27
    def __init__(self, screen, suspend):
        print("Main Menu Init")
        self.screen = screen
        self.suspend = suspend
        self.banderole = pygame.Surface((self.config["screenWidth"], 80),
                                        pygame.SRCALPHA)

        Runner.setMainMenu(self)

        self.systems = [None] * len(self.config["mainMenu"])
        self.systembackgrounds = [None] * len(self.config["mainMenu"])
        self.currentIndex = self.getNext()
        self.currentIndex = self.getPrev()

        self.header = Header.Header(24)

        res = ResumeHandler.getResumeFile()
        if (res != None and res["mainIndex"] != None and res["main"] != None):
            self.currentIndex = res["mainIndex"]

            if ("useSelection" in self.config["mainMenu"][self.currentIndex]
                    and
                    self.config["mainMenu"][self.currentIndex]["useSelection"]
                    != None):
                if (self.config["mainMenu"][self.currentIndex]["useSelection"]
                        == True):
                    self.openSelection(
                        self.config["mainMenu"][self.currentIndex])
                else:
                    ResumeHandler.clearResume()

            else:
                self.openSelection(self.config["mainMenu"][self.currentIndex])

        else:
            if ("firstStart" in self.config and self.config["firstStart"]):
                self.overlay = InfoOverlay.InfoOverlay("theme/info.png",
                                                       self.infoCallback)

        self.loadSystemImages()
Example #28
    def emuSelectionCallback(self, index, selection):

        if (self.subComponent != None):
            self.subComponent.setOverlay(None)
        else:
            self.overlay = None

        if (index != -1):
            self.subComponent = None

            if (self.selectedFile != None):
                data = copy.deepcopy(
                    self.config["mainMenu"][self.currentIndex])
                data["cmd"] = data["emu"][index]["cmd"]
                data["workingDir"] = data["emu"][index]["workingDir"]
                if "params" in data["emu"][index]:
                    data["params"] = data["emu"][index]["params"]
                Runner.runEmu(data, self.selectedFile)

                # moved inside the guard: "data" is only defined when a file is selected
                print(
                    str(selection) + " " + str(index) + " " +
                    str(data["emu"][index]))
Example #29
    def contextMenuCallback(self, selection, text):
        self.overlay = None

        if (text == "Mount USB"):
            print("Mounting USB")
            options = {}
            options["cmd"] = "/usr/bin/udc_connect.sh"
            Runner.runNative(options)

        if (text == "Poweroff"):
            if (platform.processor() == ""):
                subprocess.Popen(["sync"])
                subprocess.Popen(["poweroff"])
            else:
                print("Poweroff")

        if (text == "Reboot"):
            if (platform.processor() == ""):
                subprocess.Popen(["sync"])
                subprocess.Popen(["reboot"])
            else:
                print("reboot")
Example #30
    def emulatorCallback(self,
                         selectedFile,
                         key=Keys.DINGOO_BUTTON_A,
                         direct=False):
        self.selectedFile = selectedFile

        if ("emu" in self.config["mainMenu"][self.currentIndex]
                and selectedFile != None):
            emus = []
            for e in self.config["mainMenu"][self.currentIndex]["emu"]:
                emus.append(e["name"])

            if (len(emus) > 0 and key == Keys.DINGOO_BUTTON_START):
                overlay = SelectionMenu.SelectionMenu(
                    self.screen, emus, self.emuSelectionCallback)
                if (direct):
                    self.overlay = overlay
                else:
                    self.subComponent.setOverlay(overlay)

            elif (len(emus) > 0):
                self.emuSelectionCallback(0, emus[0])

            elif (len(emus) == 0):
                self.subComponent = None
                self.overlay = ConfirmOverlay.ConfirmOverlay(
                    "Emu not configured!", (255, 255, 255),
                    [("theme/a_button.png", "OK")], self.clearOverlay)
                RenderControl.setDirty()

            return

        # old config
        self.subComponent = None
        if (selectedFile != None):
            Runner.runEmu(self.config["mainMenu"][self.currentIndex],
                          selectedFile)
Example #31
def load_runner(ID, data_path=deep_mri_runner_path):
    fname = 'data/runner_%d.npz' % (ID)
    fname = join(data_path, fname)
    data = np.load(fname)

    lr = float(data['lr'])
    momentum = float(data['momentum'])
    smoothing_eps = float(data['smoothing_eps'])
    la = float(data['la'])
    v = data['v']
    r = data['r']
    x0 = data['x0']
    mask = data['mask']
    optimizer = str(data['optimizer'])
    backlog = str(data['backlog'])
    max_itr = int(data['max_itr'])
    max_r_norm = float(data['max_r_norm'])
    max_diff_norm = float(data['max_diff_norm'])
    ps = str(data['ps'])
    psf = float(data['psf'])
    ws = str(data['ws'])
    wsf = float(data['wsf'])

    length_r = r.shape[0]
    r_list = []
    v_list = []
    for i in range(length_r):
        r_list.append(r[i])
        v_list.append(v[i])

    runner = Runner.Runner(
        max_itr,
        max_r_norm,
        max_diff_norm,
        la=la,
        warm_start=ws,  # ||f(x+r) - f(x) + p||_{2}^{2} 
        warm_start_factor=wsf,
        perp_start=ps,
        perp_start_factor=psf,
        optimizer=optimizer,
        momentum=momentum,
        smoothing_eps=smoothing_eps,
        learning_rate=lr)
    runner.backlog = backlog
    runner.v = v_list
    runner.r = r_list
    runner.x0 = [x0]
    runner.mask = [mask]
    return runner
Example #32
def test_bar_model(symbol="AAPL", order_size=10, split_ratio=0.8, epochs=1,
                   end_date=(datetime.date.today()).strftime('%Y%m%d'),
                   n_samples=1000, isTrain=False):
    _symbol = symbol
    _split_ratio = split_ratio
    _epochs = epochs
    data = GetData.get_A_data(ts_code=_symbol, end_date=end_date, n_samples=n_samples)
    # yesterday's one-minute bar data
    bar_data = GetData.get_latest_bar(symbol=symbol, trade_date=end_date, freq="1M")
    print("test_latest_model:bar_data.shape:", bar_data.shape)
    print(data.columns.values)
    # Data preprocessing: split into training and test sets according to the
    # user-supplied split_ratio.
    # train, test, date_train, date_test = DataPreprocess.data_preprocess(data, _split_ratio)
    train, test, date_train, date_test = DataPreprocess.data_A_preprocess(data, _split_ratio)
    bar_test, time_test = DataPreprocess.data_A_bar(bar_data)
    print("bar_test.shape:", bar_test.shape, "time_test.shape:", time_test.shape)

    # Build the training and test environments
    # env_test = StockEnvironment.StockEnv(test, order_size)
    env_test = StockEnvironment.StockEnv(bar_test, order_size)
    env_train = StockEnvironment.StockEnv(train, order_size)

    # Initialize the runner
    runner = Runner.Runner()
    trained_model = None
    if not isTrain:
        # List all files in the current directory ('.')
        for new_dir in os.listdir(os.curdir):
            # Prefer a "success" model for this symbol if one exists
            if new_dir.startswith('success-model-{}'.format(_symbol)):
                trained_model = new_dir
        # If there is no "success" model, fall back to a trained "train" model
        if trained_model is None:
            for dir_name in os.listdir(os.curdir):
                if dir_name.startswith('train-model-{}'.format(_symbol)):
                    trained_model = dir_name
        if trained_model is None:
            print("No model available for prediction; training a model now")
            trained_model = runner.trainer(_symbol, env_train, _epochs, order_size)
    else:
        # Train the DQN network; return the trained model and the final
        # training results, and plot the training progress.
        trained_model = runner.trainer(_symbol, env_train, _epochs)
    print('Model Name: {}'.format(trained_model))
    # Evaluate the test data with the trained Q-network, producing the final
    # predicted trading actions, and plot the test results.
    fortune, act, reward, cash = runner.tester(env_test, trained_model, order_size)
    print("profitRatio:{},fortune:{},act:{},reward:{},cash:{}".format(fortune[-1] / 100000.0, fortune[-1], act[-1],
                                                                      reward[-1], cash[-1]))
    print("fortune len:", len(fortune))
    return bar_test, act, fortune, cash, time_test
Example #33
def uic_build(task):
    # outputs : 1. hfile 2. cppfile

    base = task.m_outputs[1].m_name
    base = base[:len(base) - 4]

    inc_kde = '#include <tdelocale.h>\n#include <kdialog.h>\n'
    inc_moc = '#include "%s.moc"\n' % base

    ui_path = task.m_inputs[0].bldpath(task.m_env)
    h_path = task.m_outputs[0].bldpath(task.m_env)
    cpp_path = task.m_outputs[1].bldpath(task.m_env)

    qtplugins = task.m_env['QTPLUGINS']
    uic_command = task.m_env['UIC']

    comp_h = '%s -L %s -nounload -o %s %s' % (uic_command, qtplugins, h_path,
                                              ui_path)
    comp_c = '%s -L %s -nounload -tr tr2i18n -impl %s %s >> %s' % (
        uic_command, qtplugins, h_path, ui_path, cpp_path)

    ret = Runner.exec_command(comp_h)
    if ret: return ret

    dest = open(cpp_path, 'w')
    dest.write(inc_kde)
    dest.close()

    ret = Runner.exec_command(comp_c)
    if ret: return ret

    dest = open(cpp_path, 'a')
    dest.write(inc_moc)
    dest.close()

    return ret
Example #34
	def compile(self):
		"""The cache file is not written if nothing was build at all (build is up to date)"""
		debug('build: compile called')

		"""
		import cProfile, pstats
		cProfile.run("import Build\nBuild.bld.flush()", 'profi.txt')
		p = pstats.Stats('profi.txt')
		p.sort_stats('cumulative').print_stats(80)
		"""
		self.flush()
		#"""

		self.generator = Runner.Parallel(self, Options.options.jobs)

		def dw(on=True):
			if Options.options.progress_bar:
				if on: sys.stderr.write(Logs.colors.cursor_on)
				else: sys.stderr.write(Logs.colors.cursor_off)

		debug('build: executor starting')

		back = os.getcwd()
		os.chdir(self.bldnode.abspath())

		try:
			try:
				dw(on=False)
				self.generator.start()
			except KeyboardInterrupt:
				dw()
				# if self.generator.processed != 1: TODO
				self.save()
				raise
			except Exception:
				dw()
				# do not store anything, for something bad happened
				raise
			else:
				dw()
				#if self.generator.processed != 1: TODO
				self.save()

			if self.generator.error:
				raise BuildError(self, self.task_manager.tasks_done)

		finally:
			os.chdir(back)
Example #35
def main(argv):
    '''
    Parse the command-line flags and decide:
    - whether to read the initial data from a file or from a database
    - whether to write the data to a file or to a database
    '''
    parser = argparse.ArgumentParser(description='Theatre')
    # Create two mutually exclusive option groups (where to read from and
    # where to write to).
    from_group = parser.add_mutually_exclusive_group()
    to_group = parser.add_mutually_exclusive_group()
    from_group.add_argument(
        '-x',
        '--xml',  # add the XML flag
        type=str,
        default='theatre.xml',  # this option is always active
        help='Read the initial data from an XML file')
    from_group.add_argument(
        '-s',
        '--sqlite',
        type=str,
        help='Read the initial data from an SQLite3 database')
    to_group.add_argument('-f',
                          '--toxml',
                          type=str,
                          help='Save the data to an XML file')
    to_group.add_argument('-d',
                          '--tosqlite',
                          type=str,
                          help='Save the data to an SQLite3 database')
    args = parser.parse_args()  # parse the arguments
    application = Runner.runner()

    if args.sqlite:
        print('Starting from SQLite3 database file: {}'.format(args.sqlite))
        application.run_from_sqlite(args.sqlite)
    else:
        print('Starting from XML file: {}'.format(args.xml))
        application.run_from_xml(args.xml, 'theatre')

    application.repl()

    if args.toxml:
        print('Saving to XML file: {}'.format(args.toxml))
        application.save_to_xml(args.toxml)
    if args.tosqlite:
        print('Saving to SQLite3 database file: {}'.format(args.tosqlite))
        application.save_to_sqlite(args.tosqlite)
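Typical invocations of this entry point, sketched as comments (the script name is an assumption):

# python theatre.py --xml theatre.xml --toxml out.xml
# python theatre.py --sqlite theatre.db --tosqlite out.db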
Example #36
def run_selections(daa, min_funcs, max_funcs, versions_per_size, runs_per_technique, seed_initial):
    print 'daa: ' + daa + '\tmin_funcs: ' + str(min_funcs) + '\tmax_funcs: ' \
        + str(max_funcs) + '\tversions_per_size: ' + str(versions_per_size) \
        + '\truns_per_technique: ' + str(runs_per_technique) \
        + '\tseed_initial: ' + str(seed_initial)
    random.seed(seed_initial)

    seeds = random.sample(xrange(0, 10000000), versions_per_size)
    print 'seeds: ',
    print seeds

    for i in range(min_funcs, max_funcs+1):

        aggregate_smash_counts = {}

        for j in range(0, versions_per_size):

            versiondir = versionbasedir + os.sep + 'v-' + str(i) + '-' + str(j)
            target_basename = versiondir + os.sep + 'eggshell'
            if not os.path.exists(versiondir):
                os.makedirs(versiondir)

            funcnames_all = os.listdir(funcdir)
            indices_sample = random.sample(xrange(0, len(funcnames_all)), i)

            funcnames_sample = []
            for index in indices_sample:
                funcnames_sample.append(funcnames_all[index])

            smash_counts = Runner.run_single(daa, seeds[j], funcnames_sample, versiondir, target_basename, runs_per_technique)

            for technique, count in smash_counts.iteritems():
                if technique not in aggregate_smash_counts:
                    aggregate_smash_counts[technique] = 0
                aggregate_smash_counts[technique] += count
                print '[Runner] ' + str(i) + '\t' + technique + '\t' + str(count) \
                    + '\t' + str(float(count) / float(runs_per_technique))

        for technique, count in aggregate_smash_counts.iteritems():
            print '[Selector] ' + str(i) + '\t' + technique + '\t' + str(count) \
                + '\t' + str(float(count) / float(versions_per_size * runs_per_technique))

        sys.stdout.flush()
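A hedged invocation sketch; the values are illustrative, and versionbasedir and funcdir must already exist as module-level globals:

# Sizes 1..5, 3 versions per size, 100 runs per technique, fixed seed.
run_selections('daa-default', 1, 5, 3, 100, 42)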
Example #37
def fa(*args, **kwargs):
    from datetime import datetime
    log.info("A: {}".format(datetime.now().isoformat()))


def fb(*args, **kwargs):
    from datetime import datetime
    import random
    import time  # needed for time.sleep below; assumed not imported at module level
    if random.randint(0, 100) > 90:
        time.sleep(0.03)
    log.info("B: {}".format(datetime.now().isoformat()))


def echo(*args, **kwargs):
    log.info("Echo args: {}, kwargs: {}".format(args, kwargs))


if __name__ == "__main__":
    setup_logging("/tmp/cdic_async_tasks.log")

    r = Runner(None)

    # r.add_periodic_task(fc, None, None)
    r.add_periodic_task("A1", fa, 50)
    # r.add_periodic_task("A2", fa, 1, 50)
    # r.add_periodic_task("B", fb, 0.01)

    r.add_on_demand_task(OnDemandTask("echo", "echo:pubsub", echo, lambda msg: ((msg,), {})))

    r.start()
Example #38
    def run(self):
        setup_logging(app.config["ASYNC_LOG"])

        logging.getLogger("app.util.dockerhub").setLevel(logging.INFO)
        logging.getLogger("app.async.runner").setLevel(logging.INFO)
        logging.getLogger("app.logic.build_logic").setLevel(logging.INFO)

        r = Runner(app)

        r.add_periodic_task("Reschedule build task", ctx_wrapper(reschedule_stall_builds), 200)
        r.add_periodic_task("Schedule dh status update task",
                            ctx_wrapper(schedule_dh_status_updates), 600)
        r.add_periodic_task("Reschedule failed dockerhub creation task",
                            ctx_wrapper(reschedule_dockerhub_creation), 600)

        all_async_tasks = [
            run_build_task,
            create_dockerhub_task,
            update_dockerhub_build_status_task,
        ]

        for task in all_async_tasks:
            r.add_on_demand_task(task)

        r.start()
Example #39
def learn_options_from_small_world( epoch_budget, count, env, env_args, agent_type, agent_args, r, searches = 20, beta = 1.1 ):
    """
    Learn options according to the small-world distribution
    @r - exponent of the small-world length distribution
    @beta - oversampling factor for the options collected per search
    """

    g = env.to_graph()
    gr = g.reverse()
    S = range( len( g.nodes() ) )

    # Get all the edges in the graph
    max_length = np.power( 16, 1.0/r ) # fn of r
    path_lengths = nx.all_pairs_shortest_path_length( g, cutoff=max_length ).items()


    def extract_small_world_options( pi, Q, r ):
        """Extract n options from pi according to the small world distribution"""
        # Choose a state at random
        random.shuffle( S )
        
        for s in S:
            # Choose a s_ ~ P_r(s) if Q(s_,pi(s_)) > Q(s, pi(s))
            s_ = choose_small_world( path_lengths, s, r )
            if not s_: continue
            a = pi[s][0][0]
            a_ = pi[s_][0][0]
            if Q[s_][a_] > Q[s][a]:
                yield learn_option_from_policy( pi, Q, s, s_ )

    progress = ProgressBar( 0, count, mode='fixed' )
    oldprog = str(progress)

    options = []

    alpha = 1/float(searches)
    count_ = int(alpha*beta*count)
    # Evenly divide the epoch budget
    epochs = epoch_budget / searches
    for i in xrange( searches ):
        # Run an agent
        env = env.domain.reset_rewards( env, *env_args )
        agent = agent_type( env.Q, *agent_args ) 
        Runner.run( env, agent, epochs )

        # Extract a policy
        pi = agent.greedy_policy()
        options += list( itertools.islice( extract_small_world_options( pi, agent.Q, r ), count_ ) )

        # print progress
        progress.update_amount( len(options) )
        if oldprog != str(progress):
            print progress, "\r",
            sys.stdout.flush()
            oldprog=str(progress)
    print "\n"

    # We may have learnt a few extra, but that's ok; pick a random
    # @count of them
    random.shuffle( options )

    return options[:count]
Example #40
victory = False
grass = Ground()
grass.draw(background)
money = Coins()
changedrecs = []
guy = Person(50, 50)
guy.cinterval = 5000
guy.cs = 0
guy.maxcs = 5
pyramid = Pyramid(650, 30)
pbase = Base(650, 88)
#oldman = OldMan(random.randint(0, 783), random.randint(0, 577))
oldman = OldMan(100, 200)
clerk = Clerk(200, 100)
collector = Collector(200, 200)
runner = Runner(400, 400)
font = pygame.font.Font(None, 36)
text = font.render("Money: " + str(guy.cs), 1, (10, 10, 10))
peoplelist = [oldman, clerk, collector, runner]
blist = [pbase]
ctime = 0  # last coin-spawn timestamp; assumed initialisation (0 means "not started")
while True:
    grass.draw(background)
    screen.blit(background, (0, 0))
    guy.draw(screen)
    runner.move()
    money.drawAndCollision(screen, guy, changedrecs)
    if len(money.cs) < guy.maxcs:
        curtime = pygame.time.get_ticks()
        if ctime == 0:
            ctime = pygame.time.get_ticks()
        elif ctime != 0 and curtime - ctime >= guy.cinterval: