Example #1
def ingestSheet(sheet, enum_gender):
    runners = []
    number_of_rows = sheet.nrows
    number_of_columns = sheet.ncols
    invalidRunnersCount = 0
    for row in range(1, number_of_rows):
        parseError = False
        values = []
        for col in range(number_of_columns):
            value = sheet.cell(row, col).value
            try:
                sanitized_value = Runner.col_sanitizer(col, value)
                values.append(sanitized_value)
            except Exception:
                parseError = True
                values.append(value)
        values.append(enum_gender)
        if parseError:
            parseError_Log.critical(Runner.Runner(*values))
            invalidRunnersCount += 1
        else:
            runners.append(Runner.Runner(*values))

    parseError_Log.critical("Invalid %s Runners Count: %d" %
                            (enum_gender, invalidRunnersCount))
    return runners
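Note: col_sanitizer itself is not shown in this example. Below is a minimal sketch of the kind of per-column validator the loop above expects, living in the Runner module; the column layout (bib number, name, finish time) and the ValueError behaviour are assumptions, not the original implementation.

# Hypothetical sketch of the Runner module's col_sanitizer helper: a
# per-column validator that raises so the caller's try/except marks the
# whole row as invalid. Column layout is an assumption.
def col_sanitizer(col, value):
    if col == 0:  # bib number
        return int(value)
    if col == 1:  # runner name
        name = str(value).strip()
        if not name:
            raise ValueError("empty name")
        return name
    if col == 2:  # finish time "H:MM:SS" -> seconds
        h, m, s = (int(p) for p in str(value).split(":"))
        return h * 3600 + m * 60 + s
    return value  # remaining columns pass through unchanged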
Example #2
def train(config_filename, resume):
    """
    Entry point to start training run(s).
    """
    configs = [load_config(f) for f in config_filename]
    for config in configs:
        Runner(config).train(resume)
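A hypothetical invocation of the entry point above; note that config_filename is an iterable of config file paths, one Runner per config (the filenames here are placeholders):

train(["configs/baseline.yaml", "configs/large.yaml"], resume=False)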
Example #3
    def run(self):
        r = Runner.Runner(self.net, self.runFile)
        i = 0
        while i < self.simNum:
            # start simulation and log simTime
            print("Count: " + str(i))
            startTime = time.time()
            r.createRandomTrips()
            r.runNoGui()
            simTime = time.time() - startTime

            # Analyse for traffic density fitness
            densityFitness = getFitness.main()

            # Create a FileEditor object
            f = FileEditor.FileEditor(simTime, densityFitness)
            f.getOutput()
            f.getTimeStamps()
            f.setBreakpoints()
            f.writeFile()

            if f.getTeleports() > 0:
                fitness = round((densityFitness / f.getTeleports()), 2)
            else:
                fitness = densityFitness
            f.setFitness(fitness)

            print("SimRun F: " + str(fitness))
            # if collisions occur, copy the route data, warning file and breakpoint file
            print("Collision Num: " + str(f.getCollisionWarnings()))
            if f.getWarnings() > 0 and f.getTeleports() < 10:
                f.copyDir()
                f.copyFile()
                f.writeBreakPointFile()
            i += 1
Example #4
def load_runner(ID,
                data_path='/local/scratch/public/va304/dagan/runners/data'):
    """
    Loades a runner object. 
    
    This function should be updated
    """
    fname = 'runner_%d.npz' % (ID)
    fname = join(data_path, fname)
    data = np.load(fname)

    lr = float(data['lr'])
    momentum = float(data['momentum'])
    smoothing_eps = float(data['smoothing_eps'])
    la = float(data['la'])
    v = data['v']
    r = data['r']
    x0 = data['x0']
    mask = data['mask']
    optimizer = str(data['optimizer'])
    backlog = str(data['backlog'])
    max_itr = int(data['max_itr'])
    max_r_norm = float(data['max_r_norm'])
    max_diff_norm = float(data['max_diff_norm'])
    ps = str(data['ps'])
    psf = float(data['psf'])
    ws = str(data['ws'])
    wsf = float(data['wsf'])

    length_r = r.shape[0]
    r_list = []
    v_list = []
    for i in range(length_r):
        r_list.append(r[i])
        v_list.append(v[i])

    runner = Runner.Runner(
        max_itr,
        max_r_norm,
        max_diff_norm,
        la=la,
        warm_start=ws,  # ||f(x+r) - f(x) + p||_{2}^{2} 
        warm_start_factor=wsf,
        perp_start=ps,
        perp_start_factor=psf,
        optimizer=optimizer,
        momentum=momentum,
        smoothing_eps=smoothing_eps,
        learning_rate=lr)
    runner.backlog = backlog
    runner.v = v_list
    runner.r = r_list
    runner.x0 = [x0]
    runner.mask = [mask]
    return runner
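The loader above implies a matching saver. Below is a minimal sketch of the inverse operation using numpy.savez; the attribute names on the runner object are assumptions mirroring the fields the loader reads back.

import numpy as np
from os.path import join

def save_runner(runner, ID,
                data_path='/local/scratch/public/va304/dagan/runners/data'):
    # Hypothetical inverse of load_runner: persist the same fields the
    # loader reads. Attribute names on `runner` are assumptions.
    fname = join(data_path, 'runner_%d.npz' % ID)
    np.savez(fname,
             lr=runner.learning_rate, momentum=runner.momentum,
             smoothing_eps=runner.smoothing_eps, la=runner.la,
             v=np.asarray(runner.v), r=np.asarray(runner.r),
             x0=runner.x0[0], mask=runner.mask[0],
             optimizer=runner.optimizer, backlog=runner.backlog,
             max_itr=runner.max_itr, max_r_norm=runner.max_r_norm,
             max_diff_norm=runner.max_diff_norm,
             ps=runner.perp_start, psf=runner.perp_start_factor,
             ws=runner.warm_start, wsf=runner.warm_start_factor)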
Example #5
def main(cfg: DictConfig) -> None:

    mlflow.set_tracking_uri(cfg.params.tracking_uri)
    mlflow.set_experiment(cfg.params.experiment_name)
    mlflow.start_run(run_name=cfg.params.run_name)
    mlflow.log_params(cfg.params)
    mlflow.log_param("cwd", os.getcwd())
    mlflow.log_artifacts(".hydra", "hydra")

    runner = Runner(cfg)
    runner.run()
Example #6
    def run(self):
        """
        call the QC code & confirm the running is ok. if not throw error messages.
        """
        # collect input file list
        filelist = tools.load_data("filelist.dat")
        # Run gaussian
        g = Runner()
        g.caller(filename=filelist)

        return
Example #7
File: app.py Project: SQuantTeam/SQuant
def test_bar_model(symbol="AAPL", order_size=10, split_ratio=0.8, epochs=1,
                   end_date=(datetime.date.today()).strftime('%Y%m%d'),
                   n_samples=1000, isTrain=False):
    _symbol = symbol
    _split_ratio = split_ratio
    _epochs = epochs
    data = GetData.get_A_data(ts_code=_symbol, end_date=end_date, n_samples=n_samples)
    # yesterday's one-minute bar data
    bar_data = GetData.get_latest_bar(symbol=symbol, trade_date=end_date, freq="1M")
    print("test_latest_model:bar_data.shape:", bar_data.shape)
    print(data.columns.values)
    # Preprocess the data: split into train and test sets according to the
    # user-supplied split_ratio
    # train, test, date_train, date_test = DataPreprocess.data_preprocess(data, _split_ratio)
    train, test, date_train, date_test = DataPreprocess.data_A_preprocess(data, _split_ratio)
    bar_test, time_test = DataPreprocess.data_A_bar(bar_data)
    print("bar_test.shape:", bar_test.shape, "time_test.shape:", time_test.shape)

    # Build the training and test environments
    # env_test = StockEnvironment.StockEnv(test, order_size)
    env_test = StockEnvironment.StockEnv(bar_test, order_size)
    env_train = StockEnvironment.StockEnv(train, order_size)

    # Initialize the runner
    runner = Runner.Runner()
    trained_model = None
    if not isTrain:
        for new_dir in os.listdir(os.curdir):  # list every file in the current directory ('.')
            # prefer a 'success' model if one exists, otherwise use the 'train' model
            if new_dir.startswith('success-model-{}'.format(_symbol)):
                trained_model = new_dir
        # if there is no 'success' model, fall back to a previously trained 'train' model
        if trained_model is None:
            for dir_name in os.listdir(os.curdir):  # list every file in the current directory ('.')
                if dir_name.startswith('train-model-{}'.format(_symbol)):
                    trained_model = dir_name
        if trained_model is None:
            print("No model available for prediction; training a new model")
            trained_model = runner.trainer(_symbol, env_train, _epochs, order_size)
    else:
        # Train the DQN network; returns the trained model and the final
        # training results, and plots the training curves
        trained_model = runner.trainer(_symbol, env_train, _epochs)
    print('Model Name: {}'.format(trained_model))
    # Analyze the test data with the trained Q-network and output the
    # predicted final trading actions; plots the test curves
    fortune, act, reward, cash = runner.tester(env_test, trained_model, order_size)
    print("profitRatio:{},fortune:{},act:{},reward:{},cash:{}".format(fortune[-1] / 100000.0, fortune[-1], act[-1],
                                                                      reward[-1], cash[-1]))
    print("fortune len:", len(fortune))
    return bar_test, act, fortune, cash, time_test
Example #8
def load_runner(ID, data_path=deep_mri_runner_path):
    fname = 'data/runner_%d.npz' % (ID)
    fname = join(data_path, fname)
    data = np.load(fname)

    lr = float(data['lr'])
    momentum = float(data['momentum'])
    smoothing_eps = float(data['smoothing_eps'])
    la = float(data['la'])
    v = data['v']
    r = data['r']
    x0 = data['x0']
    mask = data['mask']
    optimizer = str(data['optimizer'])
    backlog = str(data['backlog'])
    max_itr = int(data['max_itr'])
    max_r_norm = float(data['max_r_norm'])
    max_diff_norm = float(data['max_diff_norm'])
    ps = str(data['ps'])
    psf = float(data['psf'])
    ws = str(data['ws'])
    wsf = float(data['wsf'])

    length_r = r.shape[0]
    r_list = []
    v_list = []
    for i in range(length_r):
        r_list.append(r[i])
        v_list.append(v[i])

    runner = Runner.Runner(
        max_itr,
        max_r_norm,
        max_diff_norm,
        la=la,
        warm_start=ws,  # ||f(x+r) - f(x) + p||_{2}^{2} 
        warm_start_factor=wsf,
        perp_start=ps,
        perp_start_factor=psf,
        optimizer=optimizer,
        momentum=momentum,
        smoothing_eps=smoothing_eps,
        learning_rate=lr)
    runner.backlog = backlog
    runner.v = v_list
    runner.r = r_list
    runner.x0 = [x0]
    runner.mask = [mask]
    return runner
Example #9
    def __init__(self):
        self.runner = Runner()  # the Runner game we will be using
        # Hyperparameters
        self.input = 3
        self.middle = 3
        self.output = 3
        self.learning_rate = 0  # will be set by the user
        # In the output the player can go up or down
        # Weights for each layer, sized by the hyperparameters above;
        # initialized to random values rather than zeros
        self.weights_1 = np.random.randn(self.input, self.middle)
        self.weights_2 = np.random.randn(self.middle, self.output)
        # In case you want to look at the weights and compare them
        print(self.weights_1)
        print(self.weights_2)
Example #10
def main(epochs, agent_str, agent_args, env_str, env_args, verbose):
    """RL Testbed.
    @arg epochs: Number of episodes to run for
    @arg agent_str: String name of agent
    @arg agent_args: Arguments to the agent constructor
    @arg env_str: String name of environment
    @arg env_args: Arguments to the environment constructor
    """
    # Load agent and environment
    agent, env = load(agent_str, agent_args, env_str, env_args)
    runner = Runner.Runner(agent, env)
    if verbose:
        runner.post_act_hook = verbose_post_act_hook
    else:
        runner.post_act_hook = quiet_post_act_hook
    runner.post_react_hook = post_react_hook

    runner.run(epochs)
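The hooks assigned above suggest callables that Runner invokes around each step. A minimal sketch of the two post-act hooks follows; the (agent, env, action, reward) signature is an assumption, not a documented API.

def verbose_post_act_hook(agent, env, action, reward):
    # Hypothetical hook: log every action/reward pair.
    print("action={} reward={}".format(action, reward))

def quiet_post_act_hook(agent, env, action, reward):
    # Hypothetical hook: stay silent during the run.
    pass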
Example #11
    def __init__(self):
        self.runner = Runner()
        self.input = 3
        self.middle = 3
        self.output = 3
        # In the output the player can go up or down
        self.weights_1 = np.random.randn(self.middle, self.input)
        self.weights_2 = np.random.randn(self.middle, self.output)
        weight_fileA = open("weights_1.txt", "r")
        weight_fileB = open("weights_2.txt", "r")
        '''
        weightA = float(weight_fileA.readline())
        weightB = float(weight_fileB.readline())
        if weightA != None:
            self.weights_1 = np.array([weightA, weightA, weightA])
        if weightB != None:
            self.weights_2 = np.array([weightB, weightB, weightB])
        '''
        print(self.weights_1)
        print(self.weights_2)
Example #12
File: app.py Project: SQuantTeam/SQuant
    def run(self):
        # Perform the task
        # Based on the user-supplied symbol, read already-downloaded local data
        # or fetch it from Quandl; by default the most recent 500 rows are used
        if _symbol is None:
            print("input _symbol is None!")
            return
        data = GetData.get_data(_symbol)

        # Preprocess the data: split into train and test sets according to the
        # user-supplied split_ratio
        train, test = DataPreprocess.data_preprocess(data, _split_ratio)

        order_size = 100
        # Build the training and test environments
        env_train = StockEnvironment.StockEnv(train, order_size)
        env_test = StockEnvironment.StockEnv(test, order_size)

        # Initialize the runner
        runner = Runner.Runner()
        # Train the DQN network; returns the trained model and the final
        # training results, and plots the training curves
        trained_model = runner.trainer(_symbol, env_train, _epochs)

        for new_dir in os.listdir(os.curdir):  # list every file in the current directory ('.')
            # prefer a 'success' model if one exists, otherwise use the 'train' model
            if new_dir.startswith('success-model-{}'.format(_symbol)):
                trained_model = new_dir

        print('Model Name: {}'.format(trained_model))

        # Analyze the test data with the trained Q-network and output the
        # predicted final trading actions; plots the test curves
        fortune, act, reward, cash = runner.tester(env_test, trained_model, order_size)
        d = test
        a = act
        f = fortune
        c = cash
        # Prediction note: the model only predicts the trading action for the
        # current day; its input is historical data, and "historical" here
        # means the test set rather than the train set.
        # PlotTradePoint.plot_trade_point(test, act, x_name='Test - Steps', y_name='Test - Close Price')

        # Task finished; emit the signal
        self.signal.emit(d, a, f, c)
Example #13
    print(section)
    for element in cfg[section]:
        print("  " + element + " > " + str(cfg[section][element]))
"""

# Creates a filehandler from a folder
data = fh.FileHandler(cfg["config"]["reads_dir"],
                      cfg["config"]["work_dir"],
                      clean=cfg["config"]["clean"])

# Check the test order:
order = cfg["config"]["test_order"]
# Creates script accordingly
for element in order:
    if element == "fastqc":
        # Creates a fastqc object depending on the config in fastqc
        qc = fq.FastQC(data,
                       cfg["fastqc"]["directory"],
                       zip_extract=cfg["fastqc"]["zipextract"])
        # Runs the generated script
        run = rnr.Runner(data)
    elif element == "sortmerna":
        # Creates a smr object
        rna = smr.SortMeRna(
            data,
            cfg["sortmerna"]["database"],
            number_of_threads=cfg["sortmerna"]["number_of_threads"])
        run = rnr.Runner(data)
    elif element == "trimmer":
        pass
Example #14
def main(environment, total_games, max_time_per_game, training_interval,
         save_interval, num_epochs, batch_size, actor_lr, critic_lr,
         actor_gamma, critic_gamma):
    # For logging purposes, record the training start time
    start_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    log_dir, log_file = setup_logs(environment, start_time)

    # Then set up environment
    if environment == "CartPole-v0":
        solved_thresh = 195.0
    elif environment == "LunarLander-v2":
        solved_thresh = 200.0
    else:
        # Guard against an undefined solved_thresh further down
        raise ValueError("Unsupported environment: {}".format(environment))

    TOTAL_GAMES = 100 * batch_size
    MAX_TIME_PER_GAME = max_time_per_game
    SAVE_INTERVAL = save_interval
    GAME_OVER_PENALTY = -100
    NUM_EPOCHS = num_epochs

    ################################
    # Save our training parameters #
    ################################
    line = ("total_games: {}\nmax_time_per_game: {}\ntraining_interval: {}\n"
            "save_interval: {}\nnum_epochs: {}\nbatch_size: {}\nactor_lr: {}\n"
            "critic_lr: {}\nactor_gamma: {}\ncritic_gamma: {}\n").format(
        total_games, max_time_per_game, training_interval, save_interval,
        num_epochs, batch_size, actor_lr, critic_lr, actor_gamma, critic_gamma)
    os.write(log_file, line.encode())

    runner = Runner(environment, batch_size)
    state_size = runner.get_state_size()
    action_size = runner.get_action_size()

    value_size = 1  # This is the output size for the critic model
    actor = Actor(state_size, action_size, actor_gamma, actor_lr)
    critic = Critic(state_size, value_size, critic_gamma, critic_lr)

    done = False
    history = []
    history_avg_100 = []
    game_num = 0
    step = 0
    state = runner.reset_all()
    state = np.reshape(state, [batch_size, state_size])
    cumulative_reward = np.zeros(batch_size)
    game_steps = np.zeros(batch_size)

    print("Playing {} with A2C...").format(environment)

    while game_num < TOTAL_GAMES:
        step += 1
        game_steps += 1

        action = actor.act(state)
        next_state, reward, done, _ = runner.step(action)
        cumulative_reward += reward

        # Penalize failure harshly
        if environment == "CartPole-v0":
            for i in range(batch_size):
                if (done[i]) and (game_steps[i] < 200):
                    reward[i] = GAME_OVER_PENALTY

        if environment == "LunarLander-v2":
            for i in range(batch_size):
                if game_steps[
                        i] == 1000:  # Without this, just hovers to avoid crashing
                    reward[i] = GAME_OVER_PENALTY

        train(actor, critic, state, action, reward, next_state, done,
              batch_size, num_epochs)
        state = next_state

        for i in range(batch_size):
            if done[i]:
                print("Game {}/{} complete, score {}").format(
                    game_num, TOTAL_GAMES, cumulative_reward[i])
                state[i] = runner.reset_one(i)
                history.append(cumulative_reward[i])
                cumulative_reward[i] = 0
                game_steps[i] = 0
                game_num += 1
                # If we have an average score > solved_thresh over 100 consecutive rounds, we have solved CartPole!
                if len(history) > 100:
                    avg_last_100 = np.average(history[-100:])
                    history_avg_100.append(avg_last_100)

                    if avg_last_100 > solved_thresh:
                        stop_time = datetime.now().strftime(
                            '%Y-%m-%d_%H:%M:%S')
                        print(
                            "Congratulations! {} has been solved after {} rounds."
                            .format(environment, step))
                        actor.model.save(
                            os.path.join(
                                log_dir, "actor_solved_" +
                                str(int(avg_last_100)) + ".h5"))
                        critic.model.save(
                            os.path.join(
                                log_dir, "critic_solved_" +
                                str(int(avg_last_100)) + ".h5"))
                        plot_name = os.path.join(log_dir, "Solved.png")
                        plot_history(history, history_avg_100, plot_name)
                        line = "Training start: {}\nTraining ends:  {}\n".format(
                            start_time, stop_time)
                        os.write(log_file, line.encode())
                        os.write(
                            log_file,
                            "Cumulative score history: \n{}\n\nAverage of 100 rounds: \n{}\n"
                            .format(history, avg_last_100).encode())
                        os.close(log_file)
                        return 0

        # If not, just save the model and keep going
        if step % SAVE_INTERVAL == 0:
            actor.model.save(
                os.path.join(log_dir, "actor_" + str(step) + ".h5"))
            critic.model.save(
                os.path.join(log_dir, "critic_" + str(step) + ".h5"))
    ##################################################################
    # If we're here, then we finished training without a solution.  #
    # Save the most recent models and make the plots anyway.        #
    ##################################################################
    stop_time = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    actor.model.save(os.path.join(log_dir, "actor_" + str(step) + ".h5"))
    critic.model.save(os.path.join(log_dir, "critic_" + str(step) + ".h5"))
    plot_name = os.path.join(log_dir, "Unsolved.png")
    plot_history(history, history_avg_100, plot_name)
    line = "Training start: {}\nTraining ends:  {}".format(
        start_time, stop_time)
    os.write(log_file, line)
    os.write(
        log_file,
        "Cumulative score history: \n{}\n\nAverage of 100 rounds: \n{}\n.".
        format(history, avg_last_100))
    os.close(log_file)
    return 0
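plot_history is not shown in this example. Below is a minimal matplotlib sketch consistent with how it is called above; the exact styling is an assumption.

import matplotlib.pyplot as plt

def plot_history(history, history_avg_100, plot_name):
    # Per-game scores plus the trailing 100-game average used for the solve check.
    plt.figure()
    plt.plot(history, label="score per game")
    plt.plot(range(100, 100 + len(history_avg_100)), history_avg_100,
             label="average of last 100 games")
    plt.xlabel("game")
    plt.ylabel("cumulative reward")
    plt.legend()
    plt.savefig(plot_name)
    plt.close()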
Example #15
def test_coverage() -> None:

    cfg = OmegaConf.load("./config.yaml")
    runner = Runner(cfg)
    runner.run()
Example #16
def run() -> None:
    r = Runner.Runner()
    r.run()
Example #17
    # Fetch the data for the given company
    _symbol = "AAPL"
    _split_ratio = 0.8
    _epochs = 5

    data = GetData.get_data(_symbol)
    # Preprocess the data: split into train and test sets according to the
    # user-supplied split_ratio
    train, test = DataPreprocess.data_preprocess(data, _split_ratio)

    # Build the training and test environments
    env_train = StockEnvironment.StockEnv(train)
    env_test = StockEnvironment.StockEnv(test)

    # Initialize the runner
    runner = Runner.Runner()
    # Train the DQN network; returns the trained model and the final
    # training results, and plots the training curves
    trained_model = runner.trainer(_symbol, env_train, _epochs)

    for new_dir in os.listdir(os.curdir):  # list every file in the current directory ('.')
        # prefer a 'success' model if one exists, otherwise use the 'train' model
        if new_dir.startswith('success-model-{}'.format(_symbol)):
            trained_model = new_dir

    print('Model Name: {}'.format(trained_model))

    # Analyze the test data with the trained Q-network and output the
    # predicted final trading actions; plots the test curves
    fortune, act, reward, cash = runner.tester(env_test, trained_model)
    print("fortune:{},act:{},reward:{},cash:{}".format(fortune[-1], act[-1], reward[-1], cash[-1]))
    d = test
    a = act
Example #18
__author__ = 'QQ860'

import Runner

huoshan3 = 55
huoshan2 = 24
rn = Runner.Runner(24, 1)
rn.huoshan_run()
Example #19
from Runner import *
from Plotter import *
import random

NAME = ['List', 'Sorted List', 'Tree', 'Set', 'Dictionary', 'Linked List']
DATA_SIZE = 100000000
DATA_RANGE = 100000000
runner = Runner(DATA_SIZE, DATA_RANGE)


def plot_graph(time, operation_name):
    plot = Plotter()
    plot.diagram_plot(time, NAME, operation_name)


def data_search(search_loop_time, data_list, data_tree, data_set, data_dict, data_linked_list, data_sort_list):
    targets = [random.randint(0, DATA_SIZE) for i in range(search_loop_time)]
    print(targets)
    list_search_time = runner.search("list", search_loop_time, targets, data_list)
    sorted_list_search_time = runner.search("sort_list", search_loop_time, targets, data_sort_list)
    tree_search_time = runner.search("tree", search_loop_time, targets, data_tree)
    set_search_time = runner.search("set", search_loop_time, targets, data_set)
    dict_search_time = runner.search("dictionary", search_loop_time, targets, data_dict)
    linked_list_search_time = runner.search("linked_list", search_loop_time, targets, data_linked_list)
    time_search = [list_search_time, sorted_list_search_time, tree_search_time, set_search_time, dict_search_time, linked_list_search_time]
    plot_graph(time_search, "Search Time")


def data_delete(delete_loop_time, data_list, data_tree, data_set, data_dict, data_linked_list, data_sort_list):
    targets = [random.randint(0, DATA_SIZE) for i in range(delete_loop_time)]
    list_delete_time = runner.delete("list", delete_loop_time, targets, data_list)
    sort_list_delete_time = runner.delete("sort_list", delete_loop_time, targets, data_sort_list)
Example #20
from Runner import *
import json
import requests

# Load tool list
tools = loadTools()

assert USE_FTP or USE_LOCAL_DIRECTORY or USE_ZENODO, "Must select one method for distributing data in the settings.py file"

# Now launch tools
updatedTools = []
for tool in tools:
    # Only launch those that are active
    if tool["active"] == True:
        print("Running %s" % tool["name"])
        runner = Runner(tool)
        runner.run()

        if runner.success:
            print("Run was successful")
            if USE_FTP:
                print("Pushing results to FTP")
                runner.pushToFTP()
            if USE_LOCAL_DIRECTORY:
                print("Pushing results to local directory")
                runner.pushToLocalDirectory()
            if USE_ZENODO:
                print("Pushing results to Zenodo")
                doiURL = runner.pushToZenodo()
                # We'll overwrite the dataurl with the DOI for Zenodo
                tool['dataurl'] = doiURL
Example #21
# check if tables are empty
try:
    models.Sound.query.all()
except Exception:
    print("Creating database")
    db.create_all()
    # add sample alarm
    s = models.Sound("First sound", "/tmp/test.mp3")
    db.session.add(s)
    s = models.Sound("Second sound", "/tmp/test2.mp3")
    db.session.add(s)
    st = models.Sunrisetype("Normal")
    db.session.add(st)
    st = models.Sunrisetype("Beach")
    db.session.add(st)
    st = models.Sunrisetype("Jungle")
    db.session.add(st)
    sa = models.Alarm("SampleAlarm",
                      hour=9,
                      minute=0,
                      weekdays=-1,
                      duration=30,
                      sunrise_id=0,
                      sound_id=0)
    db.session.add(sa)
    db.session.commit()

# start the background-runner that will handle the alarms
runner = Runner(app.config, md, models.Alarm.query.all(), models, db)
runner.start()
Example #22
def main():
    DBusGMainLoop(set_as_default=True)
    runner = Runner()
    loop = GLib.MainLoop()
    loop.run()
Example #23
    def run(self):
        r = Runner.Runner(self.net, self.sumocfgName)
        r.runGui(self.breakpoints)
Example #24
    def exeTask(self):
        runner = Runner.Runner()
        result = runner.run()
        return result
Example #25
def fa(*args, **kwargs):
    from datetime import datetime
    log.info("A: {}".format(datetime.now().isoformat()))


def fb(*args, **kwargs):
    from datetime import datetime
    import random
    import time
    if random.randint(0, 100) > 90:
        time.sleep(0.03)
    log.info("B: {}".format(datetime.now().isoformat()))


def echo(*args, **kwargs):
    log.info("Echo args: {}, kwargs: {}".format(args, kwargs))


if __name__ == "__main__":
    setup_logging("/tmp/cdic_async_tasks.log")

    r = Runner(None)

    # r.add_periodic_task(fc, None, None)
    r.add_periodic_task("A1", fa, 50)
    # r.add_periodic_task("A2", fa, 1, 50)
    # r.add_periodic_task("B", fb, 0.01)

    r.add_on_demand_task(OnDemandTask("echo", "echo:pubsub", echo, lambda msg: ((msg,), {})))

    r.start()
Example #26
    def cenario(self, screen, estado):

        ncells = 15
        center = (ncells - 1) // 2

        font.init()
        self.myfont = font.SysFont('Sans Serif', 30)

        self.background = image.load("./Assets/Background.png")

        tabuleiro = image.load("./Assets/Y-zone.png")
        self.tabuleiro = transform.scale(tabuleiro, (700, 700))

        estado.endTurn = spriteButton(screen, "./Assets/FinalizarTurno.png",
                                      (850, 600), (300, 120))
        self.vezzumbi = spriteButton(screen, "./Assets/VezZumbis.png",
                                     (850, 0), (300, 120))
        self.vezsobreviventes = spriteButton(screen,
                                             "./Assets/VezSobreviventes.png",
                                             (850, 0), (300, 120))

        self.area = Area(screen, estado, 16, 16, 700, 700, ncells)

        self.area.background_sound = mixer.Sound("./Assets/Songs/Horde.wav")
        self.area.spawn_sound = mixer.Sound("./Assets/Songs/Spawn.wav")
        self.area.die_sound = mixer.Sound("./Assets/Songs/Die.wav")
        self.area.male_death_sound = mixer.Sound(
            "./Assets/Songs/Male_Death.wav")
        self.area.female_death_sound = mixer.Sound(
            "./Assets/Songs/Female_Death.wav")
        self.area.pistol_song = mixer.Sound("./Assets/Songs/Pistol.wav")
        self.area.rifle_song = mixer.Sound("./Assets/Songs/Rifle.wav")
        self.area.shotgun_song = mixer.Sound("./Assets/Songs/Shotgun.wav")
        self.area.chainsaw_song = mixer.Sound("./Assets/Songs/Chainsaw.wav")
        self.area.katana_song = mixer.Sound("./Assets/Songs/Katana.wav")

        self.area.background_sound.set_volume(0.07)
        self.area.background_sound.play(-1)

        self.Amy = spriteSobrevivente(screen, (830, 100),
                                      "./Assets/Personagens/Amy.png",
                                      (110, 200))
        self.Ned = spriteSobrevivente(screen, (1030, 100),
                                      "./Assets/Personagens/Ned.png",
                                      (110, 200))
        self.Phil = spriteSobrevivente(screen, (830, 350),
                                       "./Assets/Personagens/Phil.png",
                                       (110, 200))
        self.Wanda = spriteSobrevivente(screen, (1030, 350),
                                        "./Assets/Personagens/Wanda.png",
                                        (110, 200))

        self.Walker = image.load("./Assets/Zumbis/Walker.png")
        self.Runner = image.load("./Assets/Zumbis/Runner.png")
        self.Fatty = image.load("./Assets/Zumbis/Fatty.png")
        self.Abomination = image.load("./Assets/Zumbis/Abomination.png")

        self.area.addSobrevivente(
            Amy(self.area, "Amy", "./Assets/Personagens/Amy.png"), center,
            center)
        self.area.addSobrevivente(
            Ned(self.area, "Ned", "./Assets/Personagens/Ned.png"), center,
            center + 1)
        self.area.addSobrevivente(
            Phil(self.area, "Phil", "./Assets/Personagens/Phil.png"),
            center + 1, center)
        self.area.addSobrevivente(
            Wanda(self.area, "Wanda", "./Assets/Personagens/Wanda.png"),
            center + 1, center + 1)

        self.area.addZombie(
            Abomination(self.area, self.Abomination, "Abomination"), 0,
            ncells - 1)
        self.area.addZombie(Runner(self.area, self.Runner, "Runner"), 1,
                            ncells - 1)
        self.area.addZombie(Fatty(self.area, self.Fatty, "Fatty"), 0,
                            ncells - 2)

        self.area.addZombie(
            Abomination(self.area, self.Abomination, "Abomination"),
            ncells - 1, 0)
        self.area.addZombie(Runner(self.area, self.Runner, "Runner"),
                            ncells - 1, 1)
        self.area.addZombie(Fatty(self.area, self.Fatty, "Fatty"), ncells - 2,
                            0)

        self.estado = estado
        self.estado.area = self.area