Example #1
st_taking_branch.on_activate(determine_next_turn)
st_taking_branch.add_transition(Transition(st_clearing_branch, branch_taken))

st_clearing_branch.add_transition(TransitionTimed(1000, st_line_following))
st_clearing_branch.add_transition(
    Transition(st_obstacle_avoidance, obstacle_detected))

st_obstacle_avoidance.add_transition(
    Transition(st_line_following, obstacle_avoidance.done))
st_obstacle_avoidance.on_activate(obstacle_avoidance.reset)

st_tour_done.add_transition(Transition(st_idle, robot.done_movement))

fsm = FSM(st_start)
dsp = Dispatcher(fsm)
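# link_action binds each state to the callable the dispatcher runs while that state is active.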

dsp.link_action(st_calibration, calibration.run)
dsp.link_action(st_obstacle_avoidance, obstacle_avoidance.run)

dsp.link_action(st_idle, mode_selection)
st_idle.on_activate(reset)
dsp.link_action(st_wait, robot.stop)

dsp.link_action(st_route_planning, nav.plan_route)

dsp.link_action(st_line_following, line_follower.run)
dsp.link_action(st_line_lost, seek_line)

dsp.link_action(st_clearing_branch, line_follower.run)
Example #2
    def __init__(self,
                 url=None,
                 port=None,
                 ip=None,
                 auto_shutdown=True,
                 wait_time=20,
                 timeout=5,
                 auto_delete=True,
                 temp_path=None,
                 is_playing_fnc=None,
                 print_status=False):

        #server
        if port:
            self.port = port
        else:
            self.port = random.randint(8000, 8099)
        if ip:
            self.ip = ip
        else:
            self.ip = "127.0.0.1"
        self.server = Server((self.ip, self.port), Handler, client=self)

        #Options
        if temp_path:
            self.temp_path = temp_path
        else:
            self.temp_path = os.path.join(os.path.dirname(__file__), "tmp")
        self.is_playing_fnc = is_playing_fnc
        self.timeout = timeout
        self.auto_delete = auto_delete
        self.wait_time = wait_time
        self.auto_shutdown = auto_shutdown
        self.buffer_size = 15
        self.last_pieces_priorize = 5
        self.state_file = "state"
        self.torrent_paramss = {
            'save_path': self.temp_path,
            'storage_mode': lt.storage_mode_t.storage_mode_sparse
        }

        #State
        self.has_meta = False
        self.meta = None
        self.start_time = None
        self.last_connect = 0
        self.connected = False
        self.closed = False
        self.file = None
        self.files = None
        self._th = None

        #Session
        self._cache = Cache(self.temp_path)
        self._ses = lt.session()
        self._ses.listen_on(0, 0)
        #Load the saved session state file (if it exists)
        if os.path.exists(os.path.join(self.temp_path, self.state_file)):
            try:
                f = open(os.path.join(self.temp_path, self.state_file), "rb")
                state = pickle.load(f)
                self._ses.load_state(state)
                f.close()
            except Exception:
                pass

        self._start_services()

        #Monitor & Dispatcher
        self._monitor = Monitor(self)
        if print_status:
            self._monitor.add_listener(self.print_status)
        self._monitor.add_listener(self._check_meta)
        self._monitor.add_listener(self.save_state)
        self._monitor.add_listener(self.priorize_start_file)
        self._monitor.add_listener(self.announce_torrent)

        if self.auto_shutdown:
            self._monitor.add_listener(self._auto_shutdown)

        self._dispatcher = Dispatcher(self)
        self._dispatcher.add_listener(self._update_ready_pieces)

        #Start with the URL, if one was given
        if url:
            self.start_url(url)
Example #3
from dispatcher import Dispatcher

if __name__ == '__main__':
    print('Welcome to github.com')
    dis = Dispatcher()
    dis.run()
Example #4
from ..cmd import Cmd
from .new_merchant import CmdNewMerchant
from .new_user import CmdNewUser
from .new_transaction import CmdNewTransaction
from dispatcher import Dispatcher

new_dispatcher = Dispatcher()
new_dispatcher.register_command("user", CmdNewUser)
new_dispatcher.register_command("merchant", CmdNewMerchant)
new_dispatcher.register_command("txn", CmdNewTransaction)


class CmdNew(Cmd):
    def process(self, command, *args):
        try:
            new_dispatcher.dispatch(command, *args)
        except KeyError:
            print("Invalid keywords used with new")
        except Exception as e:
            print(e.args)
Example #5
def start_simulation_less_than_n(no_of_jobs_server_1, no_of_jobs_server_2, arrival_rate, job_distribution, sim_time):
    list_of_servers = []
    global jobs_ran
    global final_data
    stopping_n = no_of_jobs_server_2 
    for _ in range(0,2): # Create 2 servers
        scheduler_i = FIFO()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    # Create dispatcher
    policy_i = ShortestQueue() #Join shortest queue policy
    dispatcher = Dispatcher(policy_i, list_of_servers) #Create a dispatcher with JSQ policy

    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate))
    arrival = 0

    #For each job in server 1 we:
    #Create a job with arrival at time 0, then increase # of jobs in the server.
    #We get time the job enters service, either immediately or after the total processing done.
    #Set the enter service and service time needed.
    #Next calculate departure time and set it, then we can schedule the departure.
    server_1_processing_time = 0
    for job in range(0, int(no_of_jobs_server_1)):
        job = Job(0) #All arrive at time 0
        list_of_servers[0]._total_jobs +=1
        enter_service = max(list_of_servers[0]._total_processing_time, 0) #Takes processing time (time when server is idle) and the time of arrival. Max of those is enter service for this job
        job.set_enter_service(enter_service) #Setting the enter service, first is 0, next is 0+service_1, next is 0+service_1 + service_2.. ...
        job.set_service(list_of_servers[0].get_service_time()) #Generates service time AND increases processing time for server
        departure_time = job._enter_service + job._service_time # Enter service will be correct because of processing time of server bookkeeping
        job.set_departure(departure_time) #Set the departure time as enter service+service time. (Its a timeperiod on the timeline)
        world.schedule_event(list_of_servers[0].departure, job._departure_time, [job, world]) #Schedule this departure to the world queue
    for job in range(0, int(no_of_jobs_server_2)):
        job = Job(0)
        list_of_servers[1]._total_jobs +=1
        enter_service = max(list_of_servers[1]._total_processing_time, 0)
        job.set_enter_service(enter_service)
        job.set_service(list_of_servers[1].get_service_time())
        departure_time = job._enter_service + job._service_time
        job.set_departure(departure_time)
        world.schedule_event(list_of_servers[1].departure, job._departure_time, [job, world])
    
    initial_arrival = random.expovariate(p_arrivals._rate)
    params = [dispatcher, world]
    world.schedule_event(p_arrivals.generate_arrival, initial_arrival, params) #Schedule first arrival to start chain

    last_event = 0
    world.number_of_arr_dep = 0 #resetting the number of events before we start
    # Now we need to schedule the initial arrivals to start the chain of events.
    # Now that each dispatcher has an arrival, we can start looping through events
    while world.next_event() <= float(sim_time): # while the virtual time of next event is less than our simulation time..
        if(list_of_servers[0]._total_jobs<=stopping_n and list_of_servers[1]._total_jobs <= stopping_n):
            break
        last_event = world.next_event()
        world.process_event() # We take the event and process it (running the function(s))
    #We've reached a stopping state. Record the event parameters and append them to the results.
    jobs_ran += world._stats.number_of_jobs # We stopped, we add the number of jobs ran this time to global variable
    recorded_x = list_of_servers[0]._total_jobs
    recorded_y = list_of_servers[1]._total_jobs
    recorded_T = last_event #Last event that happened (e.g. departure that caused the total jobs to be < 4)
    recorded_N = world.number_of_arr_dep #Get the number of events that happened
    final_data.append((recorded_x, recorded_y, recorded_T, recorded_N))
Example #6
from producer import Producer
from consummer import Consummer
from dispatcher import Dispatcher
import queue

main_queue = queue.Queue()
odd_queue = queue.Queue()
even_queue = queue.Queue()
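# Pipeline: the producer fills main_queue, the dispatcher presumably splits items into odd_queue and even_queue, and each consumer drains its own queue.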
producer = Producer(main_queue)
dispatcher = Dispatcher(main_queue, odd_queue, even_queue)
odd_consummer = Consummer(odd_queue)
even_consummer = Consummer(even_queue)

producer.start()
dispatcher.start()
odd_consummer.start()
#even_consummer.start()
input()
producer.stop()
odd_consummer.stop()
even_consummer.stop()
dispatcher.stop()
Example #7
def configure():
    global status, active, reqs_queues, config

    if not config:
        logging.info("reading config from file")
        if not read_config_from_file():
            logging.error("configuration reading error")
            return False
        else:
            logging.info("configuration read from file")

    logging.info("configuration read: " + str(config.__dict__))
    logging.info("Getting models from: %s", config.models_endpoint)
    logging.info("Getting containers from: %s", config.containers_endpoint)

    # init models
    models = [
        Model(json_data=json_model)
        for json_model in get_data(config.models_endpoint)
    ]
    if len(models) > 0:
        logging.info("Models: %s", [model.to_json() for model in models])
    else:
        logging.warning("No models found")

    # init containers
    containers = [
        Container(json_data=json_container)
        for json_container in get_data(config.containers_endpoint)
    ]
    if len(containers) > 0:
        logging.info("Containers: %s",
                     [container.to_json() for container in containers])
    else:
        logging.warning("No containers found")
    logging.info("Found %d models and %d containers", len(models),
                 len(containers))

    # init requests queues
    reqs_queues = {model.name: queue.Queue() for model in models}
    responses_list = {model.name: [] for model in models}

    # init policy
    queues_policies = QueuesPolicies(reqs_queues, responses_list, models,
                                     logging)
    gpu_policy = queues_policies.policies.get(config.gpu_queues_policy)
    cpu_policy = queues_policies.policies.get(config.cpu_queues_policy)
    logging.info("Policy for GPUs: %s", config.gpu_queues_policy)
    logging.info("Policy for CPUs: %s", config.cpu_queues_policy)

    # disable logging if verbose == 0
    logging.info("Verbose: %d", config.verbose)
    if config.verbose == 0:
        app.logger.disabled = True
        logging.getLogger('werkzeug').setLevel(logging.WARNING)

    # init dispatchers
    status = "Init dispatchers"
    logging.info(status)
    dispatcher_gpu = Dispatcher(app.logger, models, containers,
                                DispatchingPolicy.ROUND_ROBIN, Device.GPU)
    dispatcher_cpu = Dispatcher(app.logger, models, containers,
                                DispatchingPolicy.ROUND_ROBIN, Device.CPU)

    # start the send requests thread
    status = "Start send reqs thread"
    logging.info(status)
    log_consumer_threads_pool = ThreadPoolExecutor(
        max_workers=config.max_log_consumers)
    for i in range(config.max_log_consumers):
        log_consumer_threads_pool.submit(log_consumer)

    # start the queues consumer threads
    status = "Start queues consumer threads"
    logging.info(status)

    if list(filter(lambda c: c.device == Device.GPU and c.active, containers)):
        # threads that poll the app queues and dispatch to GPUs
        polling_gpu_threads_pool = ThreadPoolExecutor(
            max_workers=config.max_polling_threads)
        for i in range(config.max_polling_threads):
            polling_gpu_threads_pool.submit(queues_pooling, dispatcher_gpu,
                                            gpu_policy,
                                            config.max_consumers_gpu)

    if list(filter(lambda c: c.device == Device.CPU and c.active, containers)):
        # threads that poll the app queues and dispatch to CPUs
        pooling_cpu_threads_pool = ThreadPoolExecutor(
            max_workers=config.max_polling_threads)
        for i in range(config.max_polling_threads):
            pooling_cpu_threads_pool.submit(queues_pooling, dispatcher_cpu,
                                            cpu_policy,
                                            config.max_consumers_cpu)

    status = "active"
    active = True
    logging.info(status)
    return True
Example #8
def create_app(
        containers_manager="http://localhost:5001",
        requests_store="http://localhost:5002",
        verbose=1,
        gpu_queues_policy=QueuesPolicy.HEURISTIC_1,
        cpu_queues_policy=QueuesPolicy.ROUND_ROBIN,
        max_log_consumers=1,
        max_polling=1,  # the number of threads waiting for requests
        max_consumers_cpu=100,
        max_consumers_gpu=100):  # the number of concurrent request threads
    global reqs_queues, requests_store_host, status, gpu_policy, cpu_policy, responses_list
    requests_store_host = requests_store + "/requests"

    # init log
    coloredlogs.install(level='DEBUG', milliseconds=True)
    # log_format = "%(asctime)s:%(levelname)s:%(name)s: %(filename)s:%(lineno)d:%(message)s"
    # logging.basicConfig(level='DEBUG', format=log_format)

    # init models and containers
    status = "Init models and containers"
    logging.info(status)
    models_endpoint = containers_manager + "/models"
    containers_endpoint = containers_manager + "/containers"
    logging.info("Getting models from: %s", models_endpoint)
    logging.info("Getting containers from: %s", containers_endpoint)

    models = [
        Model(json_data=json_model) for json_model in get_data(models_endpoint)
    ]
    logging.info("Models: %s", [model.to_json() for model in models])
    containers = [
        Container(json_data=json_container)
        for json_container in get_data(containers_endpoint)
    ]
    logging.info("Containers: %s",
                 [container.to_json() for container in containers])
    logging.info("Found %d models and %d containers", len(models),
                 len(containers))

    # init reqs queues
    reqs_queues = {model.name: queue.Queue() for model in models}
    responses_list = {model.name: [] for model in models}

    # init policy
    queues_policies = QueuesPolicies(reqs_queues, responses_list, models,
                                     logging)
    gpu_policy = queues_policies.policies.get(gpu_queues_policy)
    cpu_policy = queues_policies.policies.get(cpu_queues_policy)
    logging.info("Policy for GPUs: %s", gpu_queues_policy)
    logging.info("Policy for CPUs: %s", cpu_queues_policy)

    # disable logging if verbose == 0
    logging.info("Verbose: %d", verbose)
    if verbose == 0:
        app.logger.disabled = True
        logging.getLogger('werkzeug').setLevel(logging.WARNING)

    # init dispatchers
    status = "Init dispatchers"
    logging.info(status)
    dispatcher_gpu = Dispatcher(app.logger, models, containers,
                                DispatchingPolicy.ROUND_ROBIN, Device.GPU)
    dispatcher_cpu = Dispatcher(app.logger, models, containers,
                                DispatchingPolicy.ROUND_ROBIN, Device.CPU)

    # start the send requests thread
    status = "Start send reqs thread"
    logging.info(status)
    log_consumer_threads_pool = ThreadPoolExecutor(
        max_workers=max_log_consumers)
    for i in range(max_log_consumers):
        log_consumer_threads_pool.submit(log_consumer)

    # start the queues consumer threads
    status = "Start queues consumer threads"
    logging.info(status)

    if list(filter(lambda c: c.device == Device.GPU and c.active, containers)):
        # threads that poll the app queues and dispatch to GPUs
        polling_gpu_threads_pool = ThreadPoolExecutor(max_workers=max_polling)
        for i in range(max_polling):
            polling_gpu_threads_pool.submit(queues_pooling, dispatcher_gpu,
                                            gpu_policy, max_consumers_gpu)

    if list(filter(lambda c: c.device == Device.CPU and c.active, containers)):
        # threads that poll the app queues and dispatch to CPUs
        pooling_cpu_threads_pool = ThreadPoolExecutor(max_workers=max_polling)
        for i in range(max_polling):
            pooling_cpu_threads_pool.submit(queues_pooling, dispatcher_cpu,
                                            cpu_policy, max_consumers_cpu)

    # start
    status = "Running"
    logging.info(status)
    return app
Example #9
from commands import CmdNew, CmdPayback, CmdReport, CmdUpdate
from dispatcher import Dispatcher

main_dispatcher = Dispatcher()
main_dispatcher.register_command("new", CmdNew)
main_dispatcher.register_command("report", CmdReport)
main_dispatcher.register_command("payback", CmdPayback)
main_dispatcher.register_command("update", CmdUpdate)


def main():
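    # Read commands from stdin until a blank line; the first word selects the registered command, the rest are passed through as its arguments.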
    while True:
        inp = input("").strip().lower()
        if not inp:
            break
        command, *args = inp.split()
        main_dispatcher.dispatch(command, *args)


if __name__ == "__main__":
    main()
Example #10
from flask import Flask, request, abort
from gevent.wsgi import WSGIServer
import config
import json

from dispatcher import Dispatcher
'''
App: starts the server and handles HTTP requests.
'''

app = Flask(__name__)
dispatcher = Dispatcher(api_url=config.METEO_API, token=config.METEO_TOKEN)


# handling message on GET: /forecast
@app.route('/forecast', methods=['GET'])
def handle_forecast():
    if request.method == 'GET':
        query = request.get_json()
        return dispatcher.forecast(query)


@app.route('/current', methods=['GET'])
def handle_current():
    if request.method == 'GET':
        query = request.get_json()
        return dispatcher.current(query)


if __name__ == '__main__':
    server = WSGIServer((config.HOST, config.PORT), app)
    server.serve_forever()
Example #11
from dispatcher import Dispatcher

app = Dispatcher()


@app.route('/')
def app1(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    yield 'Hello World!\n'


@app.route('/show/{id}')
def app2(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    args = environ['dispatcher.args']
    yield 'Showing element %s\n' % args['id']
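
# A minimal sketch (not part of the original example, assuming the Dispatcher
# instance behaves as a plain WSGI callable) of serving the app locally:
from wsgiref.simple_server import make_server

if __name__ == '__main__':
    with make_server('127.0.0.1', 8000, app) as httpd:
        httpd.serve_forever()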
Example #12
def init_controller():
    if not os.path.exists(config.DEMO_CONF_DIR):
        os.makedirs(config.DEMO_CONF_DIR)
    ini_files = ['sys.ini', 'demo.ini', 'jif.ini']

    for file in ini_files:
        if not os.path.isfile(os.path.join(config.DEMO_CONF_DIR, file)):
            if 'sys' in file:
                logger.boot('System ini not found, saving default')
                config.save_default_sys()
            elif 'demo' in file:
                logger.boot('Demo ini not found, saving default')
                config.save_default_demo()
            elif 'jif' in file:
                logger.boot('JIF ini not found, saving default')
                config.save_default_jif()

    configs = config.load_config('sys, demo, jif')

    control = DemoController(configs['dconf'], configs['jconf'],
                             configs['sysconf'])
    control.lock = threading.Lock()

    # Dispatcher setup
    control.dispatcher = Dispatcher(control)
    control.dispatcher.start()

    # Data controller
    control.data_controller = DataController(control.all_targets,
                                             control.reprint_pool,
                                             configs['dconf'][0],
                                             control.exit_dir)
    control.data_controller.setup_workers()

    # File observer setups
    proc_observer = Observer()
    proc_observer.schedule(file_handler.StatusChange(control.proc_queue),
                           control.proc_dir)
    control.observers.append(proc_observer)
    proc_observer.start()
    newjob_observer = Observer()
    newjob_observer.schedule(file_handler.NewJob(control.proc_queue),
                             control.proc_dir)
    control.observers.append(newjob_observer)
    newjob_observer.start()
    reprint_observer = Observer()
    reprint_observer.schedule(file_handler.Reprint(control.proc_queue),
                              control.proc_dir)
    control.observers.append(reprint_observer)
    reprint_observer.start()
    complete_observer = Observer()
    complete_observer.schedule(file_handler.Completed(control.proc_queue),
                               control.proc_dir)
    control.observers.append(complete_observer)
    complete_observer.start()

    # Web setup
    control.clients = aptinterface.cl
    web_thread = threading.Thread(target=init_web_server,
                                  args=(control.sysconf['HTTPServer']['host'],
                                        control.sysconf['HTTPServer']['port']))
    web_thread.start()

    return control
Example #13
import datetime
import random
import json
import websockets
import time
from uuid import uuid4
from typing import Any, List, Dict
from loguru import logger

from dispatcher import Dispatcher, Websocket
from dispatcher.filters.builtin import EventTypeFilter
from dispatcher.types import WebsocketEvent
from dispatcher.types import Channel, ChannelPool, User, UserPool, Roles

s = Websocket()
dp = Dispatcher(s)
Dispatcher.set_current(dp)


@dp.login_handler()
async def echo(event: WebsocketEvent, data):
    logger.info('login')
    s.ch_pool.add_channel(int(event.body['channelId']))
    user = User.get_current()
    user.avatar = event.body['avatar']
    user.status = event.body['status']

    res = user.move_to_channel(int(event.body['channelId']))
    channel = user.get_channel()
    if (user.id != -1):
        await s.mongo.change_user_list_channel(int(event.body['channelId']), user.to_dict(), True)
Example #14
                                'parse_func': sel_attr,
                                'kwargs': {
                                    'attr': 'data-name',
                                },
                            },
                            'lat': {
                                'parse_func': sel_attr,
                                'kwargs': {
                                    'attr': 'data-lat',
                                },
                            },
                            'lng': {
                                'parse_func': sel_attr,
                                'kwargs': {
                                    'attr': 'data-lng',
                                },
                            },
                        },
                    },
                },
            },
        },
    ]
}]
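
# Run the configured scrapers and dump the collected results to a JSON file named at the prompt.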

disp = Dispatcher(classes=clss)
results = disp.run()
filename = input('filename:')
with open(filename, 'w') as fle:
    json.dump(results, fle)
Example #15
        "-t",
        "--collect_interval",
        help=
        "Set if it is desired to store the entire interval data to desktop",
        action="store_true")

    parser.set_defaults(is_dispatcher=False)
    parser.set_defaults(is_worker=False)

    args = parser.parse_args()
    args.interval_directory = "interval_data\\"
    args.image = None
    df = None

    if args.is_dispatcher:
        dispatcher = Dispatcher(args)
        df = dispatcher.run()
    else:
        benchmark = None

        if args.benchmark == "idle":
            if not args.is_worker:
                benchmark = Benchmark(args)
            else:
                assert (args.address is not None)
                benchmark = ClientBenchmark(args)
        elif args.benchmark == "PowerGadget":
            benchmark = PowerGadget(args)
        elif args.benchmark == "BLA":
            benchmark = BLA(args)
        elif args.benchmark == "IPPET":
Example #16
                        type=str,
                        nargs='?',
                        default='query_params.csv',
                        help='No of concurrent workers to run')

    args = parser.parse_args()
    no_of_workers = args.Concurrency
    query_arguments_file = "data/" + args.File
    # Define the queues our application will utilize
    worker_queues = ["queue_" + str(x) for x in range(1, no_of_workers + 1)]
    # Instantiate the queues as a multiprocessing.Queue()
    for worker_queue in worker_queues:
        QUEUES[worker_queue] = multiprocessing.Queue()

    # Will pick queue to send task
    dispatcher = Dispatcher(worker_queues)
    try:
        with open(query_arguments_file) as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            line_count = 0
            for row in csv_reader:
                if len(row) != 3:
                    exit("Pleases specify the correct format")
                if line_count == 0:
                    line_count += 1
                else:
                    # Select worker responsible for current host
                    worker = dispatcher.select_worker(row[0])
                    queue = QUEUES.get(worker)
                    queue.put(row)
    except Exception as e:
Example #17
# coding: utf8
from __future__ import print_function

import logging

from dispatcher import Dispatcher
from net_protocol import NetClient
from utils import argparse_worker, read_config

if __name__ == "__main__":
    logging.basicConfig(
        format="%(asctime)s | %(name)s | %(levelname)s | %(message)s")
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    args, _ = argparse_worker()
    config = read_config(args.settings)

    client_address = config.pop("client_address")
    local_address = (
        client_address.get("host", ""),
        client_address["port"],
    )
    Dispatcher(NetClient, local_address, **config).start()
Example #18
    def get(self, request):
        if request.method == 'GET':
            # Initialize Dispatcher
            dispatcher = Dispatcher(request, self.trapi_version)
        return JsonResponse(dispatcher.get_versions())
Example #19
                continue
            e.append(self.get_encoded(addr, port, password, method))
            if e not in u:
                u.append(e)
        for i in sorted(u):
            yield i
        # params_zip = params_groups
        # params_zip.sort(key=lambda x: x[0].lower()[0], reverse=True)
        # for i in params_zip:
        #     yield i

    def get_encoded(self, addr, port, password, method):
        pork = "{method}:{password}".format(**locals())
        cooked = pork.encode("base64")
        cooked = cooked.strip("\n")
        # print pork
        # print cooked
        tpl = "ss://{cooked}@{addr}:{port}"
        return tpl.format(**locals())

    def decode(self, encoded):
        base64_en = encoded.split("//")[1].split("@")[0]
        return (base64_en + "=").decode("base64")


if __name__ == '__main__':
    from dispatcher import Dispatcher
    disp = Dispatcher(IShadow)
    for i in disp.iter_params():
        print i
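
# For reference, a Python 3 sketch of the same ss:// encoding (the example above
# uses Python 2's str.encode("base64"); this standalone helper is an assumption,
# not part of the original code):
import base64

def get_encoded_py3(addr, port, password, method):
    # base64-encode "method:password" and build the ss:// URI
    cooked = base64.b64encode("{}:{}".format(method, password).encode()).decode().strip()
    return "ss://{}@{}:{}".format(cooked, addr, port)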
Example #20
from telegram.ext import Updater, CommandHandler
from radiator_control import RadiatorControl
from dispatcher import Dispatcher
import config

dispatcher = Dispatcher(config)
updater = Updater(config.token, use_context=True)
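# Register one Telegram CommandHandler per Dispatcher method.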

updater.dispatcher.add_handler(
    CommandHandler('radiators', dispatcher.radiators))
updater.dispatcher.add_handler(CommandHandler('rooms', dispatcher.rooms))
updater.dispatcher.add_handler(CommandHandler('help', dispatcher.help))
updater.dispatcher.add_handler(CommandHandler('off', dispatcher.off))
updater.dispatcher.add_handler(CommandHandler('auto', dispatcher.auto))
updater.dispatcher.add_handler(CommandHandler('manual', dispatcher.manual))
updater.dispatcher.add_handler(CommandHandler('status', dispatcher.status))

updater.start_polling()
updater.idle()
Example #21
def main_loop_parallel():
    color_stdout("Started {0}\n".format(" ".join(sys.argv)), schema='tr_text')

    args = Options().args
    jobs = args.jobs
    if jobs < 1:
        # the fastest result I got was with 2 * cpu_count
        jobs = 2 * multiprocessing.cpu_count()

    if jobs > 0:
        color_stdout("Running in parallel with %d workers\n\n" % jobs,
                     schema='tr_text')
    randomize = True

    color_stdout("Timeout options:\n", schema='tr_text')
    color_stdout('-' * 19, "\n", schema='separator')
    color_stdout("REPLICATION_SYNC_TIMEOUT:".ljust(26) +
                 "{}\n".format(args.replication_sync_timeout),
                 schema='tr_text')
    color_stdout("TEST_TIMEOUT:".ljust(26) + "{}\n".format(args.test_timeout),
                 schema='tr_text')
    color_stdout("NO_OUTPUT_TIMEOUT:".ljust(26) +
                 "{}\n".format(args.no_output_timeout),
                 schema='tr_text')
    color_stdout("\n", schema='tr_text')

    task_groups = get_task_groups()
    if Options().args.reproduce:
        task_groups = reproduce_task_groups(task_groups)
        jobs = 1
        randomize = False

    dispatcher = Dispatcher(task_groups, jobs, randomize)
    dispatcher.start()

    print_greetings()

    color_stdout("\n", '=' * 86, "\n", schema='separator')
    color_stdout("WORKR".ljust(6), schema='t_name')
    color_stdout("TEST".ljust(48), schema='t_name')
    color_stdout("PARAMS".ljust(16), schema='test_var')
    color_stdout("RESULT\n", schema='test_pass')
    color_stdout('-' * 81, "\n", schema='separator')

    try:
        is_force = Options().args.is_force
        dispatcher.wait()
        dispatcher.wait_processes()
        color_stdout('-' * 81, "\n", schema='separator')
        has_failed = dispatcher.statistics.print_statistics()
        has_undone = dispatcher.report_undone(
            verbose=bool(is_force or not has_failed))
        if has_failed:
            dispatcher.artifacts.save_artifacts()
            return EXIT_FAILED_TEST
        if has_undone:
            return EXIT_NOTDONE_TEST
    except KeyboardInterrupt:
        color_stdout('-' * 81, "\n", schema='separator')
        dispatcher.statistics.print_statistics()
        dispatcher.report_undone(verbose=False)
        raise
    except HangError:
        color_stdout('-' * 81, "\n", schema='separator')
        dispatcher.statistics.print_statistics()
        dispatcher.report_undone(verbose=False)
        return EXIT_HANG
    return EXIT_SUCCESS
Example #22
    (https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).
    Call once from __main__ before creating any threads.
    If using psyco, call psyco.cannotcompile(threading.Thread.run)
    since this replaces a new-style class method.
    """
    import sys
    run_old = threading.Thread.run

    def run(*args, **kwargs):
        try:
            run_old(*args, **kwargs)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            sys.excepthook(*sys.exc_info())

    threading.Thread.run = run


install_thread_excepthook()

message_queue = queue.Queue()
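# The Gateway pushes incoming messages onto message_queue; the Dispatcher consumes them and presumably runs the registered commands against the database.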

consumer = Consumer(token)
db = Database()
dispatch = Dispatcher(message_queue, db, consumer, commands)

gw = Gateway(token, message_queue)
gw.start()
dispatch.start()
Example #23
                               'key': 'name',
                               'method': '$addToSet'
                           },
                           db_type='MongoDB',
                           db='drstat',
                           table='courses',
                           attrs=[
                               Attr(name='name',
                                    selector='li.title',
                                    func='sel_text'),
                               Attr(name='text',
                                    selector='#bsocontent p',
                                    func='sel_text',
                                    kws={'as_list': True}),
                           ]),
                  Template(name='next_url',
                           selector='div.next',
                           attrs=[
                               Attr(name='next',
                                    selector='a',
                                    func='sel_attr',
                                    kws={'attr': 'href'},
                                    source=True)
                           ])
              ])
    ])

d = Dispatcher()
d.add_scraper(drstat)
d.run()
Example #24
from Queue import Queue
from card_reader import CardReader
from remote_listener import RemoteListener
from dispatcher import Dispatcher

print "Welcome to Jeremy's Jukebox 3.0"
print ''

q = Queue()
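# CardReader and RemoteListener push playback requests onto the shared queue; the Dispatcher presumably plays matching files from /mnt/bigdaddy.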

CardReader(q).start()
RemoteListener(q).start()
Dispatcher(q, "/mnt/bigdaddy").start()
Example #25
def start_simulation_state(no_of_jobs_server_1, no_of_jobs_server_2, arrival_rate, job_distribution, given_x, given_y, sim_time):
    list_of_servers = []
    for _ in range(0,2): # Create 2 servers
        scheduler_i = FIFO()
        job_size_i = Expo(job_distribution)
        server_i = Server(job_size_i, scheduler_i)
        list_of_servers.append(server_i)
    # Create dispatcher
    policy_i = ShortestQueue() #Join shortest queue policy
    dispatcher = Dispatcher(policy_i, list_of_servers) #Create a dispatcher with JSQ policy

    statistics = Statistics()
    world = Global(statistics)
    p_arrivals = PoissonArrival(float(arrival_rate))
    arrival = 0

    
    def set_up_servers():
        nonlocal list_of_servers, world
        for job in range(0, int(no_of_jobs_server_1)):
            job = Job(0) #All arrive at time 0
            list_of_servers[0]._total_jobs +=1
            enter_service = max(list_of_servers[0]._total_processing_time, 0) #Takes processing time (time when server is idle) and the time of arrival. Max of those is enter service for this job
            job.set_enter_service(enter_service) #Setting the enter service, first is 0, next is 0+service_1, next is 0+service_1 + service_2.. ...
            job.set_service(list_of_servers[0].get_service_time()) #Generates service time AND increases processing time for server
            departure_time = job._enter_service + job._service_time # Enter service will be correct because of processing time of server bookkeeping
            job.set_departure(departure_time) #Set the departure time as enter service+service time. (Its a timeperiod on the timeline)
            world.schedule_event(list_of_servers[0].departure, job._departure_time, [job, world]) #Schedule this departure to the world queue
        # Now we have some jobs already in the server(s) before we start the main loop of adding arrivals e.t.c.
        for job in range(0, int(no_of_jobs_server_2)):
            job = Job(0)
            list_of_servers[1]._total_jobs +=1
            enter_service = max(list_of_servers[1]._total_processing_time, 0)
            job.set_enter_service(enter_service)
            job.set_service(list_of_servers[1].get_service_time())
            departure_time = job._enter_service + job._service_time
            job.set_departure(departure_time)
            world.schedule_event(list_of_servers[1].departure, job._departure_time, [job, world])
        #cost function is basically sojourn time in these cases.    

    #### IMPORTANT
    # Need here to make a check to see if we're out of bounds before each process_event
    # If we're out of bounds we 'stop' and save the stats.
    # Then reset the 'world' and run again, until the sim_time is over. 
    # May be best to do a sub-routine to 'reset' the world (can be used for both simulation processes)
    
    def reset_world(first_arrival):
        nonlocal list_of_servers, dispatcher, statistics, world # if we want to modify variable from closure, must place as nonlocal
        list_of_servers.clear() #clear the 2 servers
        for _ in range(0,2): # Create 2 servers
            scheduler_i = FIFO()
            job_size_i = Expo(job_distribution) # use job_distr from closure
            server_i = Server(job_size_i, scheduler_i) # create a new server to place in list
            list_of_servers.append(server_i)
        dispatcher = Dispatcher(policy_i, list_of_servers) # resetting the dispatcher with new servers
        statistics = Statistics()
        world = Global(statistics)
        set_up_servers()
        params = [dispatcher, world]
        world.schedule_event(p_arrivals.generate_arrival, first_arrival, params) #Schedule first arrival to start chain
        #Now we have created two new servers, reset them and created a dispatcher with the new servers
        #Then we reset the world(nonlocal) and statistics to get a clean slate
        #Then we called function to set the initial jobs in the servers again (not same jobs!)

    reset_world(0) # Call function to setup our world

    # Now we need to schedule the initial arrivals to start the chain of events.
    for x in range(1, 11):
        # Now that each dispatcher has an arrival, we can start looping through events
        while world.next_event() <= float(sim_time)*(x*0.1): # while the virtual time of next event is less than our simulation time..
            if list_of_servers[0]._total_jobs > int(given_x) or list_of_servers[1]._total_jobs > int(given_y):
                next_arrival_new_world = world.next_event() #Get the time for the first event for new world
                #reset the world here (e.g. clear all stats and world queue, then reset jobs in servers and 'restart')
                print('resetting world')
                world._stats.write_to_file_intermediate_stats('given_end_of_'+given_x+'_and_'+given_y)
                reset_world(next_arrival_new_world) # This function should reset statistics, world event queue, and server states
                #also remember to log the stats before reset.
            world.process_event() # We take the event and process it (running the function(s))
        print("{}%".format(x*10))
    #The outer for loop steps the while loop through the simulation in 10% increments of sim_time.

    total_no_jobs = world._stats.number_of_jobs
    print(total_no_jobs)
    world._stats.print_stats()
Example #26
    def __init__(self):
        self.size = (1920, 1080)  # default screen size
        self.uitools = UITools()
        self.dispatcher = Dispatcher()
Example #27
                                             'regex':
                                             '([A-z0-9\-]+\s*[A-z0-9\-*\s]*)'
                                         }),
                                    Attr(name='date',
                                         selector='td:nth-of-type(5)',
                                         func='sel_text'),
                                    Attr(name='views',
                                         selector='td:nth-of-type(6)',
                                         func='sel_text'),
                                )), )),
        Phase(source_worker=WebSource,
              parser=HTMLParser,
              templates=(Template(name='drug_report',
                                  selector='',
                                  db_type='mongo_db',
                                  db='erowid',
                                  table='drug_report',
                                  attrs=(
                                      Attr(name='text',
                                           selector='.report-text-surround',
                                           func='sel_text'),
                                      Attr(name='weight',
                                           selector='td.bodyweight-amount',
                                           func='sel_text'),
                                  )), )),
    ])

disp = Dispatcher()
disp.add_scraper(erowid)
disp.run()
Example #28
scene.add_object(Sphere(10, x=50, y=30, scale_z=1.5, color=Color(r=255)))
scene.add_object(Sphere(3, z=20, scale_x=5, color=Color(g=255)))
scene.add_object(Sphere(5, color=Color(r=255, b=255)).stretch('x', angle_y=60))
scene.add_object(Plane(z=-10, color=Color(255, 255, 255)))
scene.add_object(Plane(z=80, color=Color(255, 255, 255)))
scene.add_object(Plane(x=-80, rot_y=90, color=Color(b=255)))
scene.add_object(Plane(x=80, rot_y=90, color=Color(b=255)))
scene.add_object(Plane(y=100, rot_x=90, color=Color(r=255, g=255)))
scene.add_object(Cylinder(5, x=-80, y=100, color=Color(b=255)))
scene.add_object(Cylinder(5, x=80, y=100, color=Color(b=255)))
scene.add_object(Cone(10, color=Color(r=128, g=128, b=128)))

nb_lines_chunk = 50
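# Split the image into chunks of nb_lines_chunk rows; the Dispatcher presumably listens on port 61385 and hands each worker the starting row of a chunk (key_fmt='i' = integer keys).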
dispatch = Dispatcher(61385,
                      set(i for i in range(height) if i % nb_lines_chunk == 0),
                      key_fmt='i')

dispatch.welcome = bytes(scene)

data = {}


@dispatch.result
def result(ys, chunk):
    data[ys] = chunk


# Light: a set of 2 components:
# - a method C that computes the colour at a point on an object
# - a method I that computes the intensity at a point (returns a positive number)
Example #29
                _id = tokens[2]
                origin = deserialize_location(tokens[3])
                destination = deserialize_location(tokens[4])
                patience = int(tokens[5])
                status = 'waiting'
                rider = Rider(_id, status, patience, origin, destination)
                event = RiderRequest(timestamp, rider)

            events.append(event)
    return events


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    dispatcher = Dispatcher()
    monitor = Monitor()
    rider_Bathe = Rider('Bathe', WAITING, 5, Location(1, 2), Location(5, 8))
    driver_Atom = Driver('Atom', Location(0, 0), 1)
    rider_rq = RiderRequest(0, rider_Bathe)
    print(rider_rq)
    cancellation = rider_rq.do(dispatcher, monitor)[0]
    driver_rq = DriverRequest(1, driver_Atom)
    print(driver_rq)
    pickup = driver_rq.do(dispatcher, monitor)[0]
    print(pickup)
    print(cancellation)
    dropoff = pickup.do(dispatcher, monitor)[0]
    print(dropoff)
    driver_rq1 = dropoff.do(dispatcher, monitor)[0]
    print(driver_rq1)
Example #30
    queue_extraced = queue.Queue()
    queue_av = queue.Queue()
    queue_hash = queue.Queue()
    queue_hashed = queue.Queue()
    queue_ext_path = queue.Queue()
    queue_csv = queue.Queue()
    queue_csved = queue.Queue()
    queue_blk = queue.Queue()
    queue_mem = queue.Queue()
    queue_memed = queue.Queue()
    queue_rslt = queue.Queue()
    queue_elastic = queue.Queue()
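    # One queue per processing stage; the Dispatcher presumably fans incoming items out to the extractor, hasher, AV checker, CSV writer, bulker and memory analyzer.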

    see = Seeker(queue_dis, IN_DIR, BASE_NAME, CHECK_TIME)
    dis = Dispatcher(queue_dis, queue_extrac, queue_extraced, queue_ext_path,
                     queue_av, queue_hash, queue_hashed, queue_csv,
                     queue_csved, queue_blk, queue_mem, queue_memed,
                     queue_elastic, IN_DIR, WORK_DIR, OUT_DIR, DIR_OUT)
    has = Hasher(queue_hash, queue_hashed, IN_DIR, WORK_DIR, BLOCK_SIZE_HASH)
    ext = Extractor(queue_extrac, queue_extraced, queue_ext_path, IN_DIR,
                    WORK_DIR)
    csv = Csver(queue_csv, queue_csved, WORK_DIR, OUT_DIR)
    blk = Bulker(queue_blk, queue_extraced, WORK_DIR, OUT_DIR)
    mem = Memer(queue_mem, queue_extraced, IN_DIR, WORK_DIR, OUT_DIR)
    #tim = Timeliner(queue_extrac,WORK_DIR,OUT_DIR)
    avc = Avcheck(queue_av, WORK_DIR, OUT_DIR)
    #elas = Elasticer(queue_elastic,WORK_DIR,OUT_DIR)

    see.start()
    dis.start()
    has.start()
    ext.start()