def event_analysis():
    """
    Event analysis process. It fetches all the event in the database and analyse the description & website and
    then create all the related features
    """
    event_analysis = EventAnalysis()

    # Store all parsed websites so the same website is not parsed several times
    websites = dict()

    # Lists of keywords extracted with TreeTagger
    description_tree_tagger = dict()
    website_tree_tagger = dict()

    events = Event.objects.all()

    if len(events) == 0:
        return

    nb_core = cpu_count()
    nb_events = len(events)
    nb_events_thread = nb_events // nb_core  # integer division: slice bounds must be ints
    events_thread = []

    for i in range(nb_core-1):
        events_thread.append(events[i*nb_events_thread:(i+1)*nb_events_thread])
    events_thread.append(events[(nb_core-1)*nb_events_thread:])

    # Populate the corpus
    start_threads(nb_core, event_analysis_fulfill_corpus,
                  events_thread, event_analysis, websites, description_tree_tagger, website_tree_tagger)

    #Monothread - event_analysis_fulfill_corpus(event_analysis, websites, description_tree_tagger, website_tree_tagger, events)

    event_analysis.set_corpus_complete()

    # Compute the tf-idf of the keywords in the description and in the website, if one exists
    start_threads(nb_core, event_analysis_compute_tf_idf,
                  events_thread, event_analysis, websites, description_tree_tagger, website_tree_tagger)

    #Monothread - event_analysis_compute_tf_idf(event_analysis, websites, description_tree_tagger, website_tree_tagger, events)

    # Fetch the k most important tags for each event
    job_queue = JobQueue()
    job_queue.start()
    start_threads(nb_core, event_analysis_fetch_k_most_important_features_and_push_database,
                  events_thread, job_queue, event_analysis, websites)
    job_queue.finish()

    #Monothread - event_analysis_fetch_k_most_important_features_and_push_database(None, event_analysis, websites, events)

    compute_statistics(events, description_tree_tagger, website_tree_tagger)
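
The function above depends on a start_threads helper that is not shown in this snippet. As a rough sketch only, assuming the helper passes the shared arguments first and one slice of events last to each worker (which is what the commented-out single-threaded calls suggest), it could look like this; the body is an assumption, not the original implementation.

from threading import Thread


def start_threads(nb_threads, target, chunks, *shared_args):
    # Assumed helper: one thread per chunk; each worker receives the shared
    # arguments first and its slice of events last, then we wait for all of them.
    threads = []
    for chunk in chunks[:nb_threads]:
        thread = Thread(target=target, args=(*shared_args, chunk))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()
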
Example #2
 def run(self):
     queue = JobQueue()
     utils = GraphUtils()
     builder = GraphBuilder()
     nm = NotificationMultimap()
     while True:
         # Main loop
         while queue.isLocked():
             sleep(0.1)
         item = None
         if queue:
             try:
                 item = queue.get()
                 graph = builder.build(item)
                 citem = self.__getCanonicalItem__(item)
                 utils.write_gexf(graph, os.path.join("static", citem + ".gexf"))
                 #utils.write_json(graph, os.path.join("static", citem + ".json"))
                 self.__logger__.info("Graph made for "+item)
                 # Notify registered Twitter users
                 twitter_usernames = nm[item]
                 for user in twitter_usernames:
                     try:
                         answer = Lookup().pushNotification(user)
                     except IOError as e:
                         self.__logger__.error(str(e))
             except Exception as e:
                 if item:
                     queue.put(item)
                 self.__logger__.error(str(e))
         else:
             # Wait 1s
             sleep(1)
             # Re-read the queue directory if the queue is empty
             queue = JobQueue()
Example #3
def create_job_queue():
    data = json.loads(request.data, encoding="utf-8")
    # Checks
    CONFIG_CREATE_JOB_QUEUE.validate(data)
    if data["jobQueueName"] in job_queues:
        abort(400, "Job queue already exists.")
    for ce in data["computeEnvironmentOrder"]:
        if ce["computeEnvironment"] not in compute_environments:
            abort(400, f"Compute environment {ce} does not exist")
        if compute_environments[ce["computeEnvironment"]].state != ComputeEnvironment.STATE_ENABLED:
            abort(400, f"Compute environment {ce} is not enabled.")
    if not (0 < len(data["computeEnvironmentOrder"]) < 3):
        abort(400, f"Invalid number ({len(data['computeEnvironmentOrder'])}) of compute environments selected")
    orders = set()
    for ce in data["computeEnvironmentOrder"]:
        if ce["order"] in orders:
            abort(400, f"Two compute environments have the same order.")
        orders.add(ce["order"])
    # Action
    new_job_queue = JobQueue(**data)
    job_queues[data["jobQueueName"]] = new_job_queue
    return jsonify(new_job_queue.describe(everything=False))
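
For reference, a request body that would pass the validation above could look like the following; the queue and compute environment names are made up, and any extra fields required by CONFIG_CREATE_JOB_QUEUE are omitted.

example_request = {
    "jobQueueName": "example-queue",  # must not already exist in job_queues
    "computeEnvironmentOrder": [      # one or two entries, each with a distinct order
        {"order": 1, "computeEnvironment": "example-compute-env"},
    ],
}
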
def main():
    for test in glob("tests/*[!*.a]"):
        print(f"Running {test}")
        with open(test, "r") as test_fh:
            test_input = test_fh.read().strip()
            sys.stdin = io.StringIO(test_input)
        with open(f"{test}.a") as test_answer_fh:
            test_answer = test_answer_fh.read().strip()

        try:
            job_queue = JobQueue()
            job_queue.solve(write=False)
            test_output = "\n".join([
                f"{assigned_worker} {start_time}"
                for assigned_worker, start_time in zip(
                    job_queue.assigned_workers, job_queue.start_times)
            ])
            assert test_output.strip() == test_answer
        except AssertionError:
            print(
                f"AssertionError at {test}:\n    input: {test_input}\n    expected output: {test_answer}\n    actual output: {test_output}"
            )
            break
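
The test driver above only assumes that JobQueue exposes solve(write=False) and fills assigned_workers and start_times. A minimal sketch of such a class, assuming the classic job-scheduling exercise (input: a line with the worker count and job count, then a line of job durations), might be:

import heapq


class JobQueue:
    """Hypothetical heap-based implementation matching the test driver above."""

    def solve(self, write=True):
        n_workers, n_jobs = map(int, input().split())
        jobs = list(map(int, input().split()))
        assert len(jobs) == n_jobs

        self.assigned_workers = []
        self.start_times = []
        # Min-heap of (next_free_time, worker_id): the worker that becomes free
        # earliest (ties broken by the smallest id) takes the next job.
        free_at = [(0, worker) for worker in range(n_workers)]
        heapq.heapify(free_at)
        for duration in jobs:
            start_time, worker = heapq.heappop(free_at)
            self.assigned_workers.append(worker)
            self.start_times.append(start_time)
            heapq.heappush(free_at, (start_time + duration, worker))

        if write:
            for worker, start in zip(self.assigned_workers, self.start_times):
                print(worker, start)
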
 def __init__(self, cert_file, addr, logger, err_logger=None, timeout=10, idle_timeout=3600, white_list=()):
     self.logger = logger
     self.logger_err = err_logger or self.logger
     self.engine = SSLSocketEngine(io_poll.get_poll(), cert_file=cert_file, is_blocking=True)
     self.engine.set_logger(logger)
     self.engine.set_timeout(rw_timeout=timeout, idle_timeout=idle_timeout)
     self.inf_sock = None
     self.addr = addr
     self.jobqueue = JobQueue(logger)
     self.is_running = False
     self.ip_dict = dict()
     for ip in white_list:
         self.ip_dict[ip] = None
     self.rpc_handles = RPC_ServerHandle()
Example #6
    def submitJob(self, job):
        Q = JobQueue.getQueueObject(self.connection)

        worker = Worker(Q.submitJob, job=job, task="submit")

        # make
        # worker = Worker(self.__submit, job)
        worker.signals.updateStarted.connect(self.updateStarted)
        worker.signals.progress.connect(self.writeLog)
        worker.signals.jobSubmitted.connect(self.jobTableModel.addJob)
        worker.signals.updateFinished.connect(self.updateFinished)

        self.jobsPending = True
        self.threadpool.start(worker)
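
The snippet above (and the syncJobs/deleteJob snippets below) relies on a Worker class that is not shown. A rough PyQt5-style sketch that is consistent with how it is called, with only the signals referenced here and a hypothetical constructor, could be:

from PyQt5.QtCore import QObject, QRunnable, pyqtSignal, pyqtSlot


class WorkerSignals(QObject):
    # Only the signals used in these snippets; the real class likely defines more
    # (e.g. jobsSynced, jobDeleted) with application-specific payload types.
    updateStarted = pyqtSignal()
    progress = pyqtSignal(str)
    jobSubmitted = pyqtSignal(object)
    updateFinished = pyqtSignal()


class Worker(QRunnable):
    """Hypothetical QRunnable wrapper matching calls like Worker(Q.submitJob, job=job, task="submit")."""

    def __init__(self, fn, task=None, **kwargs):
        super().__init__()
        self.fn = fn
        self.task = task      # e.g. "submit", "sync", "delete"
        self.kwargs = kwargs
        self.signals = WorkerSignals()

    @pyqtSlot()
    def run(self):
        # Run the bound queue method on the thread pool and report via signals.
        self.signals.updateStarted.emit()
        result = self.fn(**self.kwargs)
        self.signals.jobSubmitted.emit(result)
        self.signals.updateFinished.emit()
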
Example #7
def event_analysis():
    """
    Event analysis process. It fetches all the event in the database and analyse the description & website and
    then create all the related features
    """
    event_analysis = EventAnalysis()

    # Store all parsed websites so the same website is not parsed several times
    websites = dict()

    # Lists of keywords extracted with TreeTagger
    description_tree_tagger = dict()
    website_tree_tagger = dict()

    events = Event.objects.all()

    if len(events) == 0:
        return

    nb_core = cpu_count()
    nb_events = len(events)
    nb_events_thread = nb_events // nb_core  # integer division: slice bounds must be ints
    events_thread = []

    for i in range(nb_core - 1):
        events_thread.append(events[i * nb_events_thread:(i + 1) *
                                    nb_events_thread])
    events_thread.append(events[(nb_core - 1) * nb_events_thread:])

    # Populate the corpus
    start_threads(nb_core, event_analysis_fulfill_corpus, events_thread,
                  event_analysis, websites, description_tree_tagger,
                  website_tree_tagger)

    #Monothread - event_analysis_fulfill_corpus(event_analysis, websites, description_tree_tagger, website_tree_tagger, events)

    event_analysis.set_corpus_complete()

    # Compute the tf-idf of the keywords in the description and in the website, if one exists
    start_threads(nb_core, event_analysis_compute_tf_idf, events_thread,
                  event_analysis, websites, description_tree_tagger,
                  website_tree_tagger)

    #Monothread - event_analysis_compute_tf_idf(event_analysis, websites, description_tree_tagger, website_tree_tagger, events)

    # Fetch the k most important tags for each event
    job_queue = JobQueue()
    job_queue.start()
    start_threads(
        nb_core,
        event_analysis_fetch_k_most_important_features_and_push_database,
        events_thread, job_queue, event_analysis, websites)
    job_queue.finish()

    #Monothread - event_analysis_fetch_k_most_important_features_and_push_database(None, event_analysis, websites, events)

    compute_statistics(events, description_tree_tagger, website_tree_tagger)
Example #8
 def __init__(self,
              addr,
              client_keys,
              logger,
              err_logger=None,
              timeout=10,
              idle_timeout=3600,
              block_size=128):
     """ client_keys :  dict(ip=>key) or str """
     self.logger = logger
     self.logger_err = err_logger or self.logger
     self.engine = TCPSocketEngine(io_poll.get_poll(), is_blocking=True)
     self.engine.set_logger(logger)
     self.engine.set_timeout(rw_timeout=timeout,
                             idle_timeout=idle_timeout)
     assert isinstance(client_keys, dict)
     self.client_keys = client_keys
     self.inf_sock = None
     self.addr = addr
     self.jobqueue = JobQueue(logger)
     self.is_running = False
     self.ip_dict = dict()
     self.block_size = block_size
     self.rpc_handles = RPC_ServerHandle()
Example #9
    def syncJobs(self):
        # Start a worker that calls the __connect function and
        # synchronizes the jobs running on the server

        Q = JobQueue.getQueueObject(self.connection)

        # connect to host
        worker = Worker(Q.syncJobs, task="sync")
        worker.signals.updateStarted.connect(self.updateStarted)
        worker.signals.progress.connect(self.writeLog)
        worker.signals.jobsSynced.connect(self.jobTableModel.setJobs)
        worker.signals.updateFinished.connect(self.updateFinished)

        self.threadpool.start(worker)
        self.updateJobsPending()
Example #10
 def __init__(self,
              cert_file,
              addr,
              logger,
              err_logger=None,
              timeout=10,
              idle_timeout=3600,
              white_list=()):
     self.logger = logger
     self.logger_err = err_logger or self.logger
     self.engine = SSLSocketEngine(io_poll.get_poll(),
                                   cert_file=cert_file,
                                   is_blocking=True)
     self.engine.set_logger(logger)
     self.engine.set_timeout(rw_timeout=timeout,
                             idle_timeout=idle_timeout)
     self.inf_sock = None
     self.addr = addr
     self.jobqueue = JobQueue(logger)
     self.is_running = False
     self.ip_dict = dict()
     for ip in white_list:
         self.ip_dict[ip] = None
     self.rpc_handles = RPC_ServerHandle()
 def __init__(self, addr, client_keys, logger, err_logger=None, timeout=10, idle_timeout=3600, block_size=128):
     """ client_keys :  dict(ip=>key) or str """
     self.logger = logger
     self.logger_err = err_logger or self.logger
     self.engine = TCPSocketEngine(io_poll.get_poll(), is_blocking=True)
     self.engine.set_logger(logger)
     self.engine.set_timeout(rw_timeout=timeout, idle_timeout=idle_timeout)
     assert isinstance(client_keys, dict)
     self.client_keys = client_keys
     self.inf_sock = None
     self.addr = addr
     self.jobqueue = JobQueue(logger)
     self.is_running = False
     self.ip_dict = dict()
     self.block_size = block_size
     self.rpc_handles = RPC_ServerHandle()
Example #12
    def deleteJob(self, jobID):
        try:
            #print("joblist -> DELETE JOB")

            # part of the queue => this should use Factory design
            Q = JobQueue.getQueueObject(self.connection)

            worker = Worker(Q.deleteJob, jobID=jobID, task="delete")
            worker.signals.updateStarted.connect(self.updateStarted)
            worker.signals.progress.connect(self.writeLog)
            worker.signals.jobDeleted.connect(self.jobTableModel.deleteJob)
            worker.signals.updateFinished.connect(self.updateFinished)

            # Execute job
            self.threadpool.start(worker)
        except Exception:
            self.msgSignal.emit({
                'connectionID': self.connectionID,
                'jobID': jobID,
                'message': str(sys.exc_info()[1]),
                'messageType': 'ERROR'
            })
Example #13
def main():
    """
    Main command-line execution loop.
    """
    try:
        # Parse command line options
        parser, options, arguments = parse_options()

        # Handle regular args vs -- args
        arguments = parser.largs
        remainder_arguments = parser.rargs

        # Update env with any overridden option values
        # NOTE: This needs to remain the first thing that occurs
        # post-parsing, since so many things hinge on the values in env.
        for option in env_options:
            state.env[option.dest] = getattr(options, option.dest)

        # Handle --hosts, --roles, --exclude-hosts (comma separated string =>
        # list)
        for key in ['hosts', 'roles', 'exclude_hosts']:
            if key in state.env and isinstance(state.env[key], basestring):
                state.env[key] = state.env[key].split(',')

        # Handle output control level show/hide
        update_output_levels(show=options.show, hide=options.hide)

        # Handle version number option
        if options.show_version:
            print("Fabric %s" % state.env.version)
            sys.exit(0)

        # Load settings from user settings file, into shared env dict.
        state.env.update(load_settings(state.env.rcfile))

        # Find local fabfile path or abort
        fabfile = find_fabfile()
        if not fabfile and not remainder_arguments:
            abort("""Couldn't find any fabfiles!

Remember that -f can be used to specify fabfile path, and use -h for help.""")

        # Store absolute path to fabfile in case anyone needs it
        state.env.real_fabfile = fabfile

        # Load fabfile (which calls its module-level code, including
        # tweaks to env values) and put its commands in the shared commands
        # dict
        if fabfile:
            docstring, callables, default = load_fabfile(fabfile)
            state.commands.update(callables)

        # Handle case where we were called bare, i.e. just "fab", and print
        # a help message.
        actions = (options.list_commands, options.shortlist, options.display,
            arguments, remainder_arguments, default)
        if not any(actions):
            parser.print_help()
            sys.exit(1)

        # Abort if no commands found
        if not state.commands and not remainder_arguments:
            abort("Fabfile didn't contain any commands!")

        # Now that we're settled on a fabfile, inform user.
        if state.output.debug:
            if fabfile:
                print("Using fabfile '%s'" % fabfile)
            else:
                print("No fabfile loaded -- remainder command only")

        # Shortlist is now just an alias for the "short" list format;
        # it overrides use of --list-format if somebody were to specify both
        if options.shortlist:
            options.list_format = 'short'
            options.list_commands = True

        # List available commands
        if options.list_commands:
            print("\n".join(list_commands(docstring, options.list_format)))
            sys.exit(0)

        # Handle show (command-specific help) option
        if options.display:
            display_command(options.display)

        # If user didn't specify any commands to run, show help
        if not (arguments or remainder_arguments or default):
            parser.print_help()
            sys.exit(0)  # Or should it exit with error (1)?

        # Parse arguments into commands to run (plus args/kwargs/hosts)
        commands_to_run = parse_arguments(arguments)

        # Parse remainders into a faux "command" to execute
        remainder_command = parse_remainder(remainder_arguments)

        # Figure out if any specified task names are invalid
        unknown_commands = []
        for tup in commands_to_run:
            if crawl(tup[0], state.commands) is None:
                unknown_commands.append(tup[0])

        # Abort if any unknown commands were specified
        if unknown_commands:
            abort("Command(s) not found:\n%s" \
                % indent(unknown_commands))

        # Generate remainder command and insert into commands, commands_to_run
        if remainder_command:
            r = '<remainder>'
            state.commands[r] = lambda: api.run(remainder_command)
            commands_to_run.append((r, [], {}, [], [], []))

        # Ditto for a default, if found
        if not commands_to_run and default:
            commands_to_run.append((default.name, [], {}, [], [], []))

        if state.output.debug:
            names = ", ".join(x[0] for x in commands_to_run)
            print("Commands to run: %s" % names)

        # Import multiprocessing if needed, erroring out usefully if it can't.
        if state.env.parallel or _parallel_tasks(commands_to_run):
            try:
                import multiprocessing
            except ImportError, e:
                msg = "At least one task needs to be run in parallel, but the\nmultiprocessing module cannot be imported:"
                msg += "\n\n\t%s\n\n" % e
                msg += "Please make sure the module is installed or that the above ImportError is\nfixed."
                abort(msg)

        # At this point all commands must exist, so execute them in order.
        for name, args, kwargs, cli_hosts, cli_roles, cli_exclude_hosts in commands_to_run:
            # Get callable by itself
            task = crawl(name, state.commands)
            # Set current task name (used for some error messages)
            state.env.command = name
            # Set host list (also copy to env)
            state.env.all_hosts = hosts = get_hosts(
                task, cli_hosts, cli_roles, cli_exclude_hosts)

            # Get pool size for this task
            pool_size = _get_pool_size(task, hosts)
            # Set up job queue in case parallel is needed
            jobs = JobQueue(pool_size)
            if state.output.debug:
                jobs._debug = True

            # If hosts found, execute the function on each host in turn
            for host in hosts:
                # Preserve user
                prev_user = state.env.user
                # Split host string and apply to env dict
                username, hostname, port = interpret_host_string(host)
                # Log to stdout
                if state.output.running and not hasattr(task, 'return_value'):
                    print("[%s] Executing task '%s'" % (host, name))

                # Handle parallel execution
                if requires_parallel(task):
                    # Grab appropriate callable (func or instance method)
                    to_call = task
                    if hasattr(task, 'run') and callable(task.run):
                        to_call = task.run
                    # Wrap in another callable that nukes the child's cached
                    # connection object, if needed, to prevent shared-socket
                    # problems.
                    def inner(*args, **kwargs):
                        key = normalize_to_string(state.env.host_string)
                        state.connections.pop(key, "")
                        try:
                            to_call(*args, **kwargs)
                        except SystemExit, e:
                            if not state.env.skip_on_failure:
                                raise
                    ## Stuff into Process wrapper
                    p = multiprocessing.Process(target=inner, args=args,
                        kwargs=kwargs)
                    # Name/id is host string
                    p.name = state.env.host_string
                    # Add to queue
                    jobs.append(p)
                # Handle serial execution
                else:
                    try:
                        _run_task(task, args, kwargs)
                    except SystemExit, e:
                        if not state.env.skip_on_failure:
                            raise

                # Put old user back
                state.env.user = prev_user
Example #14
from flask import Flask, request, jsonify
from werkzeug.exceptions import HTTPException
from interface import PotholeEvent
from job_queue import JobQueue
import json
import glob
from utility import fakes_pothole_obj
import os

app = Flask(__name__)
job_queue = JobQueue(verbose=True)


@app.route('/')
def hello_world():
    return 'Hello, World!'


@app.route('/test/<hash>', methods=['GET'])
def test(hash):
    for i in range(5):
        job_queue.add(hash)
    print("End test main")
    return jsonify(hash=hash, success=True)


@app.route('/analyze', methods=['POST'])
@app.route('/analyze/<path:json_filename>', methods=['GET'])
def analyze(json_filename=None):
    # if GET and File
    if json_filename is not None:
    class SSL_RPC_Server(object):

        logger = None

        def __init__(self, cert_file, addr, logger, err_logger=None, timeout=10, idle_timeout=3600, white_list=()):
            self.logger = logger
            self.logger_err = err_logger or self.logger
            self.engine = SSLSocketEngine(io_poll.get_poll(), cert_file=cert_file, is_blocking=True)
            self.engine.set_logger(logger)
            self.engine.set_timeout(rw_timeout=timeout, idle_timeout=idle_timeout)
            self.inf_sock = None
            self.addr = addr
            self.jobqueue = JobQueue(logger)
            self.is_running = False
            self.ip_dict = dict()
            for ip in white_list:
                self.ip_dict[ip] = None
            self.rpc_handles = RPC_ServerHandle()

        def add_handle(self, func):
            self.rpc_handles.add_handle(func)
            self.logger.debug("added handle: %s" % str(func))

        def add_view(self, obj):
            for name in dir(obj):
                method = getattr(obj, name)
                if callable(method) and hasattr(method, 'func_name'):
                    if method.func_name.find("__") == 0:
                        continue
                    self.add_handle(method)


        def start(self, worker_num):
            if self.is_running:
                return
            self.jobqueue.start_worker(worker_num)
            self.logger.info("jq started")
            self.inf_sock = self.engine.listen_addr_ssl(self.addr, readable_cb=self._server_handle, new_conn_cb=self._check_ip)
            self.logger.info("server started")
            self.is_running = True


        def stop(self):
            if not self.is_running:
                return
            self.engine.unlisten(self.inf_sock)
            self.logger.info("server stopped")
            self.jobqueue.stop()
            self.logger.info("job_queue stopped")
            self.is_running = False

        def poll(self, timeout=10):
            self.engine.poll(timeout)

        def loop(self):
            while self.is_running:
                self.poll()

        def _check_ip(self, sock, *args):
            peer = sock.getpeername()
            if len(peer) == 2 and self.ip_dict:
                if self.ip_dict.has_key(peer[0]):
                    return sock
                return None
            return sock

        def _server_handle(self, conn):
            sock = conn.sock
            head = None
            try:
                head = NetHead.read_head(sock)
            except socket.error:
                self.engine.close_conn(conn)
                return
            except Exception, e:
                self.logger_err.exception(e)
                self.engine.close_conn(conn)
                return
            try:
                if head.body_len <= 0: 
                    self.logger.error("from peer: %s, zero len head received" % (conn.peer))
                    self.engine.close_conn(conn)
                    return 
                buf = head.read_data(sock)
                req = RPC_Req.deserialize(buf)
                job = InteractJob(self, conn, req)
                self.engine.remove_conn(conn)
                self.jobqueue.put_job(job)
                self.logger.info("peer %s, new req %s enqueue" % (conn.peer, str(req)))
            except Exception, e:
                self.logger_err.exception(e)
                self.engine.close_conn(conn)
                return
    class AES_RPC_Server(object):

        logger = None

        def __init__(self, addr, client_keys, logger, err_logger=None, timeout=10, idle_timeout=3600, block_size=128):
            """ client_keys :  dict(ip=>key) or str """
            self.logger = logger
            self.logger_err = err_logger or self.logger
            self.engine = TCPSocketEngine(io_poll.get_poll(), is_blocking=True)
            self.engine.set_logger(logger)
            self.engine.set_timeout(rw_timeout=timeout, idle_timeout=idle_timeout)
            assert isinstance(client_keys, dict)
            self.client_keys = client_keys
            self.inf_sock = None
            self.addr = addr
            self.jobqueue = JobQueue(logger)
            self.is_running = False
            self.ip_dict = dict()
            self.block_size = block_size
            self.rpc_handles = RPC_ServerHandle()

        def get_key(self, peer):
            return self.client_keys.get(peer[0])

        def add_handle(self, func):
            self.rpc_handles.add_handle(func)
            self.logger.debug("added handle: %s" % str(func))

        def add_view(self, obj):
            for name in dir(obj):
                method = getattr(obj, name)
                if callable(method) and hasattr(method, 'func_name'):
                    if method.func_name.find("__") == 0:
                        continue
                    self.add_handle(method)

        def start(self, worker_num):
            if self.is_running:
                return
            self.jobqueue.start_worker(worker_num)
            self.logger.info("jq started")
            self.inf_sock = self.engine.listen_addr(self.addr, readable_cb=self._server_handle, new_conn_cb=self._check_ip)
            self.logger.info("server started")
            self.is_running = True


        def stop(self):
            if not self.is_running:
                return
            self.engine.unlisten(self.inf_sock)
            self.logger.info("server stopped")
            self.jobqueue.stop()
            self.logger.info("job_queue stopped")
            self.is_running = False

        def poll(self, timeout=10):
            self.engine.poll(timeout)

        def loop(self):
            while self.is_running:
                self.poll()

        def _check_ip(self, sock, *args):
            peer = sock.getpeername()
            if self.get_key(peer):
                return sock
            self.logger_err.warn("block %s" % (str(peer)))

        def _server_handle(self, conn):
            sock = conn.sock
            head = None
            try:
                head = NetHead.read_head(sock)
            except socket.error:
                self.engine.close_conn(conn)
                return
            except Exception, e:
                self.logger_err.exception(e)
                self.engine.close_conn(conn)
                return
            key = self.get_key(conn.peer)
            try:
                if head.body_len < self.block_size: 
                    self.logger.error("from peer: %s, smaller than block_size head received" % (str(conn.peer)))
                    self.engine.close_conn(conn)
                    return 
                buf = head.read_data(sock)
                iv = buf[0:self.block_size]
                buf = buf[self.block_size:]
                crypter_r = AESCryptor(key, iv, self.block_size)
                crypter_w = AESCryptor(key, iv, self.block_size)
                req = RPC_Req.deserialize(crypter_r.decrypt(buf))
                job = AESInteractJob(self, conn, req, crypter_w)
                self.engine.remove_conn(conn)
                self.jobqueue.put_job(job)
                self.logger.info("peer %s, new req %s enqueue" % (conn.peer, str(req)))
            except Exception, e:
                self.logger_err.exception(e)
                self.engine.close_conn(conn)
                return
Example #17
from job_queue import JobQueue

app = Flask(__name__, static_url_path='/static')
app.config.from_envvar('POGOJIG_SETTINGS')


class UploadForm(FlaskForm):
    upload_file = FileField(validators=[DataRequired()])


class ResetForm(FlaskForm):
    pass


job_queue = JobQueue(app.config['JOB_QUEUE_DB'])


def tempfile_path(namespace):
    """ Return a path for a per-session temporary file identified by the given namespace. Create the session tempfile
    dir if necessary. The application tempfile dir is controlled via the upload_path config value and not managed by
    this function. """
    sess_tmp = path.join(app.config['UPLOAD_PATH'], session['session_id'])
    os.makedirs(sess_tmp, exist_ok=True)
    return path.join(sess_tmp, namespace)


def require_session_id(fun):
    @wraps(fun)
    def wrapper(*args, **kwargs):
        if 'session_id' not in session:
Example #18
import itertools
import logging
import signal
import subprocess

from job_queue import JobQueue


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('queue', help='job queue sqlite3 database file')
    parser.add_argument('--loglevel', '-l', default='info')
    args = parser.parse_args()

    numeric_level = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.loglevel)
    logging.basicConfig(level=numeric_level)

    job_queue = JobQueue(args.queue)
    
    signal.signal(signal.SIGALRM, lambda *args: None) # Ignore incoming alarm signals while processing jobs
    signal.setitimer(signal.ITIMER_REAL, 0.001, 1)
    while signal.sigwait([signal.SIGALRM, signal.SIGINT]) == signal.SIGALRM:
        logging.debug('Checking for jobs')
        for job in job_queue.job_iter('render'):
            logging.info(f'Processing {job.type} job {job.id} session {job["session_id"]} from {job.client} submitted {job.created}')
            with job:
                job.result = subprocess.call(['sudo', '/usr/local/sbin/pogojig_generate.sh', job['session_id']])
                logging.info(f'Finished processing {job.type} job {job.id}')
    logging.info('Caught SIGINT. Exiting.')

Example #19
    class SSL_RPC_Server(object):

        logger = None

        def __init__(self,
                     cert_file,
                     addr,
                     logger,
                     err_logger=None,
                     timeout=10,
                     idle_timeout=3600,
                     white_list=()):
            self.logger = logger
            self.logger_err = err_logger or self.logger
            self.engine = SSLSocketEngine(io_poll.get_poll(),
                                          cert_file=cert_file,
                                          is_blocking=True)
            self.engine.set_logger(logger)
            self.engine.set_timeout(rw_timeout=timeout,
                                    idle_timeout=idle_timeout)
            self.inf_sock = None
            self.addr = addr
            self.jobqueue = JobQueue(logger)
            self.is_running = False
            self.ip_dict = dict()
            for ip in white_list:
                self.ip_dict[ip] = None
            self.rpc_handles = RPC_ServerHandle()

        def add_handle(self, func):
            self.rpc_handles.add_handle(func)
            self.logger.debug("added handle: %s" % str(func))

        def add_view(self, obj):
            for name in dir(obj):
                method = getattr(obj, name)
                if callable(method) and hasattr(method, 'func_name'):
                    if method.func_name.find("__") == 0:
                        continue
                    self.add_handle(method)

        def start(self, worker_num):
            if self.is_running:
                return
            self.jobqueue.start_worker(worker_num)
            self.logger.info("jq started")
            self.inf_sock = self.engine.listen_addr_ssl(
                self.addr,
                readable_cb=self._server_handle,
                new_conn_cb=self._check_ip)
            self.logger.info("server started")
            self.is_running = True

        def stop(self):
            if not self.is_running:
                return
            self.engine.unlisten(self.inf_sock)
            self.logger.info("server stopped")
            self.jobqueue.stop()
            self.logger.info("job_queue stopped")
            self.is_running = False

        def poll(self, timeout=10):
            self.engine.poll(timeout)

        def loop(self):
            while self.is_running:
                self.poll()

        def _check_ip(self, sock, *args):
            peer = sock.getpeername()
            if len(peer) == 2 and self.ip_dict:
                if self.ip_dict.has_key(peer[0]):
                    return sock
                return None
            return sock

        def _server_handle(self, conn):
            sock = conn.sock
            head = None
            try:
                head = NetHead.read_head(sock)
            except socket.error:
                self.engine.close_conn(conn)
                return
            except Exception, e:
                self.logger_err.exception(e)
                self.engine.close_conn(conn)
                return
            try:
                if head.body_len <= 0:
                    self.logger.error("from peer: %s, zero len head received" %
                                      (conn.peer))
                    self.engine.close_conn(conn)
                    return
                buf = head.read_data(sock)
                req = RPC_Req.deserialize(buf)
                job = InteractJob(self, conn, req)
                self.engine.remove_conn(conn)
                self.jobqueue.put_job(job)
                self.logger.info("peer %s, new req %s enqueue" %
                                 (conn.peer, str(req)))
            except Exception, e:
                self.logger_err.exception(e)
                self.engine.close_conn(conn)
                return
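
A minimal usage sketch for the server class above; the certificate path, listen address, and the ping handler are assumptions, and only methods defined in the class are called.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("ssl_rpc")


def ping(arg):
    # Hypothetical RPC handler; real handlers depend on the application.
    return "pong: %s" % arg


server = SSL_RPC_Server("cert.pem", ("0.0.0.0", 8000), logger,
                        white_list=("127.0.0.1",))
server.add_handle(ping)
server.start(worker_num=4)  # start the job-queue workers and begin listening
try:
    server.loop()           # poll until stop() clears is_running
finally:
    server.stop()
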
Example #20
####################################################################
parser = argparse.ArgumentParser(description="Automate Todoist workflows.")
parser.add_argument("--loglevel", dest="loglevel", nargs=1, help="set a log level")
args = parser.parse_args()

# If the user specified a log level, use it.
if args.loglevel is not None:
    loglevel, *rest = args.loglevel
    ch.setLevel(loglevel.upper())
# Register the console handler with the logger.
logger.addHandler(ch)

# Setup.
user = todoist.login_with_api_token(API_TOKEN)
logger.info("Logged in to Todoist.")
q = JobQueue(logger=logger)

# Load the config.
with open(CONFIG_DIR / "config.yml") as f:
    conf = yaml.load(f, Loader=yaml.SafeLoader)
# Add the environment variables to the config dict.
conf["email_addr"] = EMAIL_ADDR
conf["email_pw"] = EMAIL_PW
conf["api_token"] = API_TOKEN
logger.debug("Loaded config file.")

###############################################################################
# Add jobs from the jobs.py file.
###############################################################################

# Add each job to the queue, but first bind user and conf variables.
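
As an illustration of the binding step described in the comment above, the pattern could look roughly like this; the jobs module, its function names, and the queue's add method are assumptions, not part of the original code.

from functools import partial

import jobs  # hypothetical module holding the job functions

# Bind the shared user and conf objects so each job becomes a zero-argument callable,
# then hand it to the queue (q.add is assumed to accept such a callable).
for job_fn in (jobs.sync_inbox, jobs.send_digest):  # hypothetical job functions
    q.add(partial(job_fn, user=user, conf=conf))
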
Example #21
    class AES_RPC_Server(object):

        logger = None

        def __init__(self,
                     addr,
                     client_keys,
                     logger,
                     err_logger=None,
                     timeout=10,
                     idle_timeout=3600,
                     block_size=128):
            """ client_keys :  dict(ip=>key) or str """
            self.logger = logger
            self.logger_err = err_logger or self.logger
            self.engine = TCPSocketEngine(io_poll.get_poll(), is_blocking=True)
            self.engine.set_logger(logger)
            self.engine.set_timeout(rw_timeout=timeout,
                                    idle_timeout=idle_timeout)
            assert isinstance(client_keys, dict)
            self.client_keys = client_keys
            self.inf_sock = None
            self.addr = addr
            self.jobqueue = JobQueue(logger)
            self.is_running = False
            self.ip_dict = dict()
            self.block_size = block_size
            self.rpc_handles = RPC_ServerHandle()

        def get_key(self, peer):
            return self.client_keys.get(peer[0])

        def add_handle(self, func):
            self.rpc_handles.add_handle(func)
            self.logger.debug("added handle: %s" % str(func))

        def add_view(self, obj):
            for name in dir(obj):
                method = getattr(obj, name)
                if callable(method) and hasattr(method, 'func_name'):
                    if method.func_name.find("__") == 0:
                        continue
                    self.add_handle(method)

        def start(self, worker_num):
            if self.is_running:
                return
            self.jobqueue.start_worker(worker_num)
            self.logger.info("jq started")
            self.inf_sock = self.engine.listen_addr(
                self.addr,
                readable_cb=self._server_handle,
                new_conn_cb=self._check_ip)
            self.logger.info("server started")
            self.is_running = True

        def stop(self):
            if not self.is_running:
                return
            self.engine.unlisten(self.inf_sock)
            self.logger.info("server stopped")
            self.jobqueue.stop()
            self.logger.info("job_queue stopped")
            self.is_running = False

        def poll(self, timeout=10):
            self.engine.poll(timeout)

        def loop(self):
            while self.is_running:
                self.poll()

        def _check_ip(self, sock, *args):
            peer = sock.getpeername()
            if self.get_key(peer):
                return sock
            self.logger_err.warn("block %s" % (str(peer)))

        def _server_handle(self, conn):
            sock = conn.sock
            head = None
            try:
                head = NetHead.read_head(sock)
            except socket.error:
                self.engine.close_conn(conn)
                return
            except Exception, e:
                self.logger_err.exception(e)
                self.engine.close_conn(conn)
                return
            key = self.get_key(conn.peer)
            try:
                if head.body_len < self.block_size:
                    self.logger.error(
                        "from peer: %s, smaller than block_size head received"
                        % (str(conn.peer)))
                    self.engine.close_conn(conn)
                    return
                buf = head.read_data(sock)
                iv = buf[0:self.block_size]
                buf = buf[self.block_size:]
                crypter_r = AESCryptor(key, iv, self.block_size)
                crypter_w = AESCryptor(key, iv, self.block_size)
                req = RPC_Req.deserialize(crypter_r.decrypt(buf))
                job = AESInteractJob(self, conn, req, crypter_w)
                self.engine.remove_conn(conn)
                self.jobqueue.put_job(job)
                self.logger.info("peer %s, new req %s enqueue" %
                                 (conn.peer, str(req)))
            except Exception, e:
                self.logger_err.exception(e)
                self.engine.close_conn(conn)
                return