Code Example #1
def event_analysis():
    """
    Event analysis process: fetches all the events in the database, analyses each description and website,
    and then creates all the related features.
    """
    event_analysis = EventAnalysis()

    # Store all available websites and avoid parsing the same website several times
    websites = dict()

    # Contain the lists of keywords extracted with TreeTagger
    description_tree_tagger = dict()
    website_tree_tagger = dict()

    events = Event.objects.all()

    if len(events) == 0:
        return

    nb_core = cpu_count()
    nb_events = len(events)
    nb_events_thread = nb_events // nb_core
    events_thread = []

    for i in range(nb_core - 1):
        events_thread.append(events[i * nb_events_thread:(i + 1) *
                                    nb_events_thread])
    events_thread.append(events[(nb_core - 1) * nb_events_thread:])

    # Populate the corpus
    start_threads(nb_core, event_analysis_fulfill_corpus, events_thread,
                  event_analysis, websites, description_tree_tagger,
                  website_tree_tagger)

    # Single-threaded alternative: event_analysis_fulfill_corpus(event_analysis, websites, description_tree_tagger, website_tree_tagger, events)

    event_analysis.set_corpus_complete()

    # Compute the tf-idf of the keywords in the description, and in the website if one exists
    start_threads(nb_core, event_analysis_compute_tf_idf, events_thread,
                  event_analysis, websites, description_tree_tagger,
                  website_tree_tagger)

    # Single-threaded alternative: event_analysis_compute_tf_idf(event_analysis, websites, description_tree_tagger, website_tree_tagger, events)

    # Fetch the k most important tags for each event
    job_queue = JobQueue()
    job_queue.start()
    start_threads(
        nb_core,
        event_analysis_fetch_k_most_important_features_and_push_database,
        events_thread, job_queue, event_analysis, websites)
    job_queue.finish()

    # Single-threaded alternative: event_analysis_fetch_k_most_important_features_and_push_database(None, event_analysis, websites, events)

    compute_statistics(events, description_tree_tagger, website_tree_tagger)
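
The start_threads helper used above is not shown in this excerpt. Below is a minimal sketch of what such a helper could look like, assuming each worker function receives the shared state first and its own slice of events last (as the single-threaded calls in the comments suggest); it is an illustration, not the project's actual implementation.

from threading import Thread


def start_threads(nb_core, target, events_thread, *shared_args):
    """Hypothetical helper: run `target` once per core, each on its own slice of events."""
    threads = []
    for i in range(nb_core):
        # Every worker gets the shared dictionaries plus its own chunk of events.
        t = Thread(target=target, args=(*shared_args, events_thread[i]))
        t.start()
        threads.append(t)
    # Wait for all workers before moving on to the next analysis phase.
    for t in threads:
        t.join()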
Code Example #2
File: __main__.py  Project: RaphaelAslanian/PyBatch
def create_job_queue():
    data = json.loads(request.data)
    # Checks
    CONFIG_CREATE_JOB_QUEUE.validate(data)
    if data["jobQueueName"] in job_queues:
        abort(400, "Job queue already exists.")
    for ce in data["computeEnvironmentOrder"]:
        if ce["computeEnvironment"] not in compute_environments:
            abort(400, f"Compute environment {ce} does not exist")
        if compute_environments[ce["computeEnvironment"]].state != ComputeEnvironment.STATE_ENABLED:
            abort(400, f"Compute environment {ce} is not enabled.")
    if not (0 < len(data["computeEnvironmentOrder"]) < 3):
        abort(400, f"Invalid number ({len(data['computeEnvironmentOrder'])}) of compute environments selected")
    orders = set()
    for ce in data["computeEnvironmentOrder"]:
        if ce["order"] in orders:
            abort(400, f"Two compute environments have the same order.")
        orders.add(ce["order"])
    # Action
    new_job_queue = JobQueue(**data)
    job_queues[data["jobQueueName"]] = new_job_queue
    return jsonify(new_job_queue.describe(everything=False))
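
A hedged usage sketch for the handler above; the URL, port, and compute environment name are placeholders, not values taken from the PyBatch project. The payload satisfies every check: a new jobQueueName, one or two compute environments that exist and are enabled, and distinct order values.

import requests

payload = {
    "jobQueueName": "example-queue",                        # must not already exist
    "computeEnvironmentOrder": [
        {"order": 1, "computeEnvironment": "example-env"},  # must exist and be enabled
    ],
}
# The route path is assumed for illustration only.
response = requests.post("http://localhost:5000/create-job-queue", json=payload)
print(response.json())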
Code Example #3
def main():
    for test in glob("tests/*[!*.a]"):
        print(f"Running {test}")
        with open(test, "r") as test_fh:
            test_input = test_fh.read().strip()
            sys.stdin = io.StringIO(test_input)
        with open(f"{test}.a") as test_answer_fh:
            test_answer = test_answer_fh.read().strip()

        try:
            job_queue = JobQueue()
            job_queue.solve(write=False)
            test_output = "\n".join([
                f"{assigned_worker} {start_time}"
                for assigned_worker, start_time in zip(
                    job_queue.assigned_workers, job_queue.start_times)
            ])
            assert test_output.strip() == test_answer
        except AssertionError:
            print(
                f"AssertionError at {test}:\n    input: {test_input}\n    expected output: {test_answer}\n    actual output: {test_output}"
            )
            break
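
The JobQueue being tested is not included in this excerpt. The sketch below shows one way solve() could produce the assigned_workers and start_times the harness compares, assuming the usual input format (worker count and job count on the first line, job durations on the second) and greedy assignment of each job to the worker that becomes free earliest; the real class may differ.

import heapq
import sys


class JobQueue:
    def solve(self, write=True):
        data = sys.stdin.read().split()
        n_workers, n_jobs = int(data[0]), int(data[1])
        jobs = list(map(int, data[2:2 + n_jobs]))
        self.assigned_workers, self.start_times = [], []
        # Min-heap of (time the worker becomes free, worker index).
        free_at = [(0, w) for w in range(n_workers)]
        heapq.heapify(free_at)
        for duration in jobs:
            t, w = heapq.heappop(free_at)
            self.assigned_workers.append(w)
            self.start_times.append(t)
            heapq.heappush(free_at, (t + duration, w))
        if write:
            for w, t in zip(self.assigned_workers, self.start_times):
                print(w, t)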
Code Example #4
File: rpc_server.py  Project: hackudown/vps
 def __init__(self,
              cert_file,
              addr,
              logger,
              err_logger=None,
              timeout=10,
              idle_timeout=3600,
              white_list=()):
     self.logger = logger
     self.logger_err = err_logger or self.logger
     self.engine = SSLSocketEngine(io_poll.get_poll(),
                                   cert_file=cert_file,
                                   is_blocking=True)
     self.engine.set_logger(logger)
     self.engine.set_timeout(rw_timeout=timeout,
                             idle_timeout=idle_timeout)
     self.inf_sock = None
     self.addr = addr
     self.jobqueue = JobQueue(logger)
     self.is_running = False
     self.ip_dict = dict()
     for ip in white_list:
         self.ip_dict[ip] = None
     self.rpc_handles = RPC_ServerHandle()
Code Example #5
File: rpc_server.py  Project: hackudown/vps
 def __init__(self,
              addr,
              client_keys,
              logger,
              err_logger=None,
              timeout=10,
              idle_timeout=3600,
              block_size=128):
     """ client_keys :  dict(ip=>key) or str """
     self.logger = logger
     self.logger_err = err_logger or self.logger
     self.engine = TCPSocketEngine(io_poll.get_poll(), is_blocking=True)
     self.engine.set_logger(logger)
     self.engine.set_timeout(rw_timeout=timeout,
                             idle_timeout=idle_timeout)
     assert isinstance(client_keys, dict)
     self.client_keys = client_keys
     self.inf_sock = None
     self.addr = addr
     self.jobqueue = JobQueue(logger)
     self.is_running = False
     self.ip_dict = dict()
     self.block_size = block_size
     self.rpc_handles = RPC_ServerHandle()
Code Example #6
import os
from functools import wraps
from os import path

from flask import Flask, session
from flask_wtf import FlaskForm
from wtforms import FileField
from wtforms.validators import DataRequired

from job_queue import JobQueue

app = Flask(__name__, static_url_path='/static')
app.config.from_envvar('POGOJIG_SETTINGS')


class UploadForm(FlaskForm):
    upload_file = FileField(validators=[DataRequired()])


class ResetForm(FlaskForm):
    pass


job_queue = JobQueue(app.config['JOB_QUEUE_DB'])


def tempfile_path(namespace):
    """ Return a path for a per-session temporary file identified by the given namespace. Create the session tempfile
    dir if necessary. The application tempfile dir is controlled via the upload_path config value and not managed by
    this function. """
    sess_tmp = path.join(app.config['UPLOAD_PATH'], session['session_id'])
    os.makedirs(sess_tmp, exist_ok=True)
    return path.join(sess_tmp, namespace)


def require_session_id(fun):
    @wraps(fun)
    def wrapper(*args, **kwargs):
        if 'session_id' not in session:
Code Example #7
File: main.py  Project: eswan18/autodoist_d
####################################################################
parser = argparse.ArgumentParser(description="Automate Todoist workflows.")
parser.add_argument("--loglevel", dest="loglevel", nargs=1, help="set a log level")
args = parser.parse_args()

# If the user specified a log level, use it.
if args.loglevel is not None:
    loglevel, *rest = args.loglevel
    ch.setLevel(loglevel.upper())
# Register the console handler with the logger.
logger.addHandler(ch)

# Setup.
user = todoist.login_with_api_token(API_TOKEN)
logger.info("Logged in to Todoist.")
q = JobQueue(logger=logger)

# Load the config.
with open(CONFIG_DIR / "config.yml") as f:
    conf = yaml.load(f, Loader=yaml.SafeLoader)
# Add the environment variables to the config dict.
conf["email_addr"] = EMAIL_ADDR
conf["email_pw"] = EMAIL_PW
conf["api_token"] = API_TOKEN
logger.debug("Loaded config file.")

###############################################################################
# Add jobs from the jobs.py file.
###############################################################################

# Add each job to the queue, but first bind user and conf variables.
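
The binding mentioned in the comment above can be done with functools.partial, so the queue can later invoke each job with no arguments. A small sketch follows; the job function is hypothetical and not taken from the project's jobs.py.

from functools import partial


def archive_completed(user, conf, logger):
    # Hypothetical job body; the real jobs are defined in jobs.py.
    logger.info("Archiving completed tasks for %s", conf["email_addr"])


# Pre-bind user and conf (and the logger) so the callable needs no arguments.
bound_job = partial(archive_completed, user=user, conf=conf, logger=logger)
bound_job()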
Code Example #8
File: job_processor.py  Project: mdeweerd/pogojig
import itertools
import logging
import signal
import subprocess

from job_queue import JobQueue


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('queue', help='job queue sqlite3 database file')
    parser.add_argument('--loglevel', '-l', default='info')
    args = parser.parse_args()

    numeric_level = getattr(logging, args.loglevel.upper(), None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.loglevel)
    logging.basicConfig(level=numeric_level)

    job_queue = JobQueue(args.queue)
    
    signal.signal(signal.SIGALRM, lambda *args: None) # Ignore incoming alarm signals while processing jobs
    signal.setitimer(signal.ITIMER_REAL, 0.001, 1)
    while signal.sigwait([signal.SIGALRM, signal.SIGINT]) == signal.SIGALRM:
        logging.debug('Checking for jobs')
        for job in job_queue.job_iter('render'):
            logging.info(f'Processing {job.type} job {job.id} session {job["session_id"]} from {job.client} submitted {job.created}')
            with job:
                job.result = subprocess.call(['sudo', '/usr/local/sbin/pogojig_generate.sh', job['session_id']])
                logging.info(f'Finished processing {job.type} job {job.id}')
    logging.info('Caught SIGINT. Exiting.')

Code Example #9
from flask import Flask, request, jsonify
from werkzeug.exceptions import HTTPException
from interface import PotholeEvent
from job_queue import JobQueue
import json
import glob
from utility import fakes_pothole_obj
import os

app = Flask(__name__)
job_queue = JobQueue(verbose=True)


@app.route('/')
def hello_world():
    return 'Hello, World!'


@app.route('/test/<hash>', methods=['GET'])
def test(hash):
    for i in range(5):
        job_queue.add(hash)
    print("End test main")
    return jsonify(hash=hash, success=True)


@app.route('/analyze', methods=['POST'])
@app.route('/analyze/<path:json_filename>', methods=['GET'])
def analyze(json_filename=None):
    # if GET and File
    if json_filename is not None: