Example #1
def worker():
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.INFO)

    host = os.getenv("REDIS_SERVICE_HOST", "redis-master")
    q = rediswq.RedisWQ(name="jobProduct", host=host)

    logging.info("Worker with sessionID %s.", q.sessionID())
    logging.info("Initial queue state empty=%s.", q.empty())

    host = os.getenv("DASK_SCHEDULER_HOST",
                     "dask-scheduler.dask.svc.cluster.local")
    dask_client = Client(f"{host}:8786")

    dc = Datacube()

    s3_client = S3Client()

    lease_secs = int(os.getenv("JOB_LEASE_PERIOD", "3600"))

    while not q.empty():
        item = q.lease(lease_secs=lease_secs, block=True, timeout=600)
        if item is not None:
            itemstr = item.decode("utf=8")
            logging.info("Working on %s.", itemstr)
            process_job(dc, dask_client, s3_client, itemstr, lease_secs)
            q.complete(item)
        else:
            logging.info("Waiting for work.")

    logging.info("Queue empty, exiting.")
def main():
    host = os.getenv("REDIS_SERVICE_HOST", "localhost")
    password = os.getenv("REDIS_PASSWORD")
    queue_name = os.getenv("REDIS_JOBQUEUE_NAME", "analysis-jobs")

    tornado.options.parse_command_line()
    # test azure connection
    app_log.info("Testing database connection...")
    db = connect_to_azure_database()
    db.close()
    app_log.info("Database connection okay!")

    q = rediswq.RedisWQ(name=queue_name, host=host, password=password)
    app_log.info("Worker with sessionID: " + q.sessionID())
    app_log.info(f"Running with lease time {ANALYSIS_LEASE_SECONDS}")
    gc_interval = max(ANALYSIS_LEASE_SECONDS // 4, 10)
    app_log.info(f"Running garbage collection if idle for {gc_interval}")
    while True:
        item = q.lease(
            lease_secs=ANALYSIS_LEASE_SECONDS,
            block=True,
            timeout=gc_interval,
        )
        if item is None:
            app_log.debug("Waiting for work")
            q.gc()
            continue
        tic = time.perf_counter()
        process_one(q, item)
        toc = time.perf_counter()
        app_log.info(f"Analysis completed in {int(toc-tic)}s")
Example #3
def judge(args):
    queue_name = args.queue
    q = rediswq.RedisWQ(name=queue_name, host="redis")
    while not q.empty():
        time.sleep(1)
    time.sleep(30)
    if queue_name == "result":
        print('Inference Complete')
Example #4
def processQueue(host):
    print("Process started")
    q = rediswq.RedisWQ(name=job, host=host)
    print("Worker with sessionID: " + q.sessionID())
    print("Initial queue state: empty=" + str(q.empty()))
    while not q.empty():
        item = q.lease(lease_secs=10, block=True, timeout=2)
        if item is not None:
            itemstr = item.decode("utf=8")
            print("Working on " + itemstr)
            time.sleep(10)  # Put your actual work here instead of sleep.
            q.complete(item)
        else:
            print("Waiting for work")
    print("Process finished")
    def bind_redis(self):
        for retry_n in range(20):
            try:
                self.redis_client = Redis(host=self.redis_endpoint)
                self.redis_client.delete('test_conn')
                logging.info('Connected to Redis!')
            except Exception:
                logging.warning(
                    'Failed to connect to Redis. Retrying to connect in {} seconds...'
                    .format(Fibo(retry_n)))
                time.sleep(Fibo(retry_n))
                continue
            break
        else:
            raise RedisConnectError()

        self.q = rediswq.RedisWQ(name="reconocer", host=self.redis_endpoint)
Example #6
def run():

    # for dev purposes only: bypass redis and process static item.
    if DEV_MODE and DEV_ITEM is not None:
        print('INFO:Running in dev-mode with: %s' % DEV_ITEM)
        print(process_client(DEV_ITEM, None))
        return True

    # get Redis address from the environment.
    # defaults to 'redis:6379' using the queue 'job'.
    redis_host = os.environ.get('REDIS_HOST')
    redis_port = os.environ.get('REDIS_PORT', 6379)
    redis_queue = os.environ.get('REDIS_QUEUE', 'job')

    # falling back to 'redis' is more useful than the
    # default 'localhost', especially in k8s clusters.
    if redis_host is None:
        print('WARNING:You did not provide a Redis host. ' +
              'Falling back to "redis".')
        redis_host = 'redis'

    print("INFO:Trying to reach redis running at %s:%s. Using queue %s." %
          (redis_host, redis_port, redis_queue))
    queue = rediswq.RedisWQ(name=redis_queue, host=redis_host, port=redis_port)
    print("INFO:Connection established")
    print("INFO:Worker with sessionID: " + queue.sessionID())

    while not queue.empty():
        print("INFO:Consulting work queue for the next item.")
        object_url = queue.lease(lease_secs=LEASE_SECS, block=True, timeout=2)

        item_start = time.time()

        if object_url is not None:
            print("INFO:Processing item... " + object_url.decode("utf=8"))
            if process_client(object_url, queue):
                queue.complete(object_url)

                time_spent = time.time() - item_start

                print(
                    "INFO:Item completed. Results posted. '{}' seconds to process item."
                    .format(time_spent))
        else:
            queue.check_expired_leases()
            print("INFO:Waiting for work...")
    def bind_redis(self):
        for retry_n in range(20):
            try:
                self.redis_client = Redis(host=self.redis_endpoint)
                self.redis_client.delete('test_conn')
                logging.info('Connected to Redis!')
            except Exception:
                logging.warning(
                    'Failed to connect to Redis. Retrying to connect in {} seconds...'.format(Fibo(retry_n)))
                time.sleep(Fibo(retry_n))
                continue
            break
        else:
            raise RedisConnectError()

        self.q = rediswq.RedisWQ(name=self.work_queue, host=self.redis_endpoint)
        logging.info('Worker with sessionID: ' + self.q.sessionID())
        logging.info('Initial queue state: empty=' + str(self.q.empty()))
def main():
    redis_host = os.environ.get("REDIS_HOST")
    if not redis_host:
        redis_host = "redis"
    q = rediswq.RedisWQ(name="temperature_job", host=redis_host)
    print("Worker with sessionID: " +  q.sessionID())
    print("Initial queue state: empty=" + str(q.empty()))
    while not q.empty():
        item = q.lease(lease_secs=180, block=True, timeout=2) 
        if item is not None:
            filename = item.decode("utf=8")
            print("Aggregating " + filename)
            aggregate_temperature_file(filename)
            q.complete(item)
        else:
            print("Waiting for work")
            import time
            time.sleep(5)
Example #9
def run():
    q = rediswq.RedisWQ(name="job", host=HOST)
    enc = codify.Encoder()
    APP_KEY = os.getenv("APP_KEY")
    print("Worker with sessionID: " + q.sessionID())
    print("Initial queue state: empty=" + str(q.empty()))
    while not q.empty():
        item = q.lease(lease_secs=LEASE_SECS, block=True, timeout=2)
        if item is not None:
            prefix = enc.encode(APP_KEY, item.decode("utf=8"))
            if find(prefix, q):
                q.complete(item)
        else:
            # TODO(clenimar): find a better way to regularly check expired
            # leases. Preferably in the Redis queue implementation itself.
            q.check_expired_leases()
            print("Waiting for work")
            continue

    print("Queue empty, exiting")
def main():
    """
    Workload which:
      1. Claims a filename from a Redis Worker Queue
      2. Reads the dataset from the file
      3. Partially trains the model on the dataset
      4. Saves a model checkpoint and generates a report on
         the performance of the model after the partial training.
      5. Removes the filename from the Redis Worker Queue
      6. Repeats 1 through 5 until the Queue is empty
    """
    q = rediswq.RedisWQ(name="datasets", host=HOST)
    print("Worker with sessionID: " + q.sessionID())
    print("Initial queue state: empty=" + str(q.empty()))
    checkpoint_path = None
    while not q.empty():
        # Claim item in Redis Worker Queue
        item = q.lease(lease_secs=20, block=True, timeout=2)
        if item is not None:
            dataset_path = item.decode("utf-8")
            print("Processing dataset: " + dataset_path)
            training_dataset_path = FILESTORE_PATH + dataset_path

            # Initialize the model training manager class
            model_trainer = FraudDetectionModelTrainer(
                training_dataset_path,
                TESTING_DATASET_PATH,
                CLASS_LABEL,
                checkpoint_path=checkpoint_path,
            )

            # Train model and save checkpoint + report
            checkpoint_path = model_trainer.train_and_save(OUTPUT_DIR)
            model_trainer.generate_report(REPORT_PATH)

            # Remove item from Redis Worker Queue
            q.complete(item)
        else:
            print("Waiting for work")

    print("Queue empty, exiting")
Example #11
def generate_session_traffic(listname):
    q = rediswq.RedisWQ(name=listname, host="redis")
    print("Worker with sessionID: " + q.sessionID())
    print("Initial queue state: empty=" + str(q.empty()))
    while not q.empty():
        item = q.lease(lease_secs=5, block=True, timeout=2)
        if item is not None:
            itemstr = item.decode("utf-8")
            print("-----------------------------------------------------")
            # print("Working on " + itemstr)
            print("Working on " + str(itemstr.split(':')))
            domain_value, account_value, max_sess = itemstr.split(':')
            print("domain_value ==> %s" % (domain_value))
            print("account_value ==> %s" % (account_value))
            print("max_sessions==> %s" % (max_sess))
            insert_data(domain_value, account_value, max_sess)
            # insert_data(instance,database,domain_value,account_value,max_sess)
            q.complete(item)
        else:
            print("Waiting for work")
    print("Queue empty, exiting")
Example #12
    def post(self):
        args = parser.parse_args()

        session_id = args['session_id']

        q = rediswq.RedisWQ(name=session_id, host="20.49.225.191", port="6379")

        if not q.empty():
            item = q.lease(lease_secs=10, block=True, timeout=2)
            if item is not None:
                tuple_ = pickle.loads(item)
                flow_id = tuple_[0]
                task_id = tuple_[1]

                flow = openml.flows.get_flow(flow_id,
                                             reinstantiate=True,
                                             strict_version=False)
                task = openml.tasks.get_task(task_id)

                dataset = task.get_dataset()
                tti = task.task_type_id
                ep = task.estimation_procedure_id
                em = task.evaluation_measure

                model = flow.model

                X, y, categorical_indicator, attribute_names = dataset.get_data(
                    dataset_format='array',
                    target=dataset.default_target_attribute)

                model.fit(X, y)
                preds = model.predict(X)
                score = mean_squared_error(y, preds)

                q.complete(item)

                return score
import rediswq
import os
import pandas as pd
import json

# TODO: Remove time.sleep() for actual applications

# Config variables
redis_wq_name = os.environ['REDIS_WQ_NAME']
redis_wq_host = os.environ['REDIS_WQ_HOST']
wordlist_file_path = os.environ['WORDLIST_FILE_PATH']
input_datasets_dir_path = os.environ['INPUT_DATASETS_DIR_PATH']
processed_datasets_dir_path = os.environ['PROCESSED_DATASETS_DIR_PATH']

# Initiate Redis
q = rediswq.RedisWQ(name=redis_wq_name, host=redis_wq_host)
print("Worker with sessionID: " + q.sessionID())
print("Initial queue state: empty=" + str(q.empty()))


def wordcount_task(filename, pattern):
    input_filename = input_datasets_dir_path + "/" + filename
    output_filename = processed_datasets_dir_path + "/" + filename.replace(
        "csv", "json")
    col_names = ['id', 'date', 'time', 'tweet', 'location']
    df = pd.read_csv(input_filename, names=col_names, encoding='utf-8')
    df = df[df['tweet'].str.contains(pattern, na=False)]
    if not df.empty:
        df1 = (df['tweet'].str.split(expand=True).stack().value_counts().
               rename_axis('vals').reset_index(name='count'))
        df1["count"] = pd.to_numeric(df1["count"])
import os
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
import rediswq

if __name__ == '__main__':
    q = rediswq.RedisWQ(
        name=os.getenv('SCRAPER_QUEUE_NAME'),
        host=os.getenv('REDIS_HOST'),
    )
    crawler_process = CrawlerProcess(get_project_settings())
    for spider_name in crawler_process.spider_loader.list():
        q.put(spider_name)
Example #15
import time
import rediswq
import os
import json
import pathlib

try:
    session_id = os.environ['SESSION_ID']
except KeyError:
    session_id = 'default'

q = rediswq.RedisWQ(name=session_id, host="20.49.225.191", port="6379")

output = {}

while not q.empty():
    item = q.lease(lease_secs=10, block=True, timeout=2)
    if item is not None:
        itemstr = item.decode("utf-8")
        output[itemstr] = f"Worked on {itemstr}"
        time.sleep(10)
        q.complete(item)

print(json.dumps(output))
Example #16
    per_scene_wofs(**loaded_json)

##################
# Job processing #
##################

import os
import logging
import rediswq
import datetime

level = os.getenv("LOGLEVEL", "INFO").upper()
logging.basicConfig(format="%(asctime)s %(levelname)-8s %(name)s %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=level)

host = os.getenv("REDIS_SERVICE_HOST", "redis-master")
q = rediswq.RedisWQ(name="jobWater", host=host)

logger = logging.getLogger("worker")
logger.info(f"Worker with sessionID: {q.sessionID()}")
logger.info(f"Initial queue state: empty={q.empty()}")

while not q.empty():
    item = q.lease(lease_secs=1800, block=True, timeout=600)
    if item is not None:
        itemstr = item.decode("utf=8")
        logger.info(f"Working on {itemstr}")
        start = datetime.datetime.now().replace(microsecond=0)

        process_scene(itemstr)
        q.complete(item)
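        # (Assumed continuation; the listing is truncated here.) Log how long
        # the scene took, using the 'start' timestamp captured above.
        end = datetime.datetime.now().replace(microsecond=0)
        logger.info(f"Completed {itemstr} in {end - start}")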
Example #17
##################
# Job processing #
##################

import os
import logging
import rediswq
import datetime

level = os.getenv("LOGLEVEL", "INFO").upper()
logging.basicConfig(format="%(asctime)s %(levelname)-8s %(name)s %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S",
                    level=level)

host = os.getenv("REDIS_SERVICE_HOST", "redis-master")
q = rediswq.RedisWQ(name="jobS2L1Cv8", host=host)

logger = logging.getLogger("worker")
logger.info(f"Worker with sessionID: {q.sessionID()}")
logger.info(f"Initial queue state: empty={q.empty()}")

while not q.empty():
    item = q.lease(lease_secs=1800, block=True, timeout=600)
    if item is not None:
        itemstr = item.decode("utf=8")
        logger.info(f"Working on {itemstr}")
        start = datetime.datetime.now().replace(microsecond=0)

        process_scene(itemstr)
        q.complete(item)
Example #18
#!/usr/bin/env python
# From https://kubernetes.io/docs/tasks/job/fine-parallel-processing-work-queue/
import subprocess
import rediswq
import os

host = "redis"

q = rediswq.RedisWQ(name="datacrawler-urls", host="redis")
print("Worker with sessionID: " + q.sessionID())
print("Initial queue state: empty=" + str(q.empty()))
while not q.empty():
    item = q.lease(lease_secs=10, block=True, timeout=2)
    if item is not None:
        itemstr = item.decode("utf=8")
        print("Working on " + itemstr)
        data = itemstr.split(",")
        os.environ["RANK"] = data[0]
        os.environ["URL"] = data[1]

        return_code = subprocess.call([
            "/opt/apt/datacrawler/datacrawler", "--no-sandbox", "--headless",
            "--disable-gpu", "--disk-cache-dir=/dev/null",
            "--disk-cache-size=1"
        ])

        if return_code == 0:
            q.complete(item)
    else:
        print("Waiting for work")
print("Queue empty, exiting")
Example #19
#!/usr/bin/env python

import time
import os, sys
import rediswq

host = os.getenv("REDIS_SERVICE_NAME", "redis")
# Uncomment next two lines if you do not have Kube-DNS working.
# import os
# host = os.getenv("REDIS_SERVICE_HOST")
job_number = int(os.getenv("INITIAL_JOB_NUMBER", "1"))

q = rediswq.RedisWQ(name=os.getenv("SUBMIT_QUEUE_NAME", default="archive"), host=host)
print("Worker with sessionID: " +  q.sessionID())
print("Initial queue state: empty=" + str(q.empty()))
while True:
  item = q.lease(lease_secs=300, block=True, timeout=2)
  if item is not None:
    itemstr = item.decode("utf-8")
    print("Working on " + itemstr)
    time.sleep(10) # Put your actual work here instead of sleep.
    print(os.popen('python /handle_request.py {:05d} {} > last_job.txt'.format(job_number, itemstr)).read(), file=sys.stderr)
    job_number = job_number + 1
    q.complete(item)
  else:
    print("Waiting for work")
  while q.empty():
    # Sleep while we wait for work.
    time.sleep(30)
print("Queue empty, exiting")
Example #20
import subprocess

host = os.getenv("REDIS_SERVICE_HOST")
queue = os.getenv("REDIS_SERVICE_QUEUE")

# Ensure that our output folder exists
output_dir = '/output/' + queue.replace('-', '/')
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# Set up a basic file logger
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging

q = rediswq.RedisWQ(name=queue, host=host)
logger.info("Worker started with sessionID: {0}".format(q.sessionID()))
logger.debug("Initial state: empty={0}".format(str(q.empty())))

# Timeout for worker pods in seconds
timeout = 600
start_time = time.time()
next_timeout = start_time + timeout

while not q.empty() and time.time() < next_timeout:
    item = q.lease(lease_secs=600, block=True, timeout=2)
    if item is not None:
        # Read a work item (a bash command) from the queue
        command = item.decode("utf=8")
        logger.info("Running command: {0}".format(command))
Example #21
    def post(self):

        args = parser.parse_args()
        ttid = args['ttid']
        did = args['did']
        target = args['target']
        session_id = args['session_id']
        predict = args['predict']
        predict_file = args['predict_file']

        is_csv = False

        if (isinstance(predict_file, FileStorage)):
            csvfile = pd.read_csv(predict_file.stream)
            predict = csvfile
            is_csv = True
        else:
            predict = json.loads(predict)

        print(
            f"Did: {did}, ttid: {ttid}, target: {target}, predict: {predict}, session_id: {session_id}"
        )

        if check_exists(active_datasets, (did, target)):
            print("Already exists.")
            previous = next((item for item in active_datasets
                             if (item[0] == did and item[1] == target)), None)
            score = previous[2]
            result = filter(lambda dict: dict['did'] == did, active)
            result = list(result)[0]
            fid = result['flow']
            filename = f"f{fid}-d{did}.pkl"
            dataset = openml.datasets.get_dataset(did)

            X, y, categorical_indicator, attribute_names = dataset.get_data(
                dataset_format='dataframe', target=target)

            X = X.dropna()

            if is_csv:
                to_predict = format_row(X, predict, attribute_names)
            else:
                to_predict = format_string_row(X, predict, attribute_names)

            model = None

            with open(filename, 'rb') as f:
                model = pickle.load(f)

            pred = model.predict(to_predict)
            le = preprocessing.LabelEncoder()
            le.fit_transform(y)

            res = {target: list(le.inverse_transform(pred)), 'score': score}

            return res

        else:
            print("New dataset.")

            redis = rediswq.RedisWQ(name=session_id,
                                    host="20.49.225.191",
                                    port="6379")

            datasets = client.execute(
                gql(close_connections.substitute(did=did, distance=10000)))
            tasks_to_check = set()

            for dataset in datasets['close_connections']:
                tasks = client.execute(
                    gql(
                        similar_tasks.substitute(did=dataset['did'],
                                                 task_type_id=ttid)))
                for t in tasks['similar_tasks']:
                    tasks_to_check.add(t['tid'])

            flows = set()
            for t in tasks_to_check:
                limit = 1
                if ttid == 2:
                    limit = 100
                evals = client.execute(
                    gql(evaluations.substitute(tid=t, limit=limit)))

                for e in evals['evaluations']:
                    flows.add(e['flow_id'])

            print(flows)

            jobs = len(flows)
            items = zip(list(flows), itertools.repeat(did),
                        itertools.repeat(target))

            pickled = [pickle.dumps(t) for t in list(items)]
            redis.add_items(pickled, str(session_id))
            set_jobs(jobs, session_id)
            top = create_job(jobs)
            delete_job("job-wq-2")

            dataset = openml.datasets.get_dataset(did)

            X, y, categorical_indicator, attribute_names = dataset.get_data(
                dataset_format='dataframe', target=target)

            X = X.replace(to_replace="?", value=np.nan)
            X = X.dropna()

            if is_csv:
                to_predict = format_row(X, predict, attribute_names)
            else:
                to_predict = format_string_row(X, predict, attribute_names)

            file_, score = create_model(top['flow'], did, target)
            model = None

            with open(file_, 'rb') as f:
                model = pickle.load(f)

            pred = model.predict(to_predict)
            le = preprocessing.LabelEncoder()
            le.fit_transform(y)

            try:
                res = {
                    target: list(le.inverse_transform(pred)),
                    'score': score
                }
            except ValueError:
                res = {target: pred, 'score': score}

            active.append(top)
            active_datasets.append((did, target, score))

            print(res)

            return res
Example #22
##################
# Job processing #
##################

import os
import logging
import rediswq
import datetime

level = os.getenv("LOGLEVEL", "INFO").upper()
logging.basicConfig(format="%(asctime)s %(levelname)-8s %(name)s %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S",
                    level=level)

host = os.getenv("REDIS_SERVICE_HOST", "redis-master")
q = rediswq.RedisWQ(name="jobMOD", host=host)

logger = logging.getLogger("worker")
logger.info(f"Worker with sessionID: {q.sessionID()}")
logger.info(f"Initial queue state: empty={q.empty()}")

while not q.empty():
    item = q.lease(lease_secs=1800, block=True, timeout=600)
    if item is not None:
        itemstr = item.decode("utf=8")
        logger.info(f"Working on {itemstr}")
        start = datetime.datetime.now().replace(microsecond=0)

        process_scene(itemstr)
        q.complete(item)
Example #23
def run_process(serviceConfig, outputDir, Model):

    logging.debug('IMAGE:{}'.format(
        os.path.basename(os.path.normpath(serviceConfig.image_path))))

    if os.path.isdir(outputDir) is False:
        os.makedirs(outputDir)

    cache_path = os.path.join(outputDir, 'full_results.pkl')

    geoImg = GeoImage(serviceConfig.image_path, gap=200)

    px, py = geoImg.pixelSize  # tuple(px,py)
    pixel_size = (px, py)

    model = None if serviceConfig.cacheStrategy == Cache.read else Model(
        serviceConfig.modelStorePath)

    geojson = ContourProcessor(geoImg, outputDir)

    readCacheIterator = None
    if serviceConfig.cacheStrategy == Cache.read:
        with open(cache_path, 'rb') as input:
            cachedReadResults = pickle.load(input)
        readCacheIterator = iter(cachedReadResults)

    cacheWriteResults = []

    slice_ids = geoImg.getSplits()

    # Cache Read mode

    if serviceConfig.cacheStrategy == Cache.read:
        for xyOffset in slice_ids:
            result = next(readCacheIterator)

    # Cache Write mode - Use multi-gpu
    if serviceConfig.cacheStrategy == Cache.write:

        # k8s redis
        # Redis Queues
        q = rediswq.RedisWQ(name="job", host="redis")
        qresult = rediswq.RedisWQ(name="result", host="redis")
        qstat = rediswq.RedisWQ(name="status", host="redis")

        # Redis Object
        qr = redis.Redis(host='redis', port=6379, decode_responses=True)

        print("Worker with sessionID: " + q.sessionID())
        print("Initial queue state: empty=" + str(q.empty()))
        start_time = time.time()
        while not q.empty():

            # Remove array suppression (...) so that it doesn't mess up the array to string conversion
            import numpy as np
            np.set_printoptions(threshold=np.inf)

            item = q.lease(lease_secs=60, block=False)

            if item is not None:
                itemstr = item.decode("utf=8")
                itemstr = re.findall(r"\d+\.?\d*", itemstr)
                slice_name = "{}_{}.png".format(itemstr[0], itemstr[1])
                print("Working on slice :" + slice_name)
                im_path = '/mnt/crops/' + slice_name

                left, up = itemstr

                img = cv2.imread(
                    im_path)  # Read image from crop directory -> left_up.png
                result = model.infere(img,
                                      imageId='left-{}_up-{}'.format(left, up))

                # # Default -> Creates race condition
                # patchGeom = geojson.addPatchBoundary(left, up)
                # for feature in result:
                #      geojson.addFeature(left, up, feature)
                #      #pod_flag = 1
                # q.complete(item)

                # Create queue element
                q_elem = [left, up, result]

                # Push into receiver queue
                qr.rpush('result', str(q_elem))

                q.complete(item)

            else:
                print("Waiting...")

        end_time = time.time()

        print("Queue Empty \nExiting", "\nProcess Time:",
              '{}s'.format(end_time - start_time))

        if qstat.empty() is False:
            ind = qstat.lease(lease_secs=60, block=False)
            qstat.complete(ind)
        else:
            logging.debug('Writing detections to geojson')
            from numpy import array, int32  # For eval to work
            while not qresult.empty():
                res = qresult.lease(lease_secs=60, block=False)
                if res is not None:

                    res2 = res

                    if sys.version_info[0] == 3:
                        # Python3
                        res2 = res.decode("utf=8")

                    qres_elem = eval(res2)

                    left, up = eval(qres_elem[0]), eval(qres_elem[1])
                    result = qres_elem[2]  # type list

                    cacheWriteResults.append(copy.deepcopy(result))

                    patchGeom = geojson.addPatchBoundary(left, up)
                    for feature in result:
                        geojson.addFeature(left, up, feature, patchGeom)

                    qresult.complete(res)

            logging.debug("Caching Detections: {}".format(
                len(cacheWriteResults)))

            with open(cache_path, 'wb') as output:
                pickle.dump(cacheWriteResults, output, pickle.HIGHEST_PROTOCOL)

            logging.debug('Geojson Cleanup')
            geojson.cleanUp()

            if os.path.exists(geojson.cleanedGeojsonPath) is False:
                logging.error('Cleaned Geojson not produced')
Example #24
    def post(self):

        path = site.USER_SITE

        if path not in sys.path:
            sys.path.append(path)

        args = parser.parse_args()

        ttid = args['tid']
        did = args['did']
        target = args['target']
        session_id = args['session_id']
        predict = args['predict']
        print(active_datasets)
        print(active)
        if did in active_datasets:

            result = filter(lambda dict: dict['did'] == did, active)
            result = list(result)[0]
            fid = result['flow']
            filename = f"f{fid}-d{did}.pkl"
            dataset = openml.datasets.get_dataset(did)

            X, y, categorical_indicator, attribute_names = dataset.get_data(
                dataset_format='dataframe', target=target)

            X[target] = y
            X = X.dropna()

            model = None

            with open(filename, 'rb') as f:
                model = pickle.load(f)

            test = format_test(X, predict, attribute_names)
            test = test.drop([target], axis=1)
            pred = model.predict(test)

            res = {target: pred.tolist()}

            return res

        else:
            active_datasets.append(did)
            redis = rediswq.RedisWQ(name=session_id,
                                    host="20.49.225.191",
                                    port="6379")

            datasets = client.execute(
                gql(close_connections.substitute(did=did, distance=10000)))
            tasks_to_check = set()

            for dataset in datasets['close_connections']:
                tasks = client.execute(
                    gql(
                        similar_tasks.substitute(did=dataset['did'],
                                                 task_type_id=ttid)))
                for t in tasks['similar_tasks']:
                    tasks_to_check.add(t['tid'])

            flows = set()

            for t in tasks_to_check:
                evals = client.execute(
                    gql(evaluations.substitute(tid=t, limit=1)))
                for e in evals['evaluations']:
                    flows.add(e['flow_id'])

            jobs = len(flows)

            items = zip(list(flows), itertools.repeat(did),
                        itertools.repeat(target))

            print(list(flows))

            pickled = [pickle.dumps(t) for t in list(items)]
            redis.add_items(pickled, str(session_id))
            set_jobs(jobs, session_id)
            results = create_job(jobs)

            max_ = 1
            top = {}

            for result in results:
                if result['flow'] is not None:
                    if result['score'] < max_ and result['score'] > 0:
                        max_ = result['score']
                        top = result

            delete_job("job-wq-2")
            dataset = openml.datasets.get_dataset(did)

            X, y, categorical_indicator, attribute_names = dataset.get_data(
                dataset_format='dataframe', target=target)

            X[target] = y
            X = X.dropna()
            active.append(top)
            file_ = create_model(top['flow'], did, target)
            model = None

            with open(file_, 'rb') as f:
                model = pickle.load(f)

            test = format_test(X, predict, attribute_names)
            test = test.drop([target], axis=1)
            pred = model.predict(test)

            res = {target: pred.tolist()}

            return res
Example #25
    '--forgive_threshold',
    type=int,
    default=3,
    help=
    'max number of times in a row that you forgive for getting a reason to kill'
)
parser.add_argument('-r',
                    '--reset',
                    type=int,
                    default=False,
                    help='switch to only reset kill queue to 0')
args = parser.parse_args()

####################
### Connect to Redis Server
q = rediswq.RedisWQ(name=REDIS_SERVER_NAME, host=REDIS_SERVER_IP)

####################
### Reset Kill Queue to 0
if args.reset:
    #item = q._db.rpop(q._kill_q_key)
    q._db.flushdb()  #Delete all keys in the current database
    print("Deleted all keys in the current database...")
    exit()

####################
### Pull items from Redis Queue for Read + Write
if args.killquick:
    # add to the redis q for kill
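    # (Assumed continuation; the listing is truncated here.) Push a kill
    # marker onto the kill queue hinted at by q._kill_q_key above.
    q._db.rpush(q._kill_q_key, "kill")
    print("Pushed kill signal onto the kill queue...")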
Example #26
              _ _
             | | |
  _ __   ___ | | |
 | '_ \ / _ \| | |
 | |_) | (_) | | |
 | .__/ \___/|_|_|
 | |
 |_|
"""
import constants
import rediswq
import time

PRINT_DELAY = 5

queue = rediswq.RedisWQ(name=constants.JOB_NAME, host=constants.HOST)


def poll(queue, print_delay):
    """
    poll checks the number of remaining items in the redis queue and prints the value.
    This value is retrieved from the pod named `poll` using `kubectl logs poll`.

    :param queue: RedisWQ() - object that has some items in its database
    :param print_delay: int - a delay between prints so the output is not overwhelming
    :return: None
    """
    while not queue.empty():

        main_q_size, _ = queue.get_queue_sizes
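        # (Assumed line; the docstring says the count is printed, but the
        # listing is truncated.) Report how many items remain in the main queue.
        print("Remaining items in queue: {}".format(main_q_size), flush=True)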
        time.sleep(print_delay)
Example #27
#!/usr/bin/env python

import time
import rediswq

host = "redis"

q = rediswq.RedisWQ(name="job2", host="redis")
print("Worker with sessionID: " + q.sessionID())
print("Initial queue state: empty=" + str(q.empty()))

while not q.empty():
    item = q.lease(lease_secs=10, block=True, timeout=2)

    if item is not None:
        itemstr = item.decode("utf=8")
        print("Working on " + itemstr)
        time.sleep(10)  # Put your actual work here instead of sleep.
        q.complete(item)
    else:
        print("Waiting for work")

print("Queue empty, exiting")
Example #28
#!/usr/bin/env python

import time
import os, sys
import rediswq

host = os.getenv("REDIS_SERVICE_NAME", "redis")
# Uncomment next two lines if you do not have Kube-DNS working.
# import os
# host = os.getenv("REDIS_SERVICE_HOST")
job_number = int(os.getenv("INITIAL_JOB_NUMBER", "1"))

q = rediswq.RedisWQ(name=os.getenv("SUBMIT_QUEUE_NAME", default="cleanup"),
                    host=host)
print("Worker with sessionID: " + q.sessionID())
print("Initial queue state: empty=" + str(q.empty()))
while True:
    item = q.lease(lease_secs=300, block=True, timeout=2)
    if item is not None:
        itemstr = item.decode("utf-8")
        print("Working on " + itemstr)
        time.sleep(10)  # Put your actual work here instead of sleep.
        print(os.popen('python /handle_request.py {} > last_job.txt'.format(
            itemstr)).read(),
              file=sys.stderr)
        job_number = job_number + 1
        q.complete(item)
    else:
        print("Waiting for work")
    while q.empty():
        # Sleep while we wait for work.
        time.sleep(30)
Example #29
#!/usr/bin/env python

import time
import rediswq

# host="redis"
# Uncomment next two lines if you do not have Kube-DNS working.
import os
host = os.getenv("REDIS_SERVICE_HOST")

q = rediswq.RedisWQ(name="job2", host=host)
print("Worker with sessionID: " + q.sessionID())
print("Initial queue state: empty=" + str(q.empty()))
while not q.empty():
    item = q.lease(lease_secs=10, block=True, timeout=2)
    if item is not None:
        itemstr = item.decode("utf-8")
        print("Working on " + itemstr)
        time.sleep(10)  # Put your actual work here instead of sleep.
        q.complete(item)
    else:
        print("Waiting for work")
print("Queue empty, exiting")
Example #30
#!/usr/bin/env python

import rediswq
import subprocess
import sys
import time

host = "redis"
# Uncomment next two lines if you do not have Kube-DNS working.
# import os
# host = os.getenv("REDIS_SERVICE_HOST")

q = rediswq.RedisWQ(name="antfiles", host="redis")
print("Worker with sessionID: " + q.sessionID())
print("Initial queue state: empty=" + str(q.empty()))
print('', flush=True)

while not q.empty():
    item = q.lease(lease_secs=120, block=True, timeout=2)
    if item is not None:
        filename = item.decode("utf-8")
        print()
        print("Ant data found")
        print("Working on " + filename)
        print('', flush=True)
        p = subprocess.call(['/usr/local/bin/antscrewcounter', filename],
                            shell=False,
                            stdout=sys.stdout,
                            stderr=sys.stderr)
        print('', flush=True)
        print("Completed " + filename)