Code Example #1
import djcelery
from celery.signals import beat_init, worker_init


def prepare_celery():
    """Run the djcelery loader, which is required to set the correct backend/scheduler,
    then add signal callbacks to ensure all tasks are registered correctly.
    """
    djcelery.setup_loader()
    # beat_update_schedule and celeryd_discover_tasks are callbacks defined
    # elsewhere in the same project.
    beat_init.connect(beat_update_schedule)
    worker_init.connect(celeryd_discover_tasks)
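
The two callbacks wired up above are project-specific and not shown in the excerpt. A minimal sketch of what such handlers usually look like, purely as an assumption about their shape (the bodies below are illustrative, not the project's actual code):

def beat_update_schedule(sender=None, **kwargs):
    # Celery signal handlers receive keyword arguments such as `sender`.
    # Here one would typically rebuild the beat schedule before it starts.
    print('beat_init fired; refreshing schedule')


def celeryd_discover_tasks(sender=None, **kwargs):
    # Here one would typically import task modules so every task is registered.
    print('worker_init fired; discovering tasks')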
Code Example #2
from base_task import TalentTask, _workerState
from celery.signals import worker_init
from genome_scoring import GENOME_FILE, TREE_FILE, BOUNDARY_FILE
from sstree import PySSTree
from blist import sortedlist
import pickle

def _initializeWorker(**kwargs):
    """Load the genome, its suffix tree and the gene boundaries into the
    worker-level cache (_workerState) so every task can reuse them."""
    print('_initializeWorker called')

    organism = "drosophila_melanogaster"

    # Read the raw genome sequence for the organism.
    with open(GENOME_FILE % organism, 'r') as f:
        text = f.read()

    # Load the prebuilt suffix tree over the genome text.
    _workerState["sTree"] = PySSTree(text, "load", TREE_FILE % organism)

    # Gene boundaries, kept sorted by position for fast lookup.
    with open(BOUNDARY_FILE % organism, "r") as f:
        _workerState["geneBoundaries"] = sortedlist(pickle.load(f), key=lambda x: x['pos'])

worker_init.connect(_initializeWorker)
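
For context, `_workerState` imported from `base_task` is presumably just a module-level dict shared by all tasks running in the same worker process. A minimal sketch of that side of the pattern, where the `TalentTask` accessor below is hypothetical rather than the project's actual code:

# Hypothetical sketch of base_task.py: a plain module-level dict that the
# worker_init handler fills and that tasks later read.
from celery import Task

_workerState = {}


class TalentTask(Task):
    abstract = True

    @property
    def gene_boundaries(self):
        # Reads the data preloaded at worker start-up.
        return _workerState["geneBoundaries"]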
Code Example #3
File: mausworker.py  Project: durgarajaram/maus
    MausConfiguration from the Celery master process, the sub-process
    will always inherit the latest version of MausConfiguration from
    the master process. 
    @param kwargs Arguments - unused.
    """
    configuration = Configuration()
    config_doc = configuration.getConfigJSON()
    config_dictionary = json.loads(config_doc)
    MausConfiguration.version = config_dictionary["maus_version"]
    logger = logging.getLogger(__name__)
    if logger.isEnabledFor(logging.INFO):
        logger.info("MAUS version: %s" % MausConfiguration.version)


# Bind the callback method to the Celery worker_init signal.
worker_init.connect(worker_init_callback)


def sub_process_broadcast(panel, func, arguments, errors):
    """
    Invoke a function on every sub-process via asynchronous requests.   
    This is based on a suggestion in 
    http://groups.google.com/group/celery-developers/browse_thread/
    thread/191389ab86185cce
    which repeatedly sends the same request to all the sub-processes
    until all confirm that they have executed the request. This is
    done via their returning of a status message and their sub-process
    ID. It is not the most efficient but it is the only way to ensure
    that all sub-processes are updated at the same time and remain
    consistent with both each other and the main process.
    @param panel Celery panel object.
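
The repeat-until-all-confirm idea described in that docstring can be sketched as a small loop. Everything below (the `broadcast_to_pool` callable, the reply format, the set of expected sub-process IDs) is an assumption used only to illustrate the pattern, not the MAUS implementation:

import time


def broadcast_until_confirmed(broadcast_to_pool, expected_ids, arguments,
                              retries=10, delay=0.5):
    # Keep re-sending the same request until every known sub-process ID
    # has acknowledged it, so all pool processes end up consistent.
    confirmed = set()
    for _ in range(retries):
        for reply in broadcast_to_pool(arguments):  # hypothetical helper
            if reply.get("status") == "ok":
                confirmed.add(reply["subprocess_id"])
        if confirmed >= set(expected_ids):
            return True
        time.sleep(delay)
    return False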
Code Example #4
File: smartfp_tasks.py  Project: mwang87/SMART_NMR
    
    headers = {"content-type": "application/json"}
    json_response = requests.post(fp_pred_url, data=payload, headers=headers)

    fingerprint_prediction = np.asarray(json.loads(json_response.text)['predictions'])
    fingerprint_prediction_nonzero = np.where(fingerprint_prediction.round()[0]==1)[0]

    # Tensorflow Serve fp mw query
    fp_pred_url = "http://smartfp-mw-tf-server:8501/v1/models/VGG16_high_aug_MW_continue:predict"
    payload = json.dumps({"instances": mat.reshape(1,200,240,1).tolist()})
    
    headers = {"content-type": "application/json"}
    json_response = requests.post(fp_pred_url, data=payload, headers=headers)

    pred_MW = json.loads(json_response.text)['predictions'][0][0]

    print("FINISHED PREDICTION", time.time() - start_time, file=sys.stderr)

    DB = shared_model_data["DB"]
    topK = SMART_FPinder.search_database(fingerprint_prediction, fingerprint_prediction_nonzero, pred_MW, DB, mw=mw, top_candidates=20)
    topK.to_csv(output_result_table, index=None)

    print("FINISHED DB SEARCH", time.time() - start_time, file=sys.stderr)

    with open(output_result_fp_pred, "w") as result_file:
        result_file.write(json.dumps(fingerprint_prediction.tolist()))

    return 0

# Load the database when the worker starts
worker_init.connect(worker_load_models)
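
The `worker_load_models` callback connected on the last line is not part of this excerpt. A minimal sketch of what such a loader might do, where `load_db()` and the path are hypothetical and only `shared_model_data` appears in the excerpt itself:

def worker_load_models(**kwargs):
    # Populate the worker-level cache once, before any task runs.
    shared_model_data["DB"] = load_db("/data/smart_db")  # hypothetical helper and path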
Code Example #5
File: celery_tasks.py  Project: fajoy/typhoonae
         rate_limit=rate_limit,
         run=handle_task,
         ignore_result=True,
         send_error_emails=False,
         __module__=create_task_queue.__module__))


def create_task_queues_from_yaml(app_root=None):
    if app_root:
        queue_info = _ParseQueueYaml(app_root)
    else:
        queue_info = None
    tasks = {}
    if queue_info and queue_info.queue:
        for entry in queue_info.queue:
            tasks[entry.name] = create_task_queue(
                entry.name, queueinfo.ParseRate(entry.rate),
                entry.bucket_size)
    else:
        tasks['default'] = create_task_queue('default', 5)
    return tasks


def load_queue_config(signal, sender=None, **kwargs):
    if 'APP_ROOT' not in os.environ:
        logging.error("application root unknown")
    # Falls back to a single default queue when no queue.yaml can be found.
    create_task_queues_from_yaml(os.environ.get('APP_ROOT'))


worker_init.connect(load_queue_config)
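
`create_task_queues_from_yaml` expects an App Engine style queue.yaml beneath the application root. A short usage sketch, where the path is hypothetical and the queue.yaml layout is the standard App Engine one:

import os

# Assumed queue.yaml under the application root:
#   queue:
#   - name: mail-queue
#     rate: 10/m
#     bucket_size: 5
os.environ['APP_ROOT'] = '/path/to/app'  # hypothetical path
tasks = create_task_queues_from_yaml(os.environ['APP_ROOT'])
print(sorted(tasks))  # one Celery task per configured queue, e.g. ['mail-queue']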
Code Example #6
import logging
import random
import string
import time

from celery import shared_task
from celery.signals import worker_init

# Worker-level cache, populated once per worker process at start-up.
_expensive_objects = {}


class ExpensiveObject:
    def __init__(self):
        self.id_ = ''.join(
            random.choice(string.ascii_lowercase) for _ in range(6))

        print('>> initialising ExpensiveObject ' + self.id_)
        time.sleep(10)  # simulate an expensive set-up step
        print('<< initialised ExpensiveObject ' + self.id_)

    def task(self, word):
        return '{} (from {})'.format(word.upper(), self.id_)


def _load_expensive_object(**kwargs):
    # Build the expensive object exactly once, when the worker starts.
    if not _expensive_objects:
        _expensive_objects['loaded'] = ExpensiveObject()


worker_init.connect(_load_expensive_object)


@shared_task
def expensive_task(word):
    logging.info('>> requesting `expensive_task` with ' + word)

    if 'loaded' not in _expensive_objects:
        raise Exception('Expensive object not yet loaded')

    expensive_object = _expensive_objects['loaded']
    return expensive_object.task(word)
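
A short client-side usage sketch, assuming a configured Celery app and broker; the module path in the import is hypothetical:

from myapp.tasks import expensive_task  # hypothetical module path

result = expensive_task.delay('hello')
print(result.get(timeout=30))  # e.g. "HELLO (from abcdef)"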