예제 #1
0
파일: worker.py 프로젝트: nott/celery
    def rdb_handler(*args):
        """Signal handler setting a rdb breakpoint at the current frame."""
        with in_sighandler():
            # Standard handlers receive (signum, frame); only the frame matters.
            signum, frame = args
            from celery.contrib.rdb import set_trace

            set_trace(frame)
예제 #2
0
파일: utils.py 프로젝트: sivang/laten-fw
def remote_debug():
    """Open a remote celery rdb session, but only when the configuration
    enables remote debugging."""
    from laten.configregistry import Configuration
    if Configuration().remote_debug:
        from celery.contrib import rdb
        rdb.set_trace()
예제 #3
0
 def rdb_handler(*args):
     """Signal handler setting a rdb breakpoint at the current frame."""
     with in_sighandler():
         from celery.contrib.rdb import set_trace, _frame
         if args:
             # standard handlers receive (signum, frame)
             frame = args[1]
         else:
             # gevent does not pass standard signal handler args
             frame = _frame().f_back
         set_trace(frame)
예제 #4
0
파일: worker.py 프로젝트: unixomg/celery
 def rdb_handler(signum, frame):
     """Signal handler setting a rdb breakpoint at the current frame."""
     # Flag that we are inside a signal handler for the duration of the trace.
     set_in_sighandler(True)
     try:
         from celery.contrib.rdb import set_trace
         set_trace(frame)
     finally:
         set_in_sighandler(False)
예제 #5
0
파일: worker.py 프로젝트: rwillmer/celery
 def rdb_handler(signum, frame):
     """Signal handler setting a rdb breakpoint at the current frame."""
     set_in_sighandler(True)
     try:
         # Deferred import: rdb is only needed when the signal actually fires.
         from celery.contrib import rdb as remote_debugger
         remote_debugger.set_trace(frame)
     finally:
         # Always clear the flag, even if set_trace raises.
         set_in_sighandler(False)
예제 #6
0
def taskMagagerGlobalTasks(type=1, id_model=2, **kwargs):
    """Demo task: compute a value, pause at a remote rdb breakpoint, return it.

    The parameters are unused in the visible body; they are kept (names,
    order, defaults) for caller compatibility.
    """
    x = 1 + 2

    # Import locally so the breakpoint works even without a module-level
    # `rdb` import being in scope.
    from celery.contrib import rdb
    rdb.set_trace()  # <- set breakpoint

    # print() with a single argument behaves identically under Python 2 and 3;
    # the original `print 'testetetststst'` was Python-2-only syntax.
    print('testetetststst')

    return x
    
예제 #7
0
def remote_task_debugger():
    """Drop into celery's remote debugger (rdb), then return a sentinel value.

    References:
    http://docs.celeryproject.org/en/latest/userguide/debugging.html
    http://docs.celeryproject.org/en/latest/reference/celery.contrib.rdb.html
    """
    from celery.contrib import rdb
    rdb.set_trace()
    return 42
예제 #8
0
 def _onError(function, path, excinfo):
     """Error callback: pause in the remote debugger, then record the failure
     in the module-level ``messages`` list."""
     # TODO: decide on permanent handling; the rdb breakpoint is a debug aid.
     from celery.contrib import rdb
     rdb.set_trace()
     record = {
         'function': function,
         'path': path,
         'excinfo': excinfo
     }
     messages.append(record)
예제 #9
0
def polling_until_reported(task_id):
    """Poll the Cuckoo API once per second until *task_id* reaches the
    "reported" state, then fetch its report and resolve it."""
    # NOTE(review): rdb.set_trace() looks like a leftover debug breakpoint.
    url = current_app.config['CUCKOO_URL'] + '/tasks/view/'
    rdb.set_trace()
    status = None
    while status != "reported":
        time.sleep(1)
        status = query_task(task_id, url)
    # task is done: fetch the report
    url = current_app.config['CUCKOO_URL'] + '/tasks/report/'
    res = get_report(task_id, url)

    resolve(res)
예제 #10
0
def send_email(data=None):
    """Render and send a multipart (plain text + HTML) email described by *data*.

    Expected keys: 'email' (recipient), 'sub' (subject), 'body' (plain text),
    'html' (template name rendered with the whole payload as context).
    Silently does nothing when 'email' is absent or falsy.
    """
    # Fix: the original used a mutable default argument (data={}), which is
    # shared across calls; None-sentinel is the safe equivalent.
    if data is None:
        data = {}
    from celery.contrib import rdb
    rdb.set_trace()  # NOTE(review): leftover remote-debug breakpoint
    if data.get('email', None):
        subject = data['sub']
        body = data['body']
        html = get_template(data['html']).render(Context(data))
        sender = '*****@*****.**'
        recipient = data['email']
        email = EmailMultiAlternatives(subject,
                                       body,
                                       from_email=sender,
                                       to=[recipient])
        email.attach_alternative(html, "text/html")
        email.send()
        logger.info("Email Sent")
예제 #11
0
파일: tasks.py 프로젝트: eugenewere/mpesa
def process_b2c_call_response_task(response, id):
    """
    process the request sent back from b2c request
    :param response:
    :param id:
    :return:
    """
    data = response
    # Collect the response fields first, then apply them in a single update.
    update_fields = dict(
        request_id=data.get('requestId', ''),
        error_code=data.get('errorCode', ''),
        error_message=data.get('errorMessage', ''),
        conversation_id=data.get('ConversationID', ''),
        originator_conversation_id=data.get('OriginatorConversationID', ''),
        response_code=data.get('ResponseCode', ''),
        response_description=data.get('ResponseDescription', ''),
    )
    B2CRequest.objects.filter(pk=id).update(**update_fields)
    # NOTE(review): leftover remote-debug breakpoint.
    rdb.set_trace()
예제 #12
0
 def execute(self):
     """Merge ``self.data`` into every active NANotifications row matching
     ``self.lookup`` (keys are matched against the JSON ``data`` field).

     - exactly one match: update and save it
     - multiple matches: update and save each
     - no match: do nothing
     """
     filter_kwargs = {'is_active': True}
     for k, v in self.lookup.items():
         filter_kwargs.update({'data__%s' % k: v})
     try:
         notification = NANotifications.objects.get(**filter_kwargs)
         notification.data.update(self.data)
         from celery.contrib import rdb
         rdb.set_trace()  # NOTE(review): leftover remote-debug breakpoint
         notification.save()
         # Fix: was `nofification.query` — a NameError that neither except
         # clause below catches, so it propagated after the save.
         # NOTE(review): model instances normally have no `.query` attribute;
         # confirm this prints what was intended.
         print(notification.query)
     except NANotifications.MultipleObjectsReturned:
         notification = NANotifications.objects.filter(**filter_kwargs)
         for notif in notification:
             notif.data.update(self.data)
             notif.save()
     except NANotifications.DoesNotExist:
         pass
예제 #13
0
 def rdb_handler(signum, frame):
     """Signal handler setting a rdb breakpoint at the current frame."""
     # Import lazily so rdb only loads when the signal actually fires.
     from celery.contrib.rdb import set_trace
     set_trace(frame)
예제 #14
0
def results(constraints):
    """Solve *constraints*, returning ``Result(sat, model)``.

    Flow: dump the raw constraints to ./foo.p for post-mortem debugging,
    split them into canonical independent parts, consult the mongo lemma
    cache for a known-unsat subset (which proves the whole set unsat), then
    solve each remaining part and cache the per-part outcome.

    NOTE(review): ``z3`` is rebound via ``global z3`` and ``z3.solver()`` /
    ``z3.results()`` are not the upstream z3 API, so this is presumably a
    project wrapper module — confirm against the module that assigns it.
    """
    global z3

    # Persist the constraints for offline reproduction/debugging.
    with open('./foo.p', 'wb') as f:
        pickle.dump(constraints, f)

    # No constraints at all: trivially satisfiable with an empty model.
    if len(constraints) == 0:
        return Result(True, {})

    before = time.time()

    model = {}
    # parts: tuple-of-uuids -> (mapping, exprs) per canonical sub-problem.
    parts = split_and_canonicalize(constraints)
    all_uuids = sorted(uu for uuids in parts for uu in uuids)

    print("before mongo stuff took %f" % (time.time() - before))

    if mongo['results'].find({'uuids': mongo_superset(all_uuids), 'sat': False}).limit(1).count(with_limit_and_skip=True) > 0:
        # this means a constraint set consisting of a subset of the
        # uuids is unsat, meaning all the uuids together must also be
        # unsat
        print("found unsat $in")
        return Result(False, {})

    print("whole pre-solve took %f" % (time.time() - before))

    # for doc in mongo['results'].find({'$or': [{'uuids': uu} for uu in parts]}):
    #     l.info("lemma cache find for %r, sat is %s and model is %s", doc['uuids'], doc['sat'], doc['model'])
    #     # if doc['sat']:
    #     #     if doc['model'] is not None:
    #     #         mapping, _ = parts[tuple(doc['uuids'])]
    #     #         model.update(rename_model(mapping, doc['model'], transform=pickle.loads))
    #     #         del parts[tuple(doc['uuids'])]
    #     if not doc['sat']:
    #         return Result(False, {})

    s = z3.solver()

    for uuids, (mapping, exprs) in parts.items():
        # Skip parts whose expressions are all trivially true.
        if all(e.is_true() for e in exprs):
            continue

        result = z3.results(s, extra_constraints=exprs, generic_model=True)
        doc = {
            'uuids': uuids,
            'sat': result.sat,
            'model': None,
        }
        if result.sat:
            # Pickle model values into BSON binaries so mongo can store them.
            doc['model'] = {name: bson.binary.Binary(pickle.dumps(val, protocol=pickle.HIGHEST_PROTOCOL))
                            for (name, val) in result.model.items()}
            try:
                model.update(rename_model(mapping, result.model))
            except KeyError:
                # NOTE(review): leftover interactive breakpoint — this blocks
                # the worker and otherwise swallows the KeyError.
                rdb.set_trace()

        # Upsert the per-part outcome into the lemma cache.
        mongo['results'].replace_one({'uuids': uuids}, doc, upsert=True)

        # One unsat part makes the whole conjunction unsat.
        if not result.sat:
            return Result(False, {})

    return Result(True, model)
예제 #15
0
 def test_set_trace(self, _frame, debugger):
     """set_trace() is truthy with and without an explicit frame, and the
     mocked debugger's set_trace is actually invoked."""
     with_frame = set_trace(Mock())
     self.assertTrue(with_frame)
     without_frame = set_trace()
     self.assertTrue(without_frame)
     self.assertTrue(debugger.return_value.set_trace.called)
예제 #16
0
파일: tasks.py 프로젝트: dsmurrell/smmpdb
def predict_NCI60(molecule_file_path, email_address):
    """Predict NCI60 cell-line drug sensitivity for the molecules in
    *molecule_file_path* and email the results to *email_address*.

    Pipeline (matches the numbered sections below): load molecules, compute
    hashed Morgan fingerprints, combine with cell-line pathway descriptors,
    run pre-trained point and error models, derive 70/80/90% conformal
    confidence intervals, write a CSV, render a barplot via Rserve, email
    the CSV + pdf, then remove the generated CSV.

    NOTE(review): Python 2 syntax (print statements) — this function cannot
    run under Python 3 as written.
    """

    print 'in function'
    #print sys.path
    # NOTE(review): leftover remote-debugger breakpoint; blocks the worker
    # until a client attaches to rdb.
    rdb.set_trace()
    #return

    #################################################################################################
    # 1. Load molecules
    #################################################################################################
    print 'blah'
    import repo.bioalerts as bioalerts
    print 'imported bioalerts'
    import os
    import numpy as np
    import sklearn
    from sklearn.ensemble import RandomForestRegressor

    try:
        print "Reading input file.\n"
        molecules = bioalerts.LoadMolecules(molecule_file_path, verbose=False)
        molecules.ReadMolecules()
        print "Total number of input molecules correctly processed: ", len(molecules.mols)
    # NOTE(review): bare except — a failure notice is emailed but execution
    # continues, so `molecules` may be unbound in the code below (NameError).
    except:
        print "ERROR: The input molecules could not be processed.\n The extension of the input file might not be supported\n"
        mail = EmailMessage('NCI60 Sensitivity Predictions',
        """Dear User,

        The requested cell line sensitivity predictions on the NCI60 panel could
        not be calculated.

        It is likely that (i) the input file was corrupted or (ii) the format of the input molecules not supported.

        Kind regards
        Cancer Cell Line Profiler team""", 'CancerCellLineProfiler', [email_address])
        mail.send()
    # Check whether the file is huge..
    # NOTE(review): a too-large notice is emailed, but processing continues
    # anyway — presumably a missing `return`; confirm intent.
    if (os.path.getsize(molecule_file_path) >> 20) > 1:
        mail = EmailMessage('NCI60 Sensitivity Predictions',
        """Dear User,

        The requested cell line sensitivity predictions on the NCI60 panel could
        not be calculated because the size of the file was higher than 1Mb (maximum input file size supported).

        Kind regards
        Cancer Cell Line Profiler team""", 'CancerCellLineProfiler', [email_address])
        mail.send()

    if len(molecules.mols) == 0:
        print "ERROR: None of the input molecules was processed successfully\n"
        mail = EmailMessage('NCI60 Sensitivity Predictions',
                            """Dear User,

                            The requested cell line sensitivity predictions on the NCI60 panel could
                            not be calculated, because the input file was empty or none of the input molecules
                            was processed correctly.

                            Kind regards
                            Cancer Cell Line Profiler team""", 'CancerCellLineProfiler', [email_address])
        mail.send()
        # NOTE(review): bare `raise` outside an except block is a RuntimeError
        # unless an exception is being handled — confirm intended behavior.
        raise
    #################################################################################################
    # 2. Calculate Morgan fps for the input molecules
    #################################################################################################
    print "Calculating Morgan fingerprints for the input molecules\n"
    mols_info = bioalerts.GetDataSetInfo()
    #mols_info.extract_substructure_information(radii=[0,1,2],mols=molecules.mols)
    fps_input_molecules = bioalerts.CalculateFPs(mols=molecules.mols,radii=[0,1,2])
    fps_input_molecules.calculate_hashed_fps(nBits=256)
    #hashed_binary = fps_input_molecules.fps_hashed_binary
    hashed_counts = fps_input_molecules.fps_hashed_counts
    # Standardize fingerprints with the training-set mean/std.
    mean_fps = np.load("./NCI60/server_model/mean_fps_server_NCI60.npy")
    std_fps = np.load("NCI60/server_model/std_fps_server_NCI60.npy")
    hashed_counts = (hashed_counts - mean_fps) / std_fps


    #################################################################################################
    # 3. load cell line descriptors (pathways 1000)
    #################################################################################################
    nb_input_mols = len(molecules.mols)
    cell_descs = np.genfromtxt('./NCI60/pathway_descriptors_most_var.csv',delimiter=",",skiprows=1)
    cell_names = np.genfromtxt('./NCI60/pathway_descriptors_most_var_CELL_NAMES.csv',skiprows=0,dtype="|S40")
    mean_cell_descs = np.mean(cell_descs,axis=0)
    std_cell_descs = np.std(cell_descs,axis=0)
    cell_descs = (cell_descs-mean_cell_descs) / std_cell_descs
    #cell_descs = np.repeat(cell_descs,molecules.mols,axis=0)
    # tile and repeat the cell line and compound descriptors
    hashed_counts = np.tile(hashed_counts,(59,1))
    input_mols_names = np.tile(molecules.mols_ids,(59,1))
    cell_descs = np.repeat(cell_descs,nb_input_mols,axis=0)
    cell_names = np.repeat(cell_names,nb_input_mols,axis=0)

    # One row per (cell line, molecule) pair: fingerprints ++ pathway descriptors.
    X = np.hstack((hashed_counts,cell_descs))

    #################################################################################################
    # 4. Load point prediction and error models
    #################################################################################################
    from sklearn.externals import joblib
    point_prediction_model = joblib.load('./NCI60/server_model/point_prediction_model_NCI60.pkl')
    error_prediction_model = joblib.load('./NCI60/server_model/error_prediction_model_NCI60.pkl')

    #################################################################################################
    # 5. Predict the activities
    #################################################################################################
    point_predictions = point_prediction_model.predict(X)
    error_prediction = error_prediction_model.predict(X)

    #################################################################################################
    # 6. Calculate the confidence intervals (70, 80, 90%)
    #################################################################################################
    # NOTE(review): np.round returns a float; float indices into an ndarray
    # raise on modern NumPy — these look like they should be int(...) casts.
    alphas = np.load("./NCI60/server_model/alphas_NCI60.npy")
    alpha_70 = alphas[np.round(len(alphas)*0.7,decimals=0)]
    alpha_80 = alphas[np.round(len(alphas)*0.8,decimals=0)]
    alpha_90 = alphas[np.round(len(alphas)*0.9,decimals=0)]

    confi_70 = error_prediction * alpha_70
    confi_80 = error_prediction * alpha_80
    confi_90 = error_prediction * alpha_90

    #################################################################################################
    # 7. Write predictions to .csv
    #################################################################################################
    fich = open("./NCI60/predictions_NCI60.csv","w")
    # NOTE(review): the trailing `%()` is a no-op interpolation on a string
    # with no format specifiers.
    fich.write("Cell_line\tCompound_ID\tPredicted_pGI50\tCI_70\tCI_80\tCI_90\n" %())
    for i in range(0,len(input_mols_names)):
        fich.write("%s\t%s\t%f\t%f\t%f\t%f\n" %(cell_names[i],input_mols_names[i][0],point_predictions[i],confi_70[i],confi_80[i],confi_90[i]))

    fich.close()

    #################################################################################################
    # 8. Generate plot with R of the barplot for the NCI60
    #################################################################################################
    conn = pyRserve.connect()
    logger.debug(conn.eval('source("barplot_NCI60.R")'))

    mail = EmailMessage('NCI60 Sensitivity Predictions',
                        """Dear User,

                        Thank you for using our service.
                        Here are the (i) predicted pGI50 values, and
                        (ii) the 70, 80 and 90% confidence intervals calculated with conformal prediction
                        for your input molecules.

                        In addition, you will find a pdf displaying the bioactivity profile of each input molecule across the NCI60 panel.

                        Kind regards
                        Cancer Cell Line Profiler team""", 'CancerCellLineProfiler', [email_address])
    mail.attach_file('./NCI60/predictions_NCI60.csv')
    mail.attach_file('./NCI60/predicted_profiles_NCI60.pdf')
    mail.send()

    #################################################################################################
    # 9. Remove generated files
    #################################################################################################
    # NOTE(review): only the CSV is removed; predicted_profiles_NCI60.pdf is
    # left behind — confirm whether that is intentional.
    import os, os.path
    if os.path.exists('./NCI60/predictions_NCI60.csv'):
        os.remove('./NCI60/predictions_NCI60.csv')
예제 #17
0
파일: test_rdb.py 프로젝트: yinlinzh/celery
 def test_set_trace(self, _frame, debugger):
     """set_trace() returns truthy with and without an explicit frame, and
     the mocked debugger's set_trace gets invoked."""
     explicit = set_trace(Mock())
     assert explicit
     default = set_trace()
     assert default
     debugger.return_value.set_trace.assert_called()
예제 #18
0
파일: worker.py 프로젝트: kornholi/celery
 def rdb_handler(signum, frame):
     """Signal handler setting a rdb breakpoint at the current frame."""
     # Lazy import keeps rdb off the import path until the signal fires.
     from celery.contrib import rdb as debugger
     debugger.set_trace(frame)
예제 #19
0
파일: tasks.py 프로젝트: vinayshenoy/Celery
def hello():
    """Sleep a random 1-10 seconds, pause at a remote breakpoint, then greet."""
    delay = random.randint(1, 10)
    sleep(delay)
    rdb.set_trace()
    return "hello world!"
예제 #20
0
def sub(x, y):
    """Return ``x - y``, pausing at a remote rdb breakpoint before returning."""
    difference = x - y
    rdb.set_trace()  # set breakpoint
    return difference
예제 #21
0
 def rdb_handler(*args):
     """Signal handler setting a rdb breakpoint at the current frame."""
     with in_sighandler():
         # standard handlers receive (signum, frame)
         signum, frame = args
         from celery.contrib.rdb import set_trace
         set_trace(frame)
예제 #22
0
파일: test_rdb.py 프로젝트: bryson/celery
 def test_set_trace(self, _frame, debugger):
     """Verify set_trace() succeeds with an explicit frame and with the default."""
     assert set_trace(Mock()), 'set_trace with an explicit frame'
     assert set_trace(), 'set_trace with the default frame'
     debugger.return_value.set_trace.assert_called()
예제 #23
0
 def test_set_trace(self, _frame, debugger):
     """Both call forms of set_trace() return truthy and reach the mocked
     debugger."""
     frame_result = set_trace(Mock())
     self.assertTrue(frame_result)
     default_result = set_trace()
     self.assertTrue(default_result)
     self.assertTrue(debugger.return_value.set_trace.called)
예제 #24
0
def add(x, y):
    """Return ``x + y``, pausing at a remote rdb breakpoint first."""
    total = x + y
    rdb.set_trace()
    return total
예제 #25
0
def optimize_rosters_task(formData):
    '''
    Generates rosters based on a given date

    A redis lock keyed on T_OPTIMIZE throttles concurrent runs; it is
    acquired non-blocking and deliberately left to expire via its timeout
    (never released), which implements the throttle window.
    Returns a celery_result_schema object describing the outcome.
    '''
    lock = redis.lock(T_OPTIMIZE, timeout=int(THROTTLE))
    have_lock = lock.acquire(blocking=False)
    if not have_lock:
        # Fix: the warning previously logged T_CREATE_MODEL, but this task
        # throttles on T_OPTIMIZE.
        LOG.warning('{} lock currently active.'.format(T_OPTIMIZE))
        resObj, err = celery_result_schema.load(
            dict(
                name='optimize_rosters_task',
                data=None,
                status='locked',
                msg='',
                currentProgress=0,
                totalProgress=1,
            ))
        return resObj

    # NOTE(review): leftover remote-debug breakpoint — blocks the worker.
    rdb.set_trace()
    formData, errors = optimize_task_schema.load(formData)
    if errors:
        resObj, err = celery_result_schema.load(
            dict(
                name='optimize_rosters_task',
                data=formData,
                status='fail',
                msg=errors,
                currentProgress=0,
                totalProgress=1,
            ))
        return resObj

    #modelData = formData.get('model')
    try:
        # Look for an existing Model with exactly these features.
        model = Model.query \
                .filter(Model.predictor_name == formData['predictor_name'])\
                .filter(Model.hypers == formData['hypers'])\
                .filter(Model.data_transforms == formData['data_transforms'])\
                .filter(Model.data_cols == formData['data_cols'])\
                .one()
        LOG.info('A model with these features exists. Use nickname {}'.format(
            model.nickname))
        # NOTE(review): the 'create_model_task' result names below look
        # copy-pasted from a create-model task — confirm whether consumers
        # expect 'optimize_rosters_task' here instead.
        resObj, err = celery_result_schema.load(
            dict(
                name='create_model_task',
                data=model.nickname,
                status='fail',
                msg='Model already exists. Check nickname',
                currentProgress=0,
                totalProgress=1,
            ))
        return resObj

    except NoResultFound:
        # No such model yet: strip the derived dict fields and persist it.
        LOG.info('No model found, creating model.')
        formData.pop('hypers_dict')
        formData.pop('data_cols_dict')
        model = Model(**formData)
        db.session.add(model)
        db.session.commit()
        resObj, err = celery_result_schema.load(
            dict(
                name='create_model_task',
                data=None,
                status='success',
                msg='Create model',
                currentProgress=0,
                totalProgress=1,
            ))
        return resObj

    except MultipleResultsFound:
        resObj, err = celery_result_schema.load(
            dict(
                name='create_model_task',
                data=None,
                status='fail',
                msg='Found duplicate Models in the database.',
                currentProgress=0,
                totalProgress=1,
            ))
        return resObj
예제 #26
0
def mul(x, y):
    """Return ``x * y`` after simulating one second of work; pauses at a
    remote rdb breakpoint before returning."""
    sleep(1)  # Simulate work
    log.info('multiplying {} to {}'.format(x, y))
    product = x * y
    rdb.set_trace()
    return product