Example No. 1
    def __getstate__(self):
        """
            For pickling the report when passing it to Celery.
        """
        logging = get_task_logger(__name__) # logging is likely to happen within celery.
        # pickle only what the report needs from the request object

        request = dict(
            GET=self.request.GET,
            META=dict(
                QUERY_STRING=self.request.META.get('QUERY_STRING'),
                PATH_INFO=self.request.META.get('PATH_INFO')
            ),
            datespan=self.request.datespan,
            couch_user=None
        )

        try:
            request.update(couch_user=self.request.couch_user.get_id)
        except Exception as e:
            logging.error("Could not pickle the couch_user id from the request object for report %s. Error: %s" %
                          (self.name, e))
        return dict(
            request=request,
            request_params=self.request_params,
            domain=self.domain,
            context={}
        )
Example No. 2
    def run(self, plugin_cls_name, settings, logger=None):
        """
        Just load the right plugin class, and then 
        execute the check.

        Return tuple of (RESULT, MSG).
        MSG is empty for UP states.
        """

        plugin = get_cls_by_name(plugin_cls_name)()
        plugin.set_logger(logger or get_task_logger('django'))

        msg = None
        try:
            msg = plugin.run_check(settings)
        except PluginImplementationError as e:
            return (Service.STATE_UNKNOWN, e.message)
        except PluginConfigurationError as e:
            return (Service.STATE_UNKNOWN, "Plugin {} is misconfigured: {}".format(
                plugin_cls_name, e.message))
        except ServiceCheckFailed as e:
            return (Service.STATE_UNKNOWN, "Service check failed: " + e.reason)
        except ServiceIsDown as e:
            return (Service.STATE_DOWN, e.message)
        except ServiceHasWarning as e:
            return (Service.STATE_WARNING, e.message)
        except Exception as e:
            return (Service.STATE_UNKNOWN, 'Unknown exception: {}: {}'.format(
                e.__class__, e))   

        return (Service.STATE_OK, msg or '')
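A hypothetical call site for the method above (the checker instance, the plugin path, the settings dict, and the notify_admins helper are all assumed for illustration, not part of the original):

state, msg = checker.run('myapp.plugins.HttpPlugin', {'url': 'http://example.com'})
if state != Service.STATE_OK:
    notify_admins(msg)  # msg is empty only for STATE_OK results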
Example No. 3
    def __setstate__(self, state):
        """
            For unpickling a pickled report.
        """
        logging = get_task_logger(__name__) # logging is likely to happen within celery.
        self.domain = state.get('domain')
        self.context = state.get('context', {})

        class FakeHttpRequest(object):
            GET = {}
            META = {}
            couch_user = None
            datespan = None

        request_data = state.get('request')
        request = FakeHttpRequest()
        request.GET = request_data.get('GET', {})
        request.META = request_data.get('META', {})
        request.datespan = request_data.get('datespan')

        try:
            couch_user = CouchUser.get(request_data.get('couch_user'))
            request.couch_user = couch_user
        except Exception as e:
            logging.error("Could not unpickle couch_user from request for report %s. Error: %s" %
                            (self.name, e))
        self.request = request
        self._caching = True
        self.request_params = state.get('request_params')
        self._update_initial_context()
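Examples No. 1 and No. 3 are the two halves of one protocol. A minimal sketch of the round trip, assuming report is an instance of the report class shown above:

import pickle

payload = pickle.dumps(report)    # pickle calls __getstate__() to get the dict above
restored = pickle.loads(payload)  # then __setstate__(state) rebuilds a FakeHttpRequest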
Example No. 4
def fetch_classes(page, object_, identity):
    logger = get_task_logger(__name__ + '.fetch_classes')
    session = get_session()
    res = select_by_class(
        s=identity,
        s_name='name',
        entities=object_.PROPERTIES,
        page=page,
        p=object_.TYPE_PREDICATES,
    )

    current_time = datetime.datetime.now(datetime.timezone.utc)
    logger.warning('fetching %s, %d', identity, len(res))
    for item in res:
        try:
            with session.begin():
                new_entity = object_.initialize(item)
                new_entity.last_crawled = current_time
                new_entity = session.merge(new_entity)
                session.add(new_entity)
        except IntegrityError:
            entities = session.query(object_) \
                .filter_by(
                    name=item['name']
                )
            if entities.count() > 0:
                entity = entities.one()
                entity.last_crawled = current_time
                entity.initialize(item)
Example No. 5
def delete_order_from_users_google_calendar(order_id, target_users=None):
    from orders_manager.models import User, Order
    from orders_manager.google_apis import GoogleApiHandler

    logger = get_task_logger(__name__)
    google_api_handler = GoogleApiHandler()
    order = Order.objects.get(id=order_id)

    if target_users is None:
        target_users = set([i.user_id for i in order.program_executors.all()])
        target_users.update(
            [i.executor.user_id for i in
             order.additional_services_executors.all()]
        )

    results = {}

    try:
        for user_id in target_users:
            user = User.objects.get(id=user_id)
            res = google_api_handler.delete_event_from_user_calendar(
                user, order.hex_id())
            results.update({user.get_full_name(): res})
            send_order_notice_to_email(order, user, action_type='delete')
    except Exception as ex:
        logger.error(ex.args[0])
        return ex.args[0]

    return results
Example No. 6
 def __init__(self, ps_client, es_client, array_context):
     self._ps_client = ps_client
     self._es_client = es_client
     self._array_name = array_context.name
     self._array_id = array_context.id
     self._data_ttl = array_context.data_ttl
     self.logger = get_task_logger(__name__)
Example No. 7
def fetch_link(url, session, *, log_prefix=''):
    '''Returns result, tree, namespace, name, final_url.'''
    logger = get_task_logger(__name__ + '.fetch_link')
    if not is_wiki_page(url):
        return False, None, None, None, url
    r = requests.get(url)
    try:
        final_url = r.url[:r.url.index('?')]
    except ValueError:
        final_url = r.url
    if not is_wiki_page(final_url):
        return False, None, None, None, final_url
    tree = document_fromstring(r.text)
    try:
        namespace = tree.xpath('//div[@class="pagetitle"]')[0] \
            .text.strip()[:-1]
    except (AttributeError, AssertionError, IndexError):
        logger.warning('%sWarning on url %s: '
                       'There is no pagetitle on this page. Ignoring.',
                       log_prefix, url)
        return False, tree, None, None, final_url
    if namespace == '':
        namespace = 'Main'
    name = tree.xpath('//div[@class="pagetitle"]/span')[0].text.strip()

    type = determine_type(namespace)
    if type == 'Administrivia':
        return False, tree, namespace, name, final_url
    upsert_entity(session, namespace, name, type, final_url)
    process_redirections(session, url, final_url, namespace, name)
    return True, tree, namespace, name, final_url
Example No. 8
def publish_ready_projects():
    ready_projects_to_publish = api.models.Project.objects.filter(
        publish_mode=api.models.Project.PUBLISH_MODE_READY,
        min_publish_date__lte=utc_now(),
    )
    published_projects_ids = []
    for project in ready_projects_to_publish:
        old_publish_mode = project.publish_mode
        project.publish_mode = api.models.Project.PUBLISH_MODE_PUBLISHED
        project.save()
        published_projects_ids.append(project.id)
        project.notify_owner(
            'project_publish_mode_change_by_target',
            {
                'target': None,
                'description': 'Project "%s" published because publication date has arrived - %s.' % (project.title, project.min_publish_date.strftime('%Y-%m-%d %H:%M')),
                'publishMode': project.publish_mode,
                'oldPublishMode': old_publish_mode,
                'publishDate': project.publish_date.strftime('%Y-%m-%d %H:%M'),
            },
            send_mail_with_template='IGNITE_notification_publish_mode_change',
        )
    if published_projects_ids:
        logger = get_task_logger('publish_ready_projects')
        logger.info('Published %i projects that were ready: %s.', len(published_projects_ids), ', '.join([str(x) for x in published_projects_ids]))
Example No. 9
def archive(job_pk):
    """Starts a celery.chord that runs stat_addon for each
    complete addon attached to the Node, then runs
    archive_node with the result

    :param job_pk: primary key of ArchiveJob
    :return: None
    """
    create_app_context()
    job = ArchiveJob.load(job_pk)
    src, dst, user = job.info()
    logger = get_task_logger(__name__)
    logger.info("Received archive task for Node: {0} into Node: {1}".format(src._id, dst._id))
    return celery.chain(
        [
            celery.group(
                stat_addon.si(
                    addon_short_name=target.name,
                    job_pk=job_pk,
                )
                for target in job.target_addons
            ),
            archive_node.s(
                job_pk=job_pk
            )
        ]
    )
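Chaining a group into a single following task is what Celery promotes to a chord internally, which is why the docstring speaks of one. An equivalent formulation, assuming archive_node accepts the group's collected results:

celery.chord(
    [
        stat_addon.si(addon_short_name=target.name, job_pk=job_pk)
        for target in job.target_addons
    ],
    archive_node.s(job_pk=job_pk),
)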
Example No. 10
def getdiskusage(directory):
    # Try an all-Python solution here, in case the subprocess spawning is causing grief.
    # We could be opening hundreds of shell instances above.
    logger = get_task_logger('iondb.rundb.tasks.getdiskusage')
    def dir_size(start):
        if not start or not os.path.exists(start):
            return 0

        file_walker = (
            os.path.join(root, f)
            for root, _, files in os.walk( start )
            for f in files
        )
        total = 0L
        for f in file_walker:
            if os.path.isdir(f):
                total += dir_size(f)
                continue
            if not os.path.isfile(f):
                continue
            try:
                total += os.lstat(f).st_size
            except OSError:
                logger.exception("Cannot stat %s during calc_size", f)
        return total
    # Returns size in MB
    return dir_size(directory)/(1024*1024)
Example No. 11
    def close_spider(self, spider):
        """Post results to BROKER API."""
        from celery.utils.log import get_task_logger
        logger = get_task_logger(__name__)
        if 'SCRAPY_JOB' not in os.environ:
            self._cleanup(spider)
            return

        if hasattr(spider, 'tmp_dir'):
            shutil.rmtree(path=spider.tmp_dir, ignore_errors=True)

        errors = getattr(spider, 'state', {}).get('errors', [])

        if self.count > 0 or errors:
            task_endpoint = spider.settings[
                'API_PIPELINE_TASK_ENDPOINT_MAPPING'
            ].get(
                spider.name,
                spider.settings['API_PIPELINE_TASK_ENDPOINT_DEFAULT'],
            )
            logger.info('Triggering celery task: %s.', task_endpoint)

            kwargs = self._prepare_payload(spider)
            logger.debug(
                '    Sending results:\n    %s',
                pprint.pformat(kwargs),
            )

            res = self.celery.send_task(task_endpoint, kwargs=kwargs)
            logger.info('Sent celery task %s', res)

        self._cleanup(spider)
Example No. 12
def create_celery_app(flask_app=None):
    logger.debug("Creating celery app")
    if not flask_app:
        if has_app_context():
            logger.debug("Using current flask app")
            app = current_app
        else:
            logger.debug("No current flask app")
            app = create_app()
    else:
        app = flask_app
    celery = Celery(app.import_name,
                    broker=app.config['CELERY_BROKER_URL'],
                    backend=app.config['CELERY_RESULT_BACKEND'])
    celery.conf.update(app.config)
    TaskBase = celery.Task
    class ContextTask(TaskBase):
        abstract = True
        def __call__(self, *args, **kwargs):
            with app.app_context():
                return TaskBase.__call__(self, *args, **kwargs)
    celery.Task = ContextTask
    celery.logger = get_task_logger(__name__)
    app.celery = celery
    return app
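A hypothetical usage of the factory above. Note that it returns the Flask app (with the Celery instance attached as app.celery), and ContextTask ensures each task body runs inside app.app_context():

app = create_celery_app()
celery = app.celery

@celery.task
def ping():
    celery.logger.info('pong, logged from inside the Flask app context')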
Example No. 13
def send_order_to_users_google_calendar(
        order_id, send_email=True, is_full_description=None,
        is_new_order=False):
    from orders_manager.models import Order, UserProfile
    from orders_manager.google_apis import GoogleApiHandler

    logger = get_task_logger(__name__)

    google_api_handler = GoogleApiHandler()
    order = Order.objects.get(id=order_id)

    if is_full_description is None:
        dt = datetime.datetime.strptime(
            '%s %s' % (order.celebrate_date, '00:00:00'), '%Y-%m-%d %H:%M:%S')
        today = datetime.datetime.now().replace(hour=0, minute=0, second=0,
                                                microsecond=0)
        dt_lim = today + datetime.timedelta(2)
        is_full_description = 0 < (dt_lim - dt).days <= 2

    date_str = '{0} {1}'.format(order.celebrate_date, order.celebrate_time)
    event_start = datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S')
    event_duration = int(order.duration)
    event_end = event_start + datetime.timedelta(0, event_duration * 60)
    event_start = event_start.isoformat()
    event_end = event_end.isoformat()

    logger.debug('Sending order \'%s\' start \'%s\'' % (order.program.title,
                                                        event_start))

    executor_to_event_title = {}

    description = _get_order_description(order, is_full_description)

    for program_executor in order.program_executors.all():
        executor_to_event_title[program_executor.user_id] = order.program.title

    for service_to_executors in order.additional_services_executors.all():
        if executor_to_event_title.get(service_to_executors.executor.user_id):
            executor_to_event_title[service_to_executors.executor.user_id] += \
                ' + %s' % service_to_executors.additional_service.title

        else:
            executor_to_event_title[service_to_executors.executor.user_id] = \
                service_to_executors.additional_service.title

    for user_id, title in executor_to_event_title.items():
        executor = UserProfile.objects.get(user_id=user_id)
        try:
            summary = _get_order_summary(order, is_full_description)
            summary = summary.format(title=title)
            google_api_handler.send_event_to_user_calendar(
                executor, order.hex_id(), event_start, event_end, summary,
                description)
            if send_email is True:
                send_order_notice_to_email(order, executor,
                                           'create' if is_new_order else 'update')
        except Exception as ex:
            logger.error(ex.args[0])

    return '{0} was updated.'.format(order.program.title)
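A worked example of the is_full_description window computed above: with today at 2024-06-01, dt_lim is 2024-06-03, so only orders celebrating on June 1 or June 2 (today or tomorrow) get the full description:

import datetime

today = datetime.datetime(2024, 6, 1)
dt_lim = today + datetime.timedelta(2)
for day in (1, 2, 3):
    dt = datetime.datetime(2024, 6, day)
    print(day, 0 < (dt_lim - dt).days <= 2)  # 1: True, 2: True, 3: False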
Example No. 14
 def _apply_matrix(self, s, m):
     """Apply the matrix."""
     log = get_task_logger(__name__)
     s.shape = (1024, -1)
     coeffs = m.dot(s)
     coeffs = coeffs.view(np.ndarray)
     return coeffs
Example No. 15
def select_dbpedia(query):
    logger = get_task_logger(__name__ + '.select_dbpedia')
    sparql = SPARQLWrapper("http://dbpedia.org/sparql")
    sparql.setReturnFormat(JSON)
    sparql.setQuery('''PREFIX dbpedia-owl: <http://dbpedia.org/ontology/>
    PREFIX dbpprop: <http://dbpedia.org/property/>'''+query)
    tried = 0
    wikipedia_limit = get_wikipedia_limit()
    while tried < wikipedia_limit:
        try:
            tried = tried + 1
            tuples = sparql.query().convert()['results']['bindings']
        except HTTPError as e:
            logger.exception('HTTPError %s: %s, tried %d/%d',
                             e.code, e.reason, tried, wikipedia_limit)
        except URLError as e:
            logger.exception('URLError %s, tried %d/%d',
                             e.args, tried, wikipedia_limit)
        except ConnectionResetError as e:
            logger.exception('ConnectionResetError %s', e)
        except IncompleteRead as e:
            logger.exception('Network Error, retry %d', tried)
        except EndPointNotFound as e:
            logger.exception('SPARQL endpoint not found, retry %d', tried)
        else:
            return [{k: v['value'] for k, v in tupl.items()} for tupl in tuples]
    return []
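A hypothetical query for the helper above; the function prepends the dbpedia-owl and dbpprop PREFIX declarations, so the query body can use those prefixes directly:

rows = select_dbpedia('''
    SELECT ?name WHERE {
        ?city a dbpedia-owl:City ;
              dbpprop:name ?name .
    } LIMIT 5
''')
# -> list of {'name': ...} dicts, or [] once the retry budget is exhausted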
Example No. 16
def dispatch_task(task, log_name, r):
    logger = get_task_logger(log_name)

    task_id = task[0]
    url_path = task[1]
    max_depth = task[2]
    allow_domains = task[3]
    urls = []
    
    # Check the arguments.
    if task_id is None:
        logger.error('task_id is None.')
        return
    if url_path is None:
        logger.error('url_path is None.')
        return
    if max_depth is None:
        logger.error('max_depth is None.')
        return
    elif max_depth < 0:
        logger.debug('max_depth is less than zero; setting it to zero.')
        max_depth = 0
    logger.info(task)
    logger.info(task[1])

    try:
        with open(url_path, 'r') as f:
            urls = f.readlines()
    except Exception, e:
        logger.error(e)
        return
Example No. 17
File: util.py Project: Eyra-is/Eyra
def errLog(x):
    """
    Logs x to celery INFO. Used as a callback in sh piping to manually print
      otherwise swallowed error logs.
    """
    logger = get_task_logger(__name__)
    logger.info(x)
Example No. 18
def save_link(name, url):
    with psycopg2.connect(worker.conf.DB_FILENAME) as conn, \
            conn.cursor() as cur:
        try:
            cur.execute('INSERT INTO entities VALUES (%s, %s, %s, NULL, NULL)',
                        (name[0], name[1], url))
        except psycopg2.IntegrityError:
            # Roll back the aborted INSERT first; PostgreSQL rejects further
            # statements on a failed transaction until it is rolled back.
            conn.rollback()
            cur.execute('UPDATE entities SET url = %s '
                        'WHERE namespace = %s and name = %s',
                        (url, name[0], name[1]))
        conn.commit()
        cur.execute('SELECT count(*) FROM entities')
        get_task_logger(__name__ + '.save_link').info(
            'Total %d',
            cur.fetchone()[0]
        )
Example No. 19
def get_logger(name):
    try:
        from celery.utils.log import get_task_logger
        logger = get_task_logger(name)
    except ImportError:
        import logging
        logger = logging.getLogger(name)
    return logger
Example No. 20
def facebook_update():
    log = get_task_logger('facebook')
    log.info('[Facebook] Start')
    fbapi = FacebookAPI(
            settings.SOCIAL_FACEBOOK_APP_ID,
            settings.SOCIAL_FACEBOOK_APP_SECRET)
    fb = FacebookUpdater(fbapi)
    fb.update()
    log.info('[Facebook] End')
Example No. 21
def printDb(self, id):
  ''' Useful task for probing the boxm2 database of a worker '''
  import boxm2_batch
  from vsi.tools import Redirect, Logger
  from celery.utils.log import get_task_logger
  l = get_task_logger(__name__)

  with Redirect(all=Logger(l)):
    l.error(boxm2_batch.print_db())
Example No. 22
 def logger(self):
     """
     Get the logger for this action.
     """
     logname = self.__class__.get_name()
     if self.executed_by_celery:
         return get_task_logger(logname)
     else:
         return logging.getLogger(logname)
Example No. 23
def get_redis_logger(id_):
    loggy = get_task_logger(id_)
    loggy.setLevel(logging.DEBUG)
    loggy.propagate = False
    loggy.handlers = []
    loggy.addHandler(RedisHandler(id_))
    # for test purposes:
    # loggy.addHandler(logging.StreamHandler())
    return loggy
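The RedisHandler referenced above is not shown; a minimal hypothetical implementation consistent with that usage (a single positional key argument, pushing formatted records onto a Redis list) might look like:

import logging
import redis

class RedisHandler(logging.Handler):
    def __init__(self, key, client=None):
        super(RedisHandler, self).__init__()
        self.key = key
        self.client = client or redis.Redis()  # assumes a local Redis

    def emit(self, record):
        try:
            self.client.rpush(self.key, self.format(record))
        except Exception:
            self.handleError(record)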
Example No. 24
File: task.py Project: h3/jobtastic
    def run(self, *args, **kwargs):
        if get_task_logger:
            self.logger = get_task_logger(self.__class__.__name__)
        else:
            # Celery 2.X fallback
            self.logger = self.get_logger(**kwargs)
        self.logger.info("Starting %s", self.__class__.__name__)

        self.cache_key = self._get_cache_key(**kwargs)

        # Record start time to give estimated time remaining estimates
        self.start_time = time.time()

        # Keep track of progress updates for update_frequency tracking
        self._last_update_count = 0

        # Report to the backend that work has been started.
        if self.request.id:
            self.update_state(None, PROGRESS, {
                "progress_percent": 0,
                "time_remaining": -1,
            })

        memleak_threshold = int(getattr(self, 'memleak_threshold', -1))
        if memleak_threshold >= 0:
            beginning_memory_usage = self._get_memory_usage()

        self.logger.info("Calculating result")
        try:
            task_result = self.calculate_result(*args, **kwargs)
        except Exception:
            # Don't want other tasks waiting for this task to finish, since it
            # won't
            self._break_thundering_herd_cache()
            raise  # We can use normal celery exception handling for this

        if hasattr(self, 'cache_duration'):
            cache_duration = self.cache_duration
        else:
            cache_duration = -1  # By default, don't cache
        if cache_duration >= 0:
            # If we're configured to cache this result, do so.
            cache.set(self.cache_key, self.request.id, cache_duration)

        # Now that the task is finished, we can stop all of the thundering herd
        # avoidance
        self._break_thundering_herd_cache()

        if memleak_threshold >= 0:
            self._warn_if_leaking_memory(
                beginning_memory_usage,
                self._get_memory_usage(),
                memleak_threshold,
                task_kwargs=kwargs,
            )

        return task_result
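For context, a minimal hypothetical JobtasticTask subclass supplying what run() consumes (calculate_result, plus the optional cache_duration and memleak_threshold knobs used above); names follow the jobtastic package's documented API:

from jobtastic import JobtasticTask

class AddTask(JobtasticTask):
    significant_kwargs = [('x', str), ('y', str)]
    herd_avoidance_timeout = 60
    cache_duration = 300     # run() caches the result id for five minutes
    memleak_threshold = 10   # warn if a run leaks more than 10 MB

    def calculate_result(self, x, y, **kwargs):
        return x + y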
Example No. 25
File: tasks.py Project: iizs/aor
def process_action(game_id, lsn, replay=False):
    logger = get_task_logger(game_id)
    action_queue = []

    with transaction.atomic():
        g = Game.objects.get(hashkey=game_id)
        logs = GameLog.objects.filter(game=g, lsn__gt=g.applied_lsn, lsn__lte=lsn, status=GameLog.ACCEPTED)
        info = json.loads(g.current_info, cls=GameInfoDecoder)
        for l in logs:
            action_dict = l.get_log_as_dict()
            user_id = l.player.user_id if l.player is not None else None

            try:
                state = GameState.getInstance(info)
                logger.info('Applying ' + str(l) + ': ' + type(state).__name__ + ', ' + l.log)
                result = state.action(action_dict['action'], user_id=user_id, params=action_dict)

                info = state.info
                if 'random' in result:
                    action_dict['random'] = result['random']
                if 'queue_action' in result:
                    action_queue.append(result['queue_action'])
                if 'msg' in result:
                    for m in result['msg']:
                        l.add_info(m['user_id'], m['msg'])

                l.set_log(action_dict)
                l.status = GameLog.CONFIRMED
            except (GameState.NotSupportedAction, 
                    GameState.InvalidAction, 
                    Action.InvalidParameter, 
                    Action.WarNotResolved) as e:
                l.status = GameLog.FAILED
                logger.error(str(l) + " :" + type(e).__name__ + ": " + e.message)
                l.add_warning(user_id, e.message)
            g.applied_lsn = l.lsn
            l.save()
        g.set_current_info(info)

        if not replay:
            for aq in action_queue:
                g.last_lsn += 1
                p = Player.objects.get(user_id=aq['_player']) if '_player' in aq else None
                a = GameLog(
                        game=g,
                        player=p,
                        lsn=g.last_lsn,
                    )
                a.set_log(log_dict=aq)
                a.save()
        g.save()

    if action_queue and not replay:
        process_action.delay(g.hashkey, g.last_lsn)

    return g.applied_lsn
Example No. 26
 def open(self):
     """Open the dataset, as a context manager."""
     log = get_task_logger(__name__)
     with self.dataset.open() as g:
         try:
             yield g[self.kind.h5path]
         except KeyError as e:
             log.error("Error opening {0} from {1} in {2}".format(self.kind.h5path, g.name, repr(self.dataset)))
             log.error("File: {0}".format(self.dataset.filename))
             raise
Example No. 27
def query_transactions():
    logger = get_task_logger('bitcoin_transactions')
    logger.info("query_transactions: start task")
    with NonBlockingCacheLock("query_transactions_ongoing"):
        try:
            monitor_transactions(logger)
        except Exception as e:
            logger.error("query_transactions: exception %s\n%s" %
                         (e,  format_exc(e)))
            return
Example No. 28
def refresh_all_wechat_token(self):
    """
    定时1小时,刷新所有已授权公众号
    """
    logger = get_task_logger('refresh_all_wechat_token')
    for wechat in Wechat.objects.exclude(appid=settings.TEST_APPID).all():
        if not wechat.authorized:
            logger.error('Official account {0} has lost authorization'.format(wechat.appid))
            continue
        refresh_wechat_token.delay(wechat.appid)
Example No. 29
def process_wechat_query_auth_code_test(FromUserName, query_auth_code):
    """
    处理发布前微信的自动化测试query_auth_code
    """
    logger = get_task_logger('process_wechat_query_auth_code_test')
    logger.info(FromUserName)
    logger.info(query_auth_code)
    component = get_component()
    client = component.get_client_by_authorization_code(query_auth_code)
    client.message.send_text(FromUserName, query_auth_code+'_from_api')
Example No. 30
def add_numbers(self, row_id):
    logger = get_task_logger(__name__)
    logger.info(u'[{0.id}]Function add_numbers called with params [{1}] with extended info:{0}'.format(
        self.request,
        row_id
    ))
    from .models import Adder
    record = Adder.objects.get(pk=row_id)
    record.result = record.x + record.y
    record.save()
Example No. 31
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import pickle
import pandas as pd
from handler.iddb_handler import TwseIdDBHandler, OtcIdDBHandler
from handler.hisdb_handler import TwseHisDBHandler, OtcHisDBHandler

from giant.celery import app
from celery import shared_task

from celery.utils.log import get_task_logger
logger = get_task_logger('handler')

hisdb_tasks = {'twse': TwseHisDBHandler, 'otc': OtcHisDBHandler}

iddb_tasks = {'twse': TwseIdDBHandler, 'otc': OtcIdDBHandler}

hisitems = ['stock', 'credit', 'future', 'trader']
iditems = ['stock', 'trader']


@shared_task(time_limit=60 * 60)
def collect_iditem(stream):
    args, kwargs = pickle.loads(stream)

    opt = kwargs.pop('opt', None)
    targets = kwargs.pop('targets', [])
    callback = kwargs.pop('callback', None)
    debug = kwargs.pop('debug', False)
Example No. 32
from pytz import UTC

from celery import Task
from celery.utils.log import get_task_logger

from xmodule.modulestore.django import modulestore
from track.views import task_track

from courseware.grades import iterate_grades_for
from courseware.models import StudentModule
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor_internal
from instructor_analytics.basic import enrolled_students_features
from instructor_analytics.csvs import format_dictlist
from instructor_task.models import ReportStore, InstructorTask, PROGRESS
from student.models import CourseEnrollment

# define different loggers for use within tasks and on client side
TASK_LOG = get_task_logger(__name__)

# define value to use when no task_id is provided:
UNKNOWN_TASK_ID = 'unknown-task_id'

# define values for update functions to use to return status to perform_module_state_update
UPDATE_STATUS_SUCCEEDED = 'succeeded'
UPDATE_STATUS_FAILED = 'failed'
UPDATE_STATUS_SKIPPED = 'skipped'


class BaseInstructorTask(Task):
    """
    Base task class for use with InstructorTask models.

    Permits updating information about task in corresponding InstructorTask for monitoring purposes.
Example No. 33
import datetime
import requests
import pandas as pd
from utilities.models import db_config
import math
import psycopg2
import time

from celery.utils.log import get_task_logger

from metar.Metar import Metar, ParserError

logger = get_task_logger("redemet")


def get_date_and_standard_metar(raw_data):
    date_str, partially_cleaned_data = raw_data.split(' - ')
    observation_time = datetime.datetime.strptime(date_str, '%Y%m%d%H')
    # The default Metar expects COR modifiers to come after the
    # time data. We will just remove the COR reference and let it
    # be parsed as a regular entry (since it makes no difference
    # for our purposes).
    partially_cleaned_data = partially_cleaned_data.replace('COR ', '')
    cleaned_data = partially_cleaned_data.rstrip('=')
    return observation_time, cleaned_data


def humidity(temperature, dew_point):
    term_a = (17.625 * dew_point) / (243.04 + dew_point)
    term_b = (17.625 * temperature) / (243.04 + temperature)
    return 100 * (math.exp(term_a) / math.exp(term_b))
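The humidity() helper is the Magnus-type relative-humidity approximation. A quick check of the arithmetic (temperatures in °C):

print(round(humidity(25, 20)))  # 25 °C with a 20 °C dew point -> ~74% RH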
Example No. 34
from datetime import datetime
from datetime import timedelta

import requests_cache
from celery.utils.log import get_task_logger
from influxdb import InfluxDBClient

from .. import curse_login
from .. import celery
from .. import db
from .. import app
from .. import redis_store
from ..models import AddonModel, AddonStatusEnum

from .task_helpers import request_addons
from .task_helpers import request_addons_by_id
from .task_helpers import request_all_files

logger = get_task_logger(__name__)


@celery.task
def p_curse_checklogin():
    return curse_login.renew_session()


@celery.task
def p_remove_expired_caches():
    return requests_cache.core.remove_expired_responses()


@celery.task
def p_fill_incomplete_addons():
    request_addons(AddonModel.query.filter(AddonModel.name == None).all())
Example No. 35
 def logtask():
     logger = get_task_logger(__name__)
     while getattr(logger, 'parent', None):
         assert logger.propagate == 1
         logger = logger.parent
Example No. 36
"""Plugin implementation for a simple web application."""
from celery.utils.log import get_task_logger

from cloudlaunch.backend_plugins.base_vm_app import BaseVMAppPlugin
from cloudlaunch.backend_plugins.cloudman2_app import get_iam_handler_for
from cloudlaunch.configurers import AnsibleAppConfigurer

from rest_framework.serializers import ValidationError

log = get_task_logger('cloudlaunch')


def get_required_val(data, name, message):
    val = data.get(name)
    if not val:
        raise ValidationError({"error": message})
    return val


class RancherKubernetesApp(BaseVMAppPlugin):
    """
    Rancher Kubernetes Appliance.
    """
    @staticmethod
    def validate_app_config(provider, name, cloud_config, app_config):
        rancher_config = get_required_val(
            app_config, "config_rancher_kube", "Rancher configuration data"
            " must be provided. config_rancher_kube entry not found in"
            " app_config.")
        #user_data = "#!/bin/bash\n"
        #user_data += get_required_val(
Example No. 37
    iterate_repeat_records,
)
from corehq.privileges import DATA_FORWARDING, ZAPIER_INTEGRATION
from corehq.util.metrics import make_buckets_from_timedeltas
from corehq.util.soft_assert import soft_assert

_check_repeaters_buckets = make_buckets_from_timedeltas(
    timedelta(seconds=10),
    timedelta(minutes=1),
    timedelta(minutes=5),
    timedelta(hours=1),
    timedelta(hours=5),
    timedelta(hours=10),
)
_soft_assert = soft_assert(to='@'.join(('nhooper', 'dimagi.com')))
logging = get_task_logger(__name__)


@periodic_task(
    run_every=crontab(day_of_month=27),
    queue=settings.CELERY_PERIODIC_QUEUE,
)
def clean_logs():
    """
    Drop MOTECH logs older than 90 days.

    Runs on the 27th of every month.
    """
    ninety_days_ago = datetime.now() - timedelta(days=90)
    RequestLog.objects.filter(timestamp__lt=ninety_days_ago).delete()
Example No. 38
    Transcript, clean_video_id, get_transcript_from_contentstore,
    TranscriptsGenerationException)
from xmodule.modulestore import ModuleStoreEnum
from xmodule.exceptions import NotFoundError
from edxval.api import (
    ValCannotCreateError,
    create_video_transcript,
    is_video_available,
    is_transcript_available,
    create_or_update_video_transcript,
    create_external_video,
)

User = get_user_model()

LOGGER = get_task_logger(__name__)
FILE_READ_CHUNK = 1024  # bytes
FULL_COURSE_REINDEX_THRESHOLD = 1
DEFAULT_ALL_COURSES = False
DEFAULT_FORCE_UPDATE = False
DEFAULT_COMMIT = False
MIGRATION_LOGS_PREFIX = 'Transcript Migration'

RETRY_DELAY_SECONDS = 30
COURSE_LEVEL_TIMEOUT_SECONDS = 1200
VIDEO_LEVEL_TIMEOUT_SECONDS = 300


@chord_task(bind=True)
def task_status_callback(
        self,
Example No. 39
 def log(self):
     return get_task_logger("%s.%s" % (__name__, self.alert_name))
Example No. 40
import mioji.common.pool
import mioji.common.pages_store
import mioji.common.logger
import pymongo
import datetime
import pymongo.errors
from celery.utils.log import get_task_logger
from proj.my_lib.logger import func_time_logger
from proj.list_config import cache_config, list_cache_path, cache_type, none_cache_config
from proj.my_lib.Common.BaseSDK import BaseSDK
from proj.my_lib.ServiceStandardError import ServiceStandardError
from proj import config
from mongo_pool import mongo_data_client
from proj.my_lib.Common.Browser import proxy_pool

mioji.common.pool.pool.set_size(2024)
logger = get_task_logger('hotel_list')
mioji.common.logger.logger = logger
mioji.common.pages_store.cache_dir = list_cache_path
mioji.common.pages_store.STORE_TYPE = cache_type

# client = pymongo.MongoClient(host='10.10.213.148', maxPoolSize=20)
# collections = client['data_result']['HotelList']
# pymongo client

client = pymongo.MongoClient('mongodb://*****:*****@10.19.2.103:27017/')
collections = client['data_result']['hotel_list']
filter_collections = client['data_result']['hotel_filter']
# Initialization (only needs to run once at program startup)
insert_db = None
# get_proxy = simple_get_socks_proxy
get_proxy = proxy_pool.get_proxy
Example No. 41
from celery.utils.log import get_task_logger

logger = get_task_logger(__name__)


class Gateway:
    def __init__(self, response_json):
        try:
            self.response = response_json
            self.data = response_json.json()
        except Exception:
            self.data = {}

    def status(self):
        return self.data.get("status", False)

    def error(self):
        return self.data.get("error", None)

    def to_dict(self):
        return self.data
Example No. 42
from datetime import datetime, timedelta

import requests

from celery import group
from celery.utils.log import get_task_logger
from celery.exceptions import Ignore

from tasks import settings
from tasks.application import app
from tasks.utils.session import session
from tasks.utils.database import databases
from tasks.errors.parse_error import ParseError, ParserNotFound
from tasks.parsers import Parser

logger = get_task_logger('tasks')  # pylint: disable=C0103

mapping = {
    'hotel': 'hotel_orders',
    'flight': 'flight_orders',
    'restaurant': 'restaurant_orders'
}
ai_base = 'https://ai.weego.me/api/v3/ai/user_profiles/'


@app.task
def refresh_access_token(email: str):  # Type: Bool
    """Refresh access token by refresh token
    """
    # scripture = databases('scripture')
    # user = scripture.g_users.find_one({'email': email})
Example No. 43
from __future__ import absolute_import

import urllib2
import json

from celery import shared_task
from celery.utils.log import get_task_logger

from django.conf import settings

from lrs.models import Activity, Statement
from lrs.util import StatementValidator as SV

celery_logger = get_task_logger('celery-task')


@shared_task
def check_activity_metadata(stmts):
    activity_ids = list(
        Activity.objects.filter(
            object_of_statement__statement_id__in=stmts).values_list(
                'activity_id', flat=True).distinct())
    [get_activity_metadata(a_id) for a_id in activity_ids]


@shared_task
def void_statements(stmts):
    try:
        Statement.objects.filter(statement_id__in=stmts).update(voided=True)
    except Exception, e:
        celery_logger.exception("Voiding Statement Error: " + e.message)
Example No. 44
 def get_logger(self, **kwargs):
     return get_task_logger(self.name)
Example No. 45
import os
import sys

from celery.utils.log import get_task_logger
from downloader_app.celeryapp import app
from downloader_app.tiff_downloader import download_tiffs as td

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
WORK_DIR = os.path.join(BASE_DIR, 'downloader_app')
sys.path.insert(0, WORK_DIR)


logger = get_task_logger('downloader_app')


@app.task
def download_source(source, dates, point1, point2, opt=False):
    """
    Download satelite tiff files and save it to the directory 'downloadedFiles'.
    """

    try:
        logger.info('Fetch {} {}'.format(source, dates))
        td(source, dates, point1, point2, opt)

    except Exception as e:
        logger.error(
            '[EE] fetching from {} at {} error: {}'.format(source, dates, e)
        )
        return
Example No. 46
from proj.my_lib.Common.Browser import proxy_pool
from proj.list_config import list_cache_path, cache_type
from celery.utils.log import get_task_logger
import mioji.common.logger
import mioji.common.pool
import mioji.common.pages_store
import urlparse
from lxml import html
import re
import time
import pymongo
from proj.my_lib.Common.Task import Task as Task_to
import requests
import traceback
mioji.common.pool.pool.set_size(1024)

logger = get_task_logger('daodaoHotel')
mioji.common.logger.logger = logger
mioji.common.pages_store.cache_dir = list_cache_path
mioji.common.pages_store.STORE_TYPE = cache_type
# Initialization (only needs to run once at program startup)
insert_db = None
# get_proxy = simple_get_socks_proxy
get_proxy = proxy_pool.get_proxy
debug = False
spider_factory.config_spider(insert_db, get_proxy, debug, need_flip_limit=False)
clients = pymongo.MongoClient(host='10.10.231.105')
SourceIDS = clients['ImagesMD5']['SourceId']


def hotel_url_to_database(source, keyword, need_cache=False):
    task = Task_to()  # the import above aliases Task as Task_to