Example #1
def gen_primes(self, **kwargs):
    x = kwargs['value']

    if x <= 0:
        raise Reject('Value must be greater than zero', requeue=False)

    multiples = set()
    results = []
    for i in range(2, x + 1):
        if i not in multiples:
            results.append(i)
            # Mark multiples of i as composite (sieve of Eratosthenes).
            for j in range(i * i, x + 1, i):
                multiples.add(j)

    return results
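A snippet like this only hands the rejection to the broker when the worker acknowledges late. A minimal wiring sketch, assuming a hypothetical app (the app name, broker URL and call site are not from the source):

from celery import Celery
from celery.exceptions import Reject

app = Celery('primes', broker='amqp://')  # assumed app and broker

@app.task(bind=True, acks_late=True)  # late ack lets Reject control the message
def gen_primes(self, **kwargs):
    ...  # body as in the example above

# Arguments arrive via **kwargs, so the caller passes them by keyword:
gen_primes.delay(value=100)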
Example #2
def unsubscribe(*args, **kwargs):
    """ Unsubscribe from a user's fitbit data """

    # Ignore updated token, it's not needed. The session gets the new token
    # automatically
    fb = utils.create_fitbit(refresh_cb=lambda token: None, **kwargs)
    try:
        for sub in fb.list_subscriptions()['apiSubscriptions']:
            if sub['ownerId'] == kwargs['user_id']:
                fb.subscription(sub['subscriptionId'],
                                sub['subscriberId'],
                                method="DELETE")
    except Exception as e:
        logger.exception("Error unsubscribing user: %s" % e)
        raise Reject(e, requeue=False)
Example #3
def seed_computations(max_num):
    results_filename = get_results_filename(max_num)
    if os.path.exists(results_filename):
        raise Reject(f"Computations for '{results_filename}' have already been seeded!")

    # create the results file, to mark that computations have been seeded
    open(results_filename, 'w').close()

    from celery import chord
    chord(
        compute_prime_matrix_element.chunks(
            ((x, y, num) for x, y, num in gen_input_params(max_num)),
            guesstimate_optimal_chunk_size(max_num),
        ).group(),
        store_prime_matrix.s(max_num)
    ).delay()
Example #4
    def test_get_log_policy(self):
        einfo = Mock(name='einfo')
        einfo.internal = False
        assert get_log_policy(self.add, einfo, Reject()) is log_policy_reject
        assert get_log_policy(self.add, einfo, Ignore()) is log_policy_ignore

        self.add.throws = (TypeError,)
        assert (get_log_policy(self.add, einfo, KeyError()) is
                log_policy_unexpected)
        assert (get_log_policy(self.add, einfo, TypeError()) is
                log_policy_expected)

        einfo2 = Mock(name='einfo2')
        einfo2.internal = True
        assert (get_log_policy(self.add, einfo2, KeyError()) is
                log_policy_internal)
Example #5
    def run(self, numbers, target, client_id, message_queue, *args, **kwargs):
        self.update_state(state='PROGRESS', meta={})
        try:
            solution, recursions, exec_time = self._numbers_service.solve(target, numbers)
            msg = {
                'solution': solution,
                'recursions': recursions,
                'exec_time': exec_time
            }

            # Websockets support
            local_socketio = SocketIO(message_queue=message_queue)
            local_socketio.emit('solution', msg, room=client_id)
            return msg
        except ServiceException as f:
            self.update_state(state='FAILURE', meta={'exc_type': '', 'exc_message': (f.errorcode.value, f.message)})
            raise Reject()
Example #6
def archive_blog(url=None, offset=0, totalposts=0):
    if not url:
        raise ValueError("Blog URL parameter is missing.")

    redis = archive_blog.redis
    db = archive_blog.db
    tumblr = archive_blog.tumblr_client

    cache_ids(redis, db, url)

    try:
        if int(redis.get("cache:bad:" + url)) >= 5:
            return {"status": "done"}
    except (TypeError, ValueError):
        pass

    # Check if we can access the API.
    check_ratelimit(redis, "api_access")

    # Set the counter of total blog posts.
    try:
        if totalposts == 0:
            totalposts = tumblr.blog_info(url)["blog"]["posts"]
    except KeyError:
        raise Reject("Could not get number of posts. Data: %s" %
                     tumblr.blog_info(url))
    except Exception as e:
        raise archive_blog.retry(exc=e, eta=datetime.now() + timedelta(minutes=1))

    # Get the posts
    try:
        posts = tumblr.posts(url + ".tumblr.com", offset=offset)['posts']
    except Exception as e:
        raise archive_blog.retry((url, offset, totalposts),
                                 exc=e,
                                 eta=datetime.now() + timedelta(seconds=15))

    # Archive posts
    for post in posts:
        add_post.delay(post)

    # Start the next task.
    archive_blog.apply_async((url, offset + 20, totalposts),
                             eta=datetime.now() + timedelta(seconds=1))
Example #7
def create_schema_task(tenant_id) -> None:
    try:
        tenant = TenantData.objects.get(id=tenant_id)
    except (TenantData.MultipleObjectsReturned, TenantData.DoesNotExist) as ex:
        raise Reject(ex, requeue=False)

    try:
        tenant.create_schema(check_if_exists=True)

        with tenant_context(tenant):
            call_command('installwatson')

    except BaseException:
        # We failed creating the tenant, delete what we created and
        # re-raise the exception
        tenant.delete(force_drop=True)
        raise

    tenant_prepared.send_robust(sender=TenantData, tenant=tenant)
Example #8
def email_when_complete(self, url, address):
    """ Task to check a URL and send an email once the result has a non-incomplete status
    Used for periodically checking whether a hive job has finished. If status is not complete,
    the task is retried
    Arguments:
      url - URL to check for job completion. Must return JSON containing status, subject and body fields
      address - address to send email
    """
    # allow infinite retries
    self.max_retries = None
    try:
        with http_session() as session:
            response = session.get(url)
        result = response.json()
    except requests.RequestException as e:
        logger.error('RequestsException: %s', e)
        raise self.retry(countdown=retry_wait, max_retries=120)
    except json.JSONDecodeError:
        err = 'Invalid response. Status: {} URL: {}'.format(
            response.status_code, response.url)
        logger.error('%s Body: %s', err, response.text)
        raise self.retry(countdown=retry_wait, max_retries=120)

    try:
        status = result['status']
        if status in ('incomplete', 'running', 'submitted'):
            # job incomplete so retry task after waiting
            raise self.retry(countdown=retry_wait)
        subject = result['subject']
        body = result['body']
    except KeyError as e:
        err = 'Invalid response. Missing parameter "{}". URL: {}'.format(
            str(e), response.url)
        logger.error('%s Body: %s', err, response.text)
        raise Reject(err, requeue=False)
    # job complete so send email and complete task
    send_email(smtp_server=smtp_server,
               from_email_address=from_email_address,
               to_address=address,
               subject=subject,
               body=body)
    return result
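The task above leans on several module-level names (retry_wait, smtp_server, from_email_address, http_session, send_email). A plausible sketch of part of that configuration, with every value an assumption:

import requests

retry_wait = 60  # assumed: seconds to wait between polls of the job URL
smtp_server = 'localhost'  # assumed SMTP relay
from_email_address = 'noreply@example.com'  # assumed sender

def http_session():
    # Assumed helper: a requests Session, usable as a context manager.
    return requests.Session()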
Example #9
def process_file_async(self, bucket_id, key_id):
    """Process file with processor tika."""
    try:
        current_app.logger.debug(f"Processing file {bucket_id}:{key_id}")

        obj = ObjectVersion.get(bucket_id, key_id)  # type: ObjectVersion
        processor = current_processors.get_processor(
            name=TikaProcessor.id())  # type: TikaProcessor
        processor.process(obj)

        current_app.logger.debug(f"Processed file {bucket_id}:{key_id}")
    except InvalidProcessor:
        # Because we use reject_on_worker_lost, we need to handle the
        # occasional already-processed file being requeued.
        current_app.logger.debug(
            f"Requeued file {bucket_id}:{key_id} already processed")
    except Exception:
        try:
            raise self.retry()
        except MaxRetriesExceededError as e:
            raise Reject(str(e), requeue=False)
Example #10
def simulate_pendulum():
    if os.path.exists(get_experiment_status_filename('started')):
        raise Reject('Computations have already been seeded!')

    record_experiment_status.si('started').delay()

    theta_resolution = app.conf.RESOLUTION
    dt = app.conf.DT
    tmax = app.conf.TMAX
    L1 = app.conf.L1
    L2 = app.conf.L2
    m1 = app.conf.M1
    m2 = app.conf.M2
    results_path = app.conf.RESULTS_PATH

    chord(
        (solve.s(L1, L2, m1, m2, tmax, dt, np.array([theta1_init, 0, theta2_init, 0]), theta1_init, theta2_init)
         for theta1_init, theta2_init in gen_simulation_model_params(theta_resolution)),
        store_results.s(results_path)
    ).delay()
Example #11
    def run(self, sudoku, client_id, message_queue, *args, **kwargs):
        self.update_state(state='PROGRESS', meta={})

        # Websockets support
        local_socketio = SocketIO(message_queue=message_queue)

        try:
            solution = self._sudoku_service.solve(sudoku)
            msg = {'solution': solution}

            local_socketio.emit('sudokuSol', msg, room=client_id)
            return msg
        except ServiceException as f:
            self.update_state(state='FAILURE',
                              meta={
                                  'exc_type': '',
                                  'exc_message': (f.errorcode.value, f.message)
                              })
            local_socketio.emit('sudokuErr',
                                '{}-{}'.format(f.errorcode.value, f.message),
                                room=client_id)
            raise Reject()
Example #12
    def wrapper(*args, **kwargs):
        task = args[0]
        assert isinstance(task, Task), 'bind=True must be set to enable retries'
        assert task.acks_late, 'acks_late=True must be set to send rejected tasks to the dead letter queue'

        autoretry_for = tuple(
            getattr(task, 'autoretry_for', ())
        )
        retry_backoff = int(
            getattr(task, 'retry_backoff', False)
        )
        retry_backoff_max = int(
            getattr(task, 'retry_backoff_max', 600)
        )
        retry_jitter = getattr(task, 'retry_jitter', True)

        countdown = None
        if retry_backoff:
            countdown = get_exponential_backoff_interval(
                factor=retry_backoff,
                retries=task.request.retries,
                maximum=retry_backoff_max,
                full_jitter=retry_jitter)

        try:
            if not autoretry_for:
                return func(*args, **kwargs)
            else:
                try:
                    return func(*args, **kwargs)
                except autoretry_for as retry_exc:
                    raise task.retry(exc=retry_exc, countdown=countdown)
        except TaskPredicate:
            # pass through celery specific exceptions
            raise
        except Exception as exc:
            # anything else (including MaxRetriesExceededError) is rejected,
            # so the message is dead-lettered instead of requeued
            raise Reject(exc, requeue=False) from exc
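A wrapper like this is normally the inner function of a decorator. A minimal sketch of the enclosing pieces, with the decorator and task names assumed:

import functools

from celery import Celery

app = Celery('demo', broker='amqp://')  # assumed app

def reject_on_max_retries(func):
    # Hypothetical enclosing decorator returning the wrapper above.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        ...  # body as shown in the example
    return wrapper

# app.task must be the outermost decorator so the bound task is args[0].
@app.task(bind=True, acks_late=True)
@reject_on_max_retries
def flaky_task(self):
    ...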
Example #13
    def run(self, latitude, longitude, colors, client_id, message_queue, *args,
            **kwargs):
        self.update_state(state='PROGRESS', meta={})

        # Websockets support
        local_socketio = SocketIO(message_queue=message_queue)

        try:
            solution = self._map_service.solve(latitude, longitude, colors)
            msg = {'solution': solution}

            local_socketio.emit('mapSol', self.request.id, room=client_id)
            return msg
        except ServiceException as f:
            self.update_state(state='FAILURE',
                              meta={
                                  'exc_type': '',
                                  'exc_message': (f.errorcode.value, f.message)
                              })
            local_socketio.emit('mapErr',
                                '{}-{}'.format(f.errorcode.value, f.message),
                                room=client_id)
            raise Reject()
Example #14
def reject_test(self):
    raise Reject('no reason', requeue=False)
Example #15
    def retry(self,
              args=None,
              kwargs=None,
              exc=None,
              throw=True,
              eta=None,
              countdown=None,
              max_retries=None,
              **options):
        """Retry the task.

        :param args: Positional arguments to retry with.
        :param kwargs: Keyword arguments to retry with.
        :keyword exc: Custom exception to report when the max restart
            limit has been exceeded (default:
            :exc:`~@MaxRetriesExceededError`).

            If this argument is set and retry is called while
            an exception was raised (``sys.exc_info()`` is set)
            it will attempt to re-raise the current exception.

            If no exception was raised it will raise the ``exc``
            argument provided.
        :keyword countdown: Time in seconds to delay the retry for.
        :keyword eta: Explicit time and date to run the retry at
                      (must be a :class:`~datetime.datetime` instance).
        :keyword max_retries: If set, overrides the default retry limit for
            this execution. Changes to this parameter do not propagate to
            subsequent task retry attempts. A value of :const:`None`, means
            "use the default", so if you want infinite retries you would
            have to set the :attr:`max_retries` attribute of the task to
            :const:`None` first.
        :keyword time_limit: If set, overrides the default time limit.
        :keyword soft_time_limit: If set, overrides the default soft
                                  time limit.
        :keyword \*\*options: Any extra options to pass on to
                              :meth:`apply_async`.
        :keyword throw: If this is :const:`False`, do not raise the
                        :exc:`~@Retry` exception,
                        that tells the worker to mark the task as being
                        retried.  Note that this means the task will be
                        marked as failed if the task raises an exception,
                        or successful if it returns.

        :raises celery.exceptions.Retry: To tell the worker that
            the task has been re-sent for retry. This always happens,
            unless the `throw` keyword argument has been explicitly set
            to :const:`False`, and is considered normal operation.

        **Example**

        .. code-block:: pycon

            >>> from imaginary_twitter_lib import Twitter
            >>> from proj.celery import app

            >>> @app.task(bind=True)
            ... def tweet(self, auth, message):
            ...     twitter = Twitter(oauth=auth)
            ...     try:
            ...         twitter.post_status_update(message)
            ...     except twitter.FailWhale as exc:
            ...         # Retry in 5 minutes.
            ...         raise self.retry(countdown=60 * 5, exc=exc)

        Although the task will never return above as `retry` raises an
        exception to notify the worker, we use `raise` in front of the retry
        to convey that the rest of the block will not be executed.

        """
        request = self.request
        retries = request.retries + 1
        max_retries = self.max_retries if max_retries is None else max_retries

        # Not in worker or emulated by (apply/always_eager),
        # so just raise the original exception.
        if request.called_directly:
            maybe_reraise()  # raise orig stack if PyErr_Occurred
            raise exc or Retry('Task can be retried', None)

        if not eta and countdown is None:
            countdown = self.default_retry_delay

        is_eager = request.is_eager
        S = self.signature_from_request(request,
                                        args,
                                        kwargs,
                                        countdown=countdown,
                                        eta=eta,
                                        retries=retries,
                                        **options)

        if max_retries is not None and retries > max_retries:
            if exc:
                # first try to re-raise the original exception
                maybe_reraise()
                # or if not in an except block then raise the custom exc.
                raise exc
            raise self.MaxRetriesExceededError(
                "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                    self.name, request.id, S.args, S.kwargs))

        ret = Retry(exc=exc, when=eta or countdown)

        if is_eager:
            # if task was executed eagerly using apply(),
            # then the retry must also be executed eagerly.
            S.apply().get()
            if throw:
                raise ret
            return ret

        try:
            S.apply_async()
        except Exception as exc:
            raise Reject(exc, requeue=False)
        if throw:
            raise ret
        return ret
Example #16
def get_time_series_data(self, fitbit_user, cat, resource, date=None):
    """ Get the user's time series data """
    try:
        _type = TimeSeriesDataType.objects.get(category=cat, resource=resource)
    except TimeSeriesDataType.DoesNotExist as e:
        logger.exception("The resource %s in category %s doesn't exist" %
                         (resource, cat))
        raise Reject(e, requeue=False)

    # Create a lock so we don't try to run the same task multiple times
    sdat = date.strftime('%Y-%m-%d') if date else 'ALL'
    lock_id = '{0}-lock-{1}-{2}-{3}'.format(__name__, fitbit_user, _type, sdat)
    if not cache.add(lock_id, 'true', LOCK_EXPIRE):
        logger.debug('Already retrieving %s data for date %s, user %s' %
                     (_type, fitbit_user, sdat))
        raise Ignore()

    try:
        fbusers = UserFitbit.objects.filter(fitbit_user=fitbit_user)
        default_period = utils.get_setting('FITAPP_DEFAULT_PERIOD')
        if default_period:
            dates = {'base_date': 'today', 'period': default_period}
        else:
            dates = {'base_date': 'today', 'period': 'max'}
        if date:
            dates = {'base_date': date, 'end_date': date}
        for fbuser in fbusers:
            data = utils.get_fitbit_data(fbuser, _type, **dates)
            if utils.get_setting('FITAPP_GET_INTRADAY'):
                tz_offset = utils.get_fitbit_profile(fbuser,
                                                     'offsetFromUTCMillis')
                tz_offset = tz_offset / 3600 / 1000 * -1  # Converted to positive hours
            for datum in data:
                # Create new record or update existing record
                date = parser.parse(datum['dateTime'])
                if _type.intraday_support and \
                        utils.get_setting('FITAPP_GET_INTRADAY'):
                    resources = TimeSeriesDataType.objects.filter(
                        intraday_support=True)
                    for i, _type in enumerate(resources):
                        # Offset each call by 2 seconds so they don't bog down
                        # the server
                        get_intraday_data(fbuser.fitbit_user, _type.category,
                                          _type.resource, date, tz_offset)
                tsd, created = TimeSeriesData.objects.get_or_create(
                    user=fbuser.user,
                    resource_type=_type,
                    date=date,
                    intraday=False)
                tsd.value = datum['value']
                tsd.save()
        # Release the lock
        cache.delete(lock_id)
    except HTTPTooManyRequests as e:
        # We have hit the rate limit for the user, retry when it's reset,
        # according to the reply from the failing API call
        countdown = e.retry_after_secs + int(
            # Add exponential back-off + random jitter
            random.uniform(2, 4)**self.request.retries)
        logger.debug('Rate limit reached, will try again in {} seconds'.format(
            countdown))
        raise get_time_series_data.retry(exc=e, countdown=countdown)
    except HTTPBadRequest as e:
        # If the resource is elevation or floors, we are just getting this
        # error because the data doesn't exist for this user, so we can ignore
        # the error
        if not ('elevation' in resource or 'floors' in resource):
            exc = sys.exc_info()[1]
            logger.exception("Exception updating data for user %s: %s" %
                             (fitbit_user, exc))
            raise Reject(exc, requeue=False)
    except Exception:
        exc = sys.exc_info()[1]
        logger.exception("Exception updating data for user %s: %s" %
                         (fitbit_user, exc))
        raise Reject(exc, requeue=False)
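The cache.add locking used above is a pattern worth isolating. A sketch assuming Django's cache backend, with the helper name and expiry value invented for illustration:

from celery.exceptions import Ignore
from django.core.cache import cache

LOCK_EXPIRE = 60 * 5  # assumed: five minutes

def run_once(lock_id, work):
    # cache.add is atomic: it succeeds only if the key does not exist yet,
    # so workers racing on the same lock_id get exactly one winner.
    if not cache.add(lock_id, 'true', LOCK_EXPIRE):
        raise Ignore()  # another worker already holds the lock
    try:
        work()
    finally:
        cache.delete(lock_id)  # release the lock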
Example #17
def reject_test_redo(self):
    raise Reject('no reason', requeue=True)
Example #18
    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        if self.max_retries == self.request.retries or status == "FAILURE":
            raise Reject(reason=einfo, requeue=False)
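A sketch of where such an override could live, on a custom base task class; the class name, app and task are assumptions:

from celery import Celery, Task
from celery.exceptions import Reject

app = Celery('demo', broker='amqp://')  # assumed app

class RejectOnFailure(Task):
    acks_late = True  # required for the rejection to reach the broker

    def after_return(self, status, retval, task_id, args, kwargs, einfo):
        if self.max_retries == self.request.retries or status == "FAILURE":
            raise Reject(reason=einfo, requeue=False)

@app.task(base=RejectOnFailure, max_retries=3)
def unreliable():
    ...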
Example #19
def get_intraday_data(fitbit_user, cat, resource, date, tz_offset):
    """
    Get the user's intraday data for a specified date, convert to UTC prior to
    saving.

    The Fitbit API stipulates that intraday data can only be retrieved for one
    day at a time.
    """
    try:
        _type = TimeSeriesDataType.objects.get(category=cat, resource=resource)
    except TimeSeriesDataType.DoesNotExist:
        logger.exception("The resource %s in category %s doesn't exist" %
                         (resource, cat))
        raise Reject(sys.exc_info()[1], requeue=False)
    if not _type.intraday_support:
        # No exception is active here, so log an error and reject with a
        # message rather than reading sys.exc_info().
        logger.error("The resource %s in category %s does not support "
                     "intraday time series" % (resource, cat))
        raise Reject("The resource %s in category %s does not support "
                     "intraday time series" % (resource, cat), requeue=False)

    # Create a lock so we don't try to run the same task multiple times
    sdat = date.strftime('%Y-%m-%d')

    fbusers = UserFitbit.objects.filter(fitbit_user=fitbit_user)
    dates = {'base_date': date, 'period': '1d'}
    try:
        with transaction.atomic():
            for fbuser in fbusers:
                data = utils.get_fitbit_data(fbuser,
                                             _type,
                                             return_all=True,
                                             **dates)
                resource_path = _type.path().replace('/', '-')
                key = resource_path + "-intraday"
                if data[key]['datasetType'] != 'minute':
                    # No active exception here either; reject with a message.
                    logger.error("The resource returned is not "
                                 "minute-level data")
                    raise Reject("The resource returned is not "
                                 "minute-level data", requeue=False)
                intraday = data[key]['dataset']
                logger.info("Date for intraday task: {}".format(date))
                for minute in intraday:
                    datetime = parser.parse(minute['time'], default=date)
                    utc_datetime = datetime + timedelta(hours=tz_offset)
                    utc_datetime = utc_datetime.replace(tzinfo=utc)
                    value = minute['value']
                    # Don't create unnecessary records
                    if not utils.get_setting(
                            'FITAPP_SAVE_INTRADAY_ZERO_VALUES'):
                        if int(float(value)) == 0:
                            continue
                    # Create new record or update existing
                    tsd, created = TimeSeriesData.objects.get_or_create(
                        user=fbuser.user,
                        resource_type=_type,
                        date=utc_datetime,
                        intraday=True)
                    tsd.value = value
                    tsd.save()
            # Release the lock
    except HTTPTooManyRequests:
        # We have hit the rate limit for the user, retry when it's reset,
        # according to the reply from the failing API call
        e = sys.exc_info()[1]
        logger.debug(
            'Rate limit reached for user %s, will try again in %s seconds' %
            (fitbit_user, e.retry_after_secs))
        raise get_intraday_data.retry(exc=e, countdown=e.retry_after_secs)
    except HTTPBadRequest:
        # If the resource is elevation or floors, we are just getting this
        # error because the data doesn't exist for this user, so we can ignore
        # the error
        if not ('elevation' in resource or 'floors' in resource):
            exc = sys.exc_info()[1]
            logger.exception(
                "Exception updating intraday data for user %s: %s" %
                (fitbit_user, exc))
            raise Reject(exc, requeue=False)
    except Exception:
        exc = sys.exc_info()[1]
        logger.exception("Exception updating data for user %s: %s" %
                         (fitbit_user, exc))
        raise Reject(exc, requeue=False)
Example #20
def get_time_series_data(self, fitbit_user, cat, resource, date=None):
    """ Get the user's time series data """

    try:
        _type = TimeSeriesDataType.objects.get(category=cat, resource=resource)
    except TimeSeriesDataType.DoesNotExist as e:
        logger.exception("The resource %s in category %s doesn't exist" %
                         (resource, cat))
        raise Reject(e, requeue=False)

    # Create a lock so we don't try to run the same task multiple times
    if date:
        if isinstance(date, six.string_types):
            date = parser.parse(date)
        sdat = date.strftime('%Y-%m-%d')
    else:
        sdat = 'ALL'
    lock_id = '{0}-lock-{1}-{2}-{3}'.format(__name__, fitbit_user, _type, sdat)
    if not cache.add(lock_id, 'true', LOCK_EXPIRE):
        logger.debug('Already retrieving %s data for date %s, user %s' %
                     (_type, fitbit_user, sdat))
        raise Ignore()

    try:
        with transaction.atomic():
            # Block until we have exclusive update access to this UserFitbit, so
            # that another process cannot step on us when we update tokens
            fbusers = UserFitbit.objects.select_for_update().filter(
                fitbit_user=fitbit_user)
            dates = {'base_date': 'today', 'period': 'max'}
            if date:
                dates = {'base_date': date, 'end_date': date}

            for fbuser in fbusers:
                data = utils.get_fitbit_data(fbuser, _type, **dates)
                for datum in data:
                    # Create new record or update existing record
                    date = parser.parse(datum['dateTime'])
                    tsd, created = TimeSeriesData.objects.get_or_create(
                        user=fbuser.user, resource_type=_type, date=date)
                    tsd.value = datum['value']
                    tsd.save()
            # Release the lock
            cache.delete(lock_id)
    except HTTPTooManyRequests as e:
        # We have hit the rate limit for the user, retry when it's reset,
        # according to the reply from the failing API call
        countdown = e.retry_after_secs + int(
            # Add exponential back-off + random jitter
            random.uniform(2, 4)**self.request.retries)
        logger.debug('Rate limit reached, will try again in {} seconds'.format(
            countdown))
        raise get_time_series_data.retry(exc=e, countdown=countdown)
    except HTTPBadRequest as e:
        # If the resource is elevation or floors, we are just getting this
        # error because the data doesn't exist for this user, so we can ignore
        # the error
        if not ('elevation' in resource or 'floors' in resource):
            logger.exception("Exception updating data: ".format(e))
            raise Reject(e, requeue=False)
    except Exception as e:
        logger.exception("Exception updating data: %s" % e)
        raise Reject(e, requeue=False)
Example #21
def requeues(self):
    if not getattr(self.request, 'redelivered', False):
        raise Reject('no reason', requeue=True)
    print('received two times')
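This is a "process on second delivery" pattern: the first delivery is pushed back to the queue and only the redelivered copy runs. A sketch of the decorator the method presumably carries (app and broker assumed), since requeue=True only matters with late acknowledgement:

from celery import Celery
from celery.exceptions import Reject

app = Celery('demo', broker='amqp://')  # assumed app

@app.task(bind=True, acks_late=True)  # late ack makes requeue=True meaningful
def requeues(self):
    if not getattr(self.request, 'redelivered', False):
        raise Reject('no reason', requeue=True)  # first delivery: push back
    print('received two times')  # redelivered copy: actually run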
Example #22
    def test_attrs(self):
        x = Reject('foo', requeue=True)
        assert x.reason == 'foo'
        assert x.requeue
Example #23
    def test_repr(self):
        assert repr(Reject('foo', True))
Example #24
def do_job(self, tokens, task_color, stage_in_source, stage_in_dest,
           stage_out_dest, stage_in_source_path, stage_in_dest_path,
           stage_out_dest_path):

    # Bind task_id before the refresh callback can fire, since the
    # callback closes over it.
    task_id = do_job.request.id

    def post_message(payload):
        # Report progress to the messenger service.
        payload.update({'task_id': task_id, 'task_color': task_color})
        requests.post('http://localhost:8081/api/messenger',
                      headers={'content-type': 'application/json'},
                      data=json.dumps(payload))

    def post_refresh_message(token_data):
        # Forward the refreshed transfer token as a message.
        access_token = token_data.by_resource_server[
            'transfer.api.globus.org']['access_token']
        post_message({'key_message': access_token, 'step': '1'})

    auth_client = dill.loads(redis_store.get('auth_client'))
    transfer_tokens = tokens['transfer.api.globus.org']
    authorizer = globus_sdk.RefreshTokenAuthorizer(
        transfer_tokens['refresh_token'],
        auth_client,
        transfer_tokens['access_token'],
        expires_at=transfer_tokens['expires_at_seconds'],
        on_refresh=post_refresh_message)
    tc = TransferClient(authorizer=authorizer)

    # Stage in.
    data = globus_sdk.TransferData(tc, stage_in_source, stage_in_dest,
                                   label="stagein")
    data.add_item(stage_in_source_path, stage_in_dest_path, True)
    status = tc.submit_transfer(data)
    post_message({'message': '[' + task_id + ']Queue wait is done, '
                             'now initiating Stage in....', 'step': '2'})
    tc.task_wait(status["task_id"])

    if tc.get_task(status["task_id"])['status'] == "SUCCEEDED":
        post_message({'message': '[' + task_id + ']      Stage In succeeded',
                      'step': '2'})
    else:
        post_message({'message': '[' + task_id + ']      Stage In failed, '
                                 'canceling the job..... ', 'step': '2'})
        # Stop and delete the job.
        raise Reject("Stage in Failed", requeue=False)

    # Run the (simulated) job.
    post_message({'message': '[' + task_id + ']Running the job', 'step': '3'})
    time.sleep(3)
    post_message({'message': '[' + task_id + ']Job is done', 'step': '3'})

    # Stage out.
    post_message({'message': '[' + task_id + ']      Initiating Stage out.... ',
                  'step': '4'})
    data = globus_sdk.TransferData(tc, stage_in_dest, stage_out_dest,
                                   label="stageout")
    data.add_item(stage_in_dest_path, stage_out_dest_path, True)
    status = tc.submit_transfer(data)
    tc.task_wait(status["task_id"])

    if tc.get_task(status["task_id"])['status'] == "SUCCEEDED":
        post_message({'message': '[' + task_id + ']      Stage Out succeeded ',
                      'step': '4'})
    else:
        post_message({'message': '[' + task_id + ']      Stage Out failed, '
                                 'canceling the job.....', 'step': '4'})
        raise Reject("Stage out Failed", requeue=False)
Example #25
def get_tenant_or_raise_reject(tenant_id: int) -> TenantModel:
    try:
        tenant = TenantModel.objects.get(id=tenant_id)
    except TenantModel.DoesNotExist as ex:
        raise Reject(ex, requeue=False) from ex
    return tenant
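A sketch of how the helper might be called from a task; the task name and body are assumptions (create_schema mirrors Example #7):

from celery import Celery

app = Celery('tenants', broker='amqp://')  # assumed app

@app.task(acks_late=True)
def provision_tenant(tenant_id: int) -> None:
    # Reject (no requeue) propagates out of the helper if the tenant is missing.
    tenant = get_tenant_or_raise_reject(tenant_id)
    tenant.create_schema(check_if_exists=True)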
Example #26
    def retry(self,
              args=None,
              kwargs=None,
              exc=None,
              throw=True,
              eta=None,
              countdown=None,
              max_retries=None,
              **options):
        """Retry the task, adding it to the back of the queue.

        Example:
            >>> from imaginary_twitter_lib import Twitter
            >>> from proj.celery import app

            >>> @app.task(bind=True)
            ... def tweet(self, auth, message):
            ...     twitter = Twitter(oauth=auth)
            ...     try:
            ...         twitter.post_status_update(message)
            ...     except twitter.FailWhale as exc:
            ...         # Retry in 5 minutes.
            ...         raise self.retry(countdown=60 * 5, exc=exc)

        Note:
            Although the task will never return above as `retry` raises an
            exception to notify the worker, we use `raise` in front of the
            retry to convey that the rest of the block won't be executed.

        Arguments:
            args (Tuple): Positional arguments to retry with.
            kwargs (Dict): Keyword arguments to retry with.
            exc (Exception): Custom exception to report when the max retry
                limit has been exceeded (default:
                :exc:`~@MaxRetriesExceededError`).

                If this argument is set and retry is called while
                an exception was raised (``sys.exc_info()`` is set)
                it will attempt to re-raise the current exception.

                If no exception was raised it will raise the ``exc``
                argument provided.
            countdown (float): Time in seconds to delay the retry for.
            eta (~datetime.datetime): Explicit time and date to run the
                retry at.
            max_retries (int): If set, overrides the default retry limit for
                this execution.  Changes to this parameter don't propagate to
                subsequent task retry attempts.  A value of :const:`None`,
                means "use the default", so if you want infinite retries you'd
                have to set the :attr:`max_retries` attribute of the task to
                :const:`None` first.
            time_limit (int): If set, overrides the default time limit.
            soft_time_limit (int): If set, overrides the default soft
                time limit.
            throw (bool): If this is :const:`False`, don't raise the
                :exc:`~@Retry` exception, that tells the worker to mark
                the task as being retried.  Note that this means the task
                will be marked as failed if the task raises an exception,
                or successful if it returns after the retry call.
            **options (Any): Extra options to pass on to :meth:`apply_async`.

        Raises:

            celery.exceptions.Retry:
                To tell the worker that the task has been re-sent for retry.
                This always happens, unless the `throw` keyword argument
                has been explicitly set to :const:`False`, and is considered
                normal operation.
        """
        request = self.request
        retries = request.retries + 1
        max_retries = self.max_retries if max_retries is None else max_retries

        # Not in worker or emulated by (apply/always_eager),
        # so just raise the original exception.
        if request.called_directly:
            # raises orig stack if PyErr_Occurred,
            # and augments with exc' if that argument is defined.
            raise_with_context(exc or Retry('Task can be retried', None))

        if not eta and countdown is None:
            countdown = self.default_retry_delay

        is_eager = request.is_eager
        S = self.signature_from_request(request,
                                        args,
                                        kwargs,
                                        countdown=countdown,
                                        eta=eta,
                                        retries=retries,
                                        **options)

        if max_retries is not None and retries > max_retries:
            if exc:
                # On Py3: will augment any current exception with
                # the exc' argument provided (raise exc from orig)
                raise_with_context(exc)
            raise self.MaxRetriesExceededError(
                "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                    self.name, request.id, S.args, S.kwargs),
                task_args=S.args,
                task_kwargs=S.kwargs)

        ret = Retry(exc=exc, when=eta or countdown)

        if is_eager:
            # if task was executed eagerly using apply(),
            # then the retry must also be executed eagerly.
            S.apply().get()
            if throw:
                raise ret
            return ret

        try:
            S.apply_async()
        except Exception as exc:
            raise Reject(exc, requeue=False)
        if throw:
            raise ret
        return ret
Example #27
File: low.py Project: kodless/leek
def rejected_task(self):
    raise Reject("Test rejection", requeue=False)
Example #28
    def test_repr(self):
        self.assertTrue(repr(Reject('foo', True)))
Example #29
    def rejecting():
        raise Reject()
Example #30
def print_hi(name):
    print(f'Hi, {name}')
    raise Reject(Exception('dead-letter exception'), requeue=False)
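Finally, requeue=False pays off when the broker routes rejected messages somewhere. A sketch of a RabbitMQ dead-letter setup; the queue and exchange names are assumptions, while the x-dead-letter-* arguments are standard RabbitMQ:

from celery import Celery
from kombu import Exchange, Queue

app = Celery('demo', broker='amqp://')  # assumed app

dead_letter_queue = Queue(
    'dead_letter', Exchange('dead_letter', type='direct'),
    routing_key='dead_letter')

default_queue = Queue(
    'default', Exchange('default', type='direct'), routing_key='default',
    queue_arguments={
        # The broker moves rejected (requeue=False) messages here.
        'x-dead-letter-exchange': 'dead_letter',
        'x-dead-letter-routing-key': 'dead_letter',
    })

app.conf.task_queues = (default_queue, dead_letter_queue)
app.conf.task_default_queue = 'default'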