Code Example #1
File: monitoring.py Project: inc0/selena
def test_service(service):
    task_uuid = str(uuid4())
    monitored_phrases, cached_wordchecks = _get_monitored_phrases(service)
    cases = _get_test_cases(service, monitored_phrases)
    start_time = int(time.time())
    cases_cache = {}
    jobs = []
    for case in cases:
        agent_queue = django_rq.get_queue(case['agent_queue_name'])
        case.update({'uuid': task_uuid})
        job = agent_queue.enqueue_call(
            func=run_test,
            kwargs={'config': case, 'start_time': start_time},
            timeout=45,
            result_ttl=180,
        )
        jobs.append((job.id, case['agent_queue_name']))
        cases_cache[job.id] = case
    queue = django_rq.get_queue(
        name='monitors' if 'monitors' in settings.RQ_QUEUES else 'default',
    )
    queue.enqueue_call(
        func=_test_service_summary,
        kwargs={
            'service_id': service.id,
            'task_uuid': task_uuid,
            'start_time': start_time,
            'jobs': jobs,
            'sensitivity': service.sensitivity,
            'cases': cases_cache,
            'wordchecks': cached_wordchecks,
        },
        timeout=45,
        result_ttl=0,
    )
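
Note: get_queue() looks queue names up in the RQ_QUEUES Django setting, so the 'monitors'/'default' fallback above only works for names declared there. A minimal, illustrative sketch of that setting (hosts, ports, and the timeout value here are assumptions, not taken from the project):

# settings.py -- illustrative django-rq configuration
RQ_QUEUES = {
    'default': {
        'HOST': 'localhost',
        'PORT': 6379,
        'DB': 0,
    },
    'monitors': {
        'HOST': 'localhost',
        'PORT': 6379,
        'DB': 0,
        'DEFAULT_TIMEOUT': 360,  # default job timeout for this queue
    },
}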
Code Example #2
File: summary.py Project: SashaBorandi/detective.io
 def summary_export(self, bundle, request):
     self.method_check(request, allowed=['get'])
     # check from cache
     cache_key = "summary_export_{type}_{query}" \
         .format( type  = request.GET.get("type", "all"),
                  query = hashlib.md5(request.GET.get("q", "null")).hexdigest())
     response_in_cache = utils.topic_cache.get(self.topic, cache_key)
     if response_in_cache: # could be empty or str("<filename>")
         logger.debug("export already exists in cache")
         response = dict(status="ok", file_name=response_in_cache)
     else:
         # return a quick response
         response = dict(
             status = "enqueued")
         # check if a job already exists
         for job in django_rq.get_queue('high').jobs:
             if job.meta["cache_key"] == cache_key:
                 response["token"] = job.id
                 logger.debug("job_already_exist")
                 break
         else:
             # enqueue the job
             queue = django_rq.get_queue('high', default_timeout=360)
             job = queue.enqueue(render_csv_zip_file,
                                 topic      = self.topic,
                                 model_type = request.GET.get("type"),
                                 query      = json.loads(request.GET.get('q', 'null')),
                                 cache_key  = cache_key)
             # save the cache_key in the meta data so we can later check
             # whether a job already exists for this key
             job.meta["cache_key"] = cache_key
             job.save()
             response['token'] = job.id
     self.log_throttled_access(request)
     return response
Code Example #3
File: tasks.py Project: DanRunfola/OIPA
def delete_all_tasks_from_queue(queue_name):
    if queue_name == "failed":
        q = django_rq.get_failed_queue()
    elif queue_name == "parser":
        q = django_rq.get_queue("parser")
    else:
        q = django_rq.get_queue("default")

    while True:
        job = q.dequeue()
        if not job:
            break
        job.delete()
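
Note: django_rq.get_failed_queue() comes from the legacy failed-queue API; newer RQ releases track failures per queue in a FailedJobRegistry instead. A hedged sketch of the modern equivalent (assuming a recent rq/django-rq):

import django_rq
from rq.registry import FailedJobRegistry

def delete_failed_jobs(queue_name='default'):
    # Failed jobs now live in a per-queue registry, not a global queue.
    queue = django_rq.get_queue(queue_name)
    registry = FailedJobRegistry(queue=queue)
    for job_id in registry.get_job_ids():
        # delete_job=True also removes the job hash itself from Redis
        registry.remove(job_id, delete_job=True)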
Code Example #4
File: qm2.py Project: 0xDEC0DE8/LookingGlass
 def showStats(self):
     import pprint
     q = django_rq.get_queue()
     qp = django_rq.get_queue('needs_passphrase')
     s = django_rq.get_scheduler()
     pp = pprint.PrettyPrinter()
     # Python 3 print() calls (the original used Python 2 print statements)
     print()
     print('Job Queue:')
     pp.pprint(q.jobs)
     pp.pprint(qp.jobs)
     print()
     print('Scheduled tasks:')
     pp.pprint(s.get_jobs(with_times=True))
     print()
Code Example #5
    def handle(self, *args, **kwargs):
        queue = django_rq.get_queue('default')

        return queue.enqueue_call(
            func=foo,
            args=(random.randint(1, 100),),
        ).id
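
The command returns the job id, which a caller can use later to look the job up and poll for its result. A minimal sketch of that lookup (the 'default' queue name is carried over from the example):

import django_rq
from rq.job import Job

def get_job_result(job_id):
    # Re-fetch the job from Redis using the id returned by handle()
    job = Job.fetch(job_id, connection=django_rq.get_connection('default'))
    return job.result  # None until the worker has finished the job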
Code Example #6
def fetch_job(job_id):
    queue = django_rq.get_queue()
    try:
        job = queue.fetch_job(job_id)
    except Exception:  # narrowed from a bare except, which also swallows SystemExit
        job = queue.safe_fetch_job(job_id)
    return job
Code Example #7
    def handle(self, *args, **kwargs):
        queue = django_rq.get_queue('default')

        return queue.enqueue_call(
            func=evil,
            timeout=15,
        ).id
Code Example #8
File: reports.py Project: andrzej-jankowski/ralph
 def get_data(self, *args, **kwargs):
     cache_key = get_cache_key(
         self.data_provider.func_name,
         *args,
         **kwargs
     )
     cache = get_cache(
         self.data_provider.async_report_cache_alias,
     )
     data = cache.get(cache_key)
     if data is not None:
         return None if data == 'in progress' else data
     cache.set(
         cache_key,
         'in progress',
         self.data_provider.async_report_results_expiration,
     )
     queue = django_rq.get_queue(
         name='reports' if 'reports' in settings.RQ_QUEUES else 'default',
     )
     queue.enqueue_call(
         func='%s.%s' % (
             self.data_provider.__module__,
             self.data_provider.func_name,
         ),
         args=args,
         kwargs=kwargs,
         timeout=3600,
         result_ttl=0,
     )
Code Example #9
File: admin.py Project: funkyminh/archiprod
    def old_fashioned_uploads(self, request):
        """ Admin view to upload file from archiprod-uploads server
        and monitor archive currently transfered.
        See archive command for effective archive transfert.
        """
        opts = self.model._meta
        app_label = opts.app_label
        if request.method == 'POST':
            form = UploadFileFromServer(request.POST)
            if form.is_valid():
                file_path = form.cleaned_data['file_path']
                media = form.cleaned_data['media']
                media_id = None
                if media is not None:
                    media_id = media.id
                queue = django_rq.get_queue('archive')
                queue.enqueue(call_command, args=('archive', file_path, media_id), timeout=86400)

        form = UploadFileFromServer()
        return render_to_response('admin/archives/media/upload-from-server.html',
                                  {'form': form, 'app_label': app_label, 'opts': opts,
                                   'current_files_copied_in_queue': self._current_files_copied_in_queue(),
                                   'current_file_copied_in_progress': self._current_file_copied_in_progress(),
                                   'failed_copies': self._failed_copies()},
                                  context_instance=RequestContext(request))
Code Example #10
File: healthcheck.py Project: 18F/calc
def healthcheck(request):
    '''
    Return a JSON response with health-related information about the
    current state of the app.

    For more details, see `docs/monitoring.md`.
    '''

    canonical_url = get_canonical_url(request)
    request_url = request.build_absolute_uri()

    results = {
        'version': __version__,
        'canonical_url': canonical_url,
        'request_url': request_url,
        'canonical_url_matches_request_url': canonical_url == request_url,
        'rq_jobs': len(django_rq.get_queue().jobs),
        **get_database_info(),
    }

    ok = True

    if not (results['is_database_synchronized'] and
            results['canonical_url_matches_request_url']):
        ok = False

    # We're always returning 200 but indicating whether everything
    # is *really* ok in the `is_everything_ok` key. We used to
    # return 500 if the healthcheck failed, but this ended up
    # causing odd behavior with CloudFront. For more details, see:
    #
    # https://github.com/18F/calc/issues/1516
    results['is_everything_ok'] = ok

    return JsonResponse(results, status=200)
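
A small efficiency note on the 'rq_jobs' line above: queue.jobs fetches and deserializes every queued job just to count them, whereas queue.count issues a single Redis LLEN. A hedged alternative, if only the number is needed:

import django_rq

# Same count, without deserializing every queued job payload.
rq_jobs = django_rq.get_queue().count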
Code Example #11
File: utils.py Project: goldenhelix/avocado
def async_get_result_rows(context, view, query_options, job_options=None):
    """
    Creates a new job to asynchronously get result rows and returns the job ID.

    Args:
        See get_result_rows argument list.

    Keyword Arguments:
        Set as properties on the returned job's meta.

    Returns:
        The ID of the created job.
    """
    if not job_options:
        job_options = {}

    queue = get_queue(settings.ASYNC_QUEUE)
    job = queue.enqueue(get_result_rows,
                        context,
                        view,
                        query_options,
                        evaluate_rows=True)
    job.meta.update(job_options)
    job.save()

    return job.id
Code Example #12
File: tasks.py Project: zimmerman-zimmerman/OIPA
def start_searchable_activities_task(counter=0):
    workers = Worker.all(connection=redis_conn)
    queue = django_rq.get_queue("parser")

    has_other_jobs = False
    already_running_update = False

    for w in workers:
        if len(w.queues):
            if w.queues[0].name == "parser":
                current_job = w.get_current_job()
                if current_job:
                    if ('start_searchable_activities_task'
                            not in current_job.description):
                        has_other_jobs = True
                    if ('update_searchable_activities'
                            in current_job.description):
                        already_running_update = True

    if already_running_update:
        # update_searchable_activities already running or other
        # start_searchable_activities_task running, invalidate task
        pass
    elif not has_other_jobs:
        queue.enqueue(update_searchable_activities)
    elif counter > 180:
        raise Exception(
            "Waited for 30 min, still jobs running, so invalidating this "
            "task. If this happens please contact OIPA devs!")
    else:
        counter += 1
        time.sleep(120)
        queue.enqueue(start_searchable_activities_task,
                      args=(counter,), timeout=300)
Code Example #13
File: tasks.py Project: ReJeCtAll/ralph
def discover_all(interactive=False, update_existing=False, outputs=None):
    """Runs discovery on all networks defined in the database."""
    sanity_check()
    if outputs:
        stdout, stdout_verbose, stderr = outputs
    else:
        stdout = output.get(interactive)
    nets = Network.objects.filter(
        environment__isnull=False,
        environment__queue__isnull=False,
    )
    for net in nets:
        if interactive:
            discover_network(
                net.network,
                interactive=True,
                update_existing=True,
            )
        else:
            queue = django_rq.get_queue()
            queue.enqueue(
                discover_network,
                net.network,
                update_existing=update_existing,
            )
    stdout()
Code Example #14
File: tasks.py Project: zimmerman-zimmerman/OIPA
def force_parse_by_publisher_ref(org_ref):
    queue = django_rq.get_queue("parser")
    for e in Dataset.objects.filter(publisher__publisher_iati_id=org_ref):
        queue.enqueue(force_parse_source_by_id, args=(e.id,), timeout=14400)

    if settings.ROOT_ORGANISATIONS:
        queue.enqueue(start_searchable_activities_task, args=(0,), timeout=300)
Code Example #15
File: tasks.py Project: ReJeCtAll/ralph
def _select_run_method(context, interactive, function, after):
    """Return a function that either executes the task directly (if
    `interactive` is True), enqueues it right away or schedules its enqueueing
    (if `after` is given).
    """

    if interactive:
        return function
    set_queue(context)
    if after:
        # FIXME: what about timeout= and result_ttl= for scheduled tasks?
        scheduler = django_rq.get_scheduler(context['queue'])
        if isinstance(after, timedelta):
            enqueue = scheduler.enqueue_in
        elif isinstance(after, datetime):
            enqueue = scheduler.enqueue_at
        else:
            raise NotImplementedError(
                "after={!r} not supported.".format(after),
            )
        return partial(enqueue, after, function)
    queue = django_rq.get_queue(
        context['queue'],
    )
    return partial(_enqueue, queue, function)
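
For reference, the two scheduler entry points chosen above take different time arguments: enqueue_in expects a relative timedelta, while enqueue_at expects an absolute datetime that rq-scheduler interprets as UTC. A minimal usage sketch (send_report is a placeholder task, not from the project):

from datetime import datetime, timedelta
import django_rq

def send_report():
    print("report sent")

scheduler = django_rq.get_scheduler('default')
scheduler.enqueue_in(timedelta(minutes=10), send_report)        # relative delay
scheduler.enqueue_at(datetime(2030, 1, 1, 12, 0), send_report)  # absolute UTC time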
Code Example #16
File: tests.py Project: funkyminh/archiprod
    def test_media_encoded_state(self):
        # We create a media object without encoding
        self.media = MediaFactory.build(title='test_media_encoded_state', file__from_path=os.path.join(os.path.dirname(__file__), 'tests/data/audio-mini.mp3'))
        self.media.save(encode=False)

        # Check it was created and its encoding state is "not encoded"
        self.assertEqual(self.media.id, 1)
        self.assertEqual(self.media.encoding_state, ENCODING_NOT_ENCODED)

        # Put it in the encoding queue
        queue = django_rq.get_queue('default')
        job = queue.enqueue(call_command, args=('encode', self.media.id))

        # Check the job is queued
        self.assertTrue(job.is_queued)

        # Start encoding
        worker = get_worker('default')
        worker.work(burst=True)

        # Check the encoding state is "encoded" and the job has finished
        self.assertEqual(self.media.encoding_state, ENCODING_ENCODED)
        self.assertFalse(job.is_queued)
        self.assertTrue(job.is_finished)
        failed_queue = Queue(name='failed', connection=queue.connection)
        self.assertFalse(job.id in failed_queue.job_ids)
Code Example #17
File: Gift.py Project: Fightclub/server
 def Redeem(self):
   if self.status == Gift.GIFT_STATUS_CREATED:
     # filter()[:1] returns a (possibly empty) queryset and never raises
     # DoesNotExist, so the original try/except here was dead code;
     # .first() returns a matching card or None.
     card = Card.Card.objects.filter(vendor=self.product.vendor, user=None, master=False).first()
     expireTimeUTC = None
     if card:
       card.user = self.receiver
       card.save()
       queue = rq.get_queue('high')
       load = queue.enqueue(Card.SetBalance, card.id, self.product.price)
       scheduler = rq.get_scheduler('low')
       expireTimeUTC = datetime.utcnow() + timedelta(minutes=5)
       # rq-scheduler interprets enqueue_at() times as UTC, so schedule with
       # the UTC expiry rather than a naive local time
       unload = scheduler.enqueue_at(expireTimeUTC, CheckRedemption, self.id)
       self.activated = datetime.utcnow().replace(tzinfo=utc)
       self.status = self.GIFT_STATUS_ACTIVE
       self.payment = card
       self.save()
   elif self.status == Gift.GIFT_STATUS_ACTIVE:
     card = self.payment
     expireTimeUTC = self.activated + timedelta(minutes=5)
   return (card, expireTimeUTC)
Code Example #18
File: job.py Project: nicolasleger/detective.io
 def obj_get(self, bundle, **kwargs):
     """
      Return the Redis job document for the provided id.
     """
     queue = django_rq.get_queue('default')
     job = Job.fetch(kwargs['pk'], connection=queue.connection)
     return Document(**job.__dict__)
Code Example #19
    def save(self, *args, **kwargs):
        if self.name:
            self.name = self.name.strip()

        if (self.status == 'ready for hathi') or (self.status == 'retry'):
            if self.status == 'retry':
                # Reset the status on the failed KDips so they will be retried.
                for k in self.kdip_set.all():
                    if k.status == 'upload_fail':
                        k.status = 'new'
                        k.save()
            # Send volumes to the upload task.
            self.status = 'uploading'
            # Add the rq task.
            # At this point the work is passed off to rq, which executes
            # `tasks.py`.
            from tasks import upload_for_ht
            queue = django_rq.get_queue('high')
            queue.enqueue(upload_for_ht, self)

        elif self.status == 'ready for zephir':
            zephir_status = send_to_zephir(self)
            # Set status
            self.status = zephir_status

        super(Job, self).save(*args, **kwargs)
Code Example #20
    def test_process_header_only(self):
        """
        Test that the task will properly process the file and extract
        location and scan_number from the filename, with the rest of the
        values being default.
        """
        file_data = ('\\*File list\n'
                     '\\Version: 0x05120130\n'
                     '\\*File list end\n')

        scan_file = SimpleUploadedFile('s0001a_r.001',
                                       file_data.encode('cp1252'),
                                       content_type='text/plain')

        # `async` became a reserved word in Python 3.7; recent django-rq
        # releases renamed this parameter to `is_async`
        queue = get_queue('default', is_async=False)
        job = queue.enqueue(tasks.process_nanoscope_file, scan_file)

        self.assertEqual(len(job.result), 1)

        data = {
            'rms': 0.0,
            'zrange': 0.0,
            'size': 0.0,
            'scan_number': 1,
            'location': 'r',
            'image_type': 'Raw',
            'state': 'raw',
            'content_type': 'application/octet-stream',
        }
        self.assertDictEqual(job.result[0].kwargs, data)
Code Example #21
def import_file(request):

    """Import a CSV or XLS file into the database.

    This view will create a report with the uploaded file and the email address
    specified on the form, then it will save the querystrings (if any) and
    get the full path to the uploaded file. After that we just queue the
    processing task with django-rq.

    Args:
        request: The main request

    Returns:
        A template confirming that the task has been queued
    """
    if request.method == 'POST':
        try:
            # Save the data
            report = Report(email=request.POST.get("email", ''),
                            original_file=request.FILES['original_file'],
                            label=str(datetime.now()))
            report.save()
            # Start the task queue
            queue = django_rq.get_queue('importer')
            queue.enqueue(trigger_queue, args=(request.META['QUERY_STRING'], report))
            return render_to_response('thanks.html')
        except Exception as e:
            logger.error("CRITICAL ERROR: THE TASK COULDN'T BE EXECUTED.")
            logger.error(e)
            return render_to_response('error.html')
    else:
        # An unbound form for the initial GET; binding empty POST data here
        # would show spurious validation errors on first load.
        form = ReportForm()
        return render_to_response('import.html', {'form': form},
                                  context_instance=RequestContext(request))
Code Example #22
    def handle(self, *args, **kwargs):
        if sum([kwargs['limit'], kwargs['job_id']]) > 1:
            raise CommandError("You can't mix limit and job_id options.")
        elif sum([kwargs['limit'], kwargs['job_id']]) == 0:
            raise CommandError("Choose something...")
        if not args:
            raise CommandError("Specify limit or job_id.")

        queue = django_rq.get_queue('default')

        if kwargs.get('limit', False):
            try:
                limit = int(args[0])
            except (IndexError, ValueError) as e:
                raise CommandError(e)
            job = queue.enqueue_call(
                func=prime_numbers,
                args=(limit,),
                timeout=3600,
                result_ttl=60,
            )
            self.stdout.write("JOB ID = %s" % job.id)

        if kwargs.get('job_id', False):
            job = rq.job.Job.fetch(args[0], django_rq.get_connection())

        while job.result is None and not job.is_failed:
            time.sleep(0.5)
            sys.stdout.write('.')
            sys.stdout.flush()
        if job.is_failed:
            self.stdout.write('\nJOB FAILED\n')
        else:
            self.stdout.write('\n%s\n' % job.result)
Code Example #23
def ChangeInitiatorHelper(requestDic,owner):
    '''
    Change the initiator for SCST (do not change it in the saturnring DB though)
    '''
    logger = getLogger(__name__)
    try:
        user = User.objects.get(username=owner)
        iqntar = requestDic['iqntar']
        newini = requestDic['newini']
        target = Target.objects.get(owner=user, iqntar=iqntar)
    except Exception:  # narrowed from a bare except
        errorstring = format_exc()
        logger.error(errorstring)
        return (-1, errorstring)

    config = ConfigReader()
    numqueues = config.get('saturnring','numqueues')
    queuename = 'queue'+str(hash(target.targethost)%int(numqueues))
    queue = get_queue(queuename)
    job = queue.enqueue(ExecChangeInitiator,args=(iqntar,newini),timeout=45,ttl=60)
    while (job.result != 0) and (job.result != -1):
        if job.is_failed:
            break  # a failed job's result stays None, which would loop forever
        sleep(1)
        logger.info("...Working on changing target %s initiator name to %s" % (iqntar, newini))
        #logger.info(str(job))
    return (job.result, str(job))
Code Example #24
File: models.py Project: funkyminh/archiprod
    def save(self, encode=True, *args, **kwargs):
        # Set slug to the media
        if not self.slug:
            if self.work:
                # a related manager is always truthy; check for actual rows
                if self.work.composers.exists():
                    composers = '-'.join(["%s" % (slugify(c.__unicode__())) for c in self.work.composers.all()])
                    self.slug = 'x%s_%s-%s' % (hexlify(os.urandom(3)), slugify(self.work.title), slugify(composers))
                else:
                    self.slug = 'x%s_%s' % (hexlify(os.urandom(3)), slugify(self.work.title))
                self.slug = self.slug[:50]
            elif self.title:
                self.slug = 'x%s_%s' % (hexlify(os.urandom(3)), slugify(self.title[:42]))
            else:
                self.slug = 'x%s' % hexlify(os.urandom(3))

        if self.file:
            # Set media duration (if file exists)

            self.duration = get_media_duration(self.file.path)
            # Set mimetype
            self.mime_type = mimetypes.guess_type(self.file.path)[0]
            if self.mime_type is None:
                # TODO: this is needed for the original archiprod archive
                # files; guess_type can't find the mimetype because these
                # archives have no file extension
                if self.file.name.find('VI') != -1:
                    self.mime_type = 'video'

        super(Media, self).save(*args, **kwargs)
        # encode param allow to bypass the encoding process
        if self.file and encode:
            # Call asynchronous encode command
            if self.encoding_state == ENCODING_NOT_ENCODED:
                queue = django_rq.get_queue('default')
                queue.enqueue(call_command, args=('encode', self.id, ), timeout=86400)
Code Example #25
File: admin.py Project: funkyminh/archiprod
 def _current_files_copied_in_queue(self):
     """ return current archives being processed (from server to archiprod) """
     _in_queue = []
     queue = django_rq.get_queue('archive')
     for job in queue.jobs:
         _in_queue.append(job.args[0])
     return _in_queue
Code Example #26
File: manual.py Project: wmatyskiewicz/ralph
def scan_address(address, plugins):
    """Queue manual discovery on the specified address."""

    try:
        network = Network.from_ip(address)
    except IndexError:
        raise NoQueueError(
            "Address {0} doesn't belong to any configured "
            "network.".format(address),
        )
    if not network.queue:
        raise NoQueueError(
            "The network {0} has no discovery queue.".format(network),
        )
    ipaddress, created = IPAddress.objects.get_or_create(address=address)
    queue_name = network.queue.name
    queue = django_rq.get_queue(queue_name)
    job = queue.enqueue_call(
        func=_scan_address,
        args=(
            address,
            plugins,
        ),
        kwargs={
            'snmp_community': ipaddress.snmp_community,
#            'snmp_version': ipaddress.snmp_version,
            'snmp_version': '2c',
            'http_family': ipaddress.http_family,
            'snmp_name': ipaddress.snmp_name,
        },
        timeout=60,
        result_ttl=3600,
    )
    return job
Code Example #27
    def receive_resources_async(self):
        aws_user = AwsUser.objects.get(user=self.user)
        aws_user.resources_last_updated = datetime.now(timezone.utc)
        aws_user.save()

        self.initialize_connections()
        aws_accounts = aws_user.aws_accounts.all()
        regions = Region.objects.all()
        params = {'connections': self.__CONNECTIONS,
                  'regions': regions,
                  'aws_accounts': aws_accounts}

        queue = django_rq.get_queue('high')

        queue.enqueue(receive_keypairs_async, **params)
        queue.enqueue(receive_security_groups_async, **params)
        queue.enqueue(receive_amis_async, **params)
        queue.enqueue(receive_instances_async, **params)
        queue.enqueue(add_instance_prices)
        queue.enqueue(receive_snapshots_async, **params)
        queue.enqueue(receive_volumes_async, **params)
        queue.enqueue(receive_elastic_ips_async, **params)
        queue.enqueue(receive_load_balancers_async,
                      self.__ELB_CONNECTIONS,
                      regions,
                      aws_accounts)
Code Example #28
    def clear_queue(self):
        """Clear all queued analyze_frame jobs, and their corresponding frames"""

        frame_class = self.get_frame_class()

        queue = django_rq.get_queue()
        jobs = queue.get_jobs()
        cleared = 0
        frames_deleted = 0
        for job in jobs:
            # Delete jobs for this task but not scheduler jobs
            if (job.meta.get('analysis.task.key') == self.key) and not job.meta.get('analysis.task.schedule'):
                cleared += 1
                job.cancel()

                frame_id = job.meta.get('analysis.frame.id')
                if frame_id:
                    # Delete the corresponding frame
                    try:
                        frame_class.objects.filter(pk=frame_id, calculated=False).delete()
                        frames_deleted += 1
                    except Exception as e:
                        logger.warning(e, exc_info=True)

        return cleared, frames_deleted
Code Example #29
def DeleteTarget(requestDic,owner):
    '''
    Delete iSCSI target.
    This function dispatches a request to the worker queue of the Saturn host
    to delete the object (iSCSI target object).
    requestDic may contain any of the following:
    iqntar (target name),
    iqnini (initiator name; deletes all targets provisioned for that initiator),
    targethost (DNS name of the Saturn server).
    '''
    logger = getLogger(__name__)
    queryset = None
    if 'iqntar' in requestDic:
        queryset=Target.objects.filter(iqntar=requestDic['iqntar'],owner=owner)
    if 'iqnini' in requestDic:
        if queryset is None:
            queryset = Target.objects.filter(iqnini=requestDic['iqnini'], owner=owner)
        else:
            # a queryset has no .objects attribute; filter it directly
            queryset = queryset.filter(iqnini=requestDic['iqnini'])
    if 'targethost' in requestDic:
        if queryset is None:
            queryset = Target.objects.filter(targethost=requestDic['targethost'], owner=owner)
        else:
            queryset = queryset.filter(targethost=requestDic['targethost'])
    if queryset is None:
        return (1,"No targets to delete, or check delete API call")
    config = ConfigReader()
    numqueues = config.get('saturnring','numqueues')
    jobs = []
    logger.info("DeleteTarget has %d targets to delete" % (queryset.count()))
    for obj in queryset:
        logger.info("DeleteTarget Working on deleting target %s" % (obj.iqntar,))
        queuename = 'queue'+str(hash(obj.targethost)%int(numqueues))
        queue = get_queue(queuename)
        # note: do NOT reset jobs inside the loop, or only the last job
        # would be tracked by the polling loop below
        jobs.append((queue.enqueue(DeleteTargetObject, args=(obj.iqntar,), timeout=45, ttl=60), obj.iqntar))
        logger.info("Using queue %s for deletion" % (queuename,))
    rtnStatus = {}
    rtnFlag = 0
    numDone = 0
    while numDone < len(jobs):
        sleep(1)
        for ii in range(len(jobs)):
            if jobs[ii] == 0:
                continue
            (job, target) = jobs[ii]
            if (job.result == 0) or (job.result == 1) or job.is_failed:
                if job.result == 1 or job.is_failed:
                    logger.error("Failed deletion of " + target)
                    rtnStatus[target] = "Failed deletion of " + target
                # a failed job's result may be None, so guard the addition
                rtnFlag = rtnFlag + (job.result or 0) + int(job.is_failed)
                jobs[ii] = 0
                numDone = numDone + 1
            else:
                logger.info('...Working on deleting target ' + target)
                break
    return (rtnFlag, str(rtnStatus))
Code Example #30
File: models.py Project: allenling/dbss
def cron_update_index(sender, **kwargs):
    index_redis = get_redis_connection('djrq')
    index_count = int(index_redis.incr(settings.INDEX_NAME))
    if index_count > settings.INDEX_COUNT + 1:
        index_redis.set(settings.INDEX_NAME, 0)
        index_queue = django_rq.get_queue(settings.INDEX_QUEUE)
        if index_queue.count < 1:
            index_queue.enqueue(warp_update_index)
Code Example #31
def send_mails_smtp(request):
    queue_name = os.path.basename(settings.BASE_DIR)
    queue = django_rq.get_queue(queue_name)

    for (idx, row) in enumerate(request.session['csv']):
        kwargs = {
            'idx': idx,
            'row': row,
            'session': request.session,
            'mode': 'smtp',
        }
        queue.enqueue(send_msg, ttl=100, **kwargs)
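
A caveat with calls like the one above: enqueue() reserves a handful of keyword names for its own options (ttl, result_ttl, depends_on, and in newer RQ job_timeout, among others), so spreading user data with **kwargs can collide with them. Passing args=/kwargs= explicitly keeps the two separate; a minimal sketch (send_msg is the task from the example, with the session argument omitted for brevity):

import django_rq

queue = django_rq.get_queue('default')
queue.enqueue(
    send_msg,
    kwargs={'idx': 0, 'row': {}, 'mode': 'smtp'},  # arguments for send_msg
    ttl=100,  # RQ's own option: discard the job if not started within 100s
)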
Code Example #32
def add_task(request):
    task = request.GET.get('task')
    parameters = request.GET.get('parameters')
    queue_to_be_added_to = request.GET.get('queue')
    queue = django_rq.get_queue(queue_to_be_added_to)
    # NOTE: resolving a task name straight from user input is unsafe without
    # a whitelist of allowed task names
    func = getattr(tasks, task)

    if parameters:
        queue.enqueue(func, args=(parameters,))
    else:
        queue.enqueue(func)
    return HttpResponse(json.dumps(True), content_type='application/json')
Code Example #33
File: test_views.py Project: Yolley/django-rq
    def test_deferred_jobs(self):
        """Ensure that active jobs page works properly."""
        queue = get_queue('django_rq_test')
        queue_index = get_queue_index('django_rq_test')

        job = queue.enqueue(access_self)
        registry = DeferredJobRegistry(queue.name, queue.connection)
        registry.add(job, 2)
        response = self.client.get(
            reverse('rq_deferred_jobs', args=[queue_index])
        )
        self.assertEqual(response.context['jobs'], [job])
Code Example #34
File: test_views.py Project: Yolley/django-rq
 def test_delete_job(self):
     """
     In addition to deleting job from Redis, the job id also needs to be
     deleted from Queue.
     """
     queue = get_queue('django_rq_test')
     queue_index = get_queue_index('django_rq_test')
     job = queue.enqueue(access_self)
     self.client.post(reverse('rq_delete_job', args=[queue_index, job.id]),
                      {'post': 'yes'})
     self.assertFalse(Job.exists(job.id, connection=queue.connection))
     self.assertNotIn(job.id, queue.job_ids)
Code Example #35
File: admin.py Project: IATI/iati.cloud
    def add_to_parse_queue(self, request):
        xml_id = request.GET.get('xml_id')
        obj = get_object_or_404(Dataset, pk=xml_id)
        queue = django_rq.get_queue("parser")
        queue.enqueue(force_parse_source_by_url,
                      args=(obj.source_url, True),
                      timeout=7200)

        # This is needed for direct debugging
        # force_parse_source_by_url(obj.source_url, True)

        return HttpResponse('Success')
Code Example #36
    def test_job_details_on_deleted_dependency(self):
        """Page doesn't crash even if job.dependency has been deleted"""
        queue = get_queue('default')
        queue_index = get_queue_index('default')

        job = queue.enqueue(access_self)
        second_job = queue.enqueue(access_self, depends_on=job)
        job.delete()
        url = reverse('rq_job_detail', args=[queue_index, second_job.id])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertIn(second_job._dependency_id, response.content.decode())
Code Example #37
File: api.py Project: Python3pkg/InkPy
def generate_pdf_async(source_path, output_path, data):
    queue = django_rq.get_queue('inkpy')

    return queue.enqueue_call(
        func=generate_pdf,
        args=(
            source_path,
            output_path,
            data,
        ),
        timeout=600,
    ).id
Code Example #38
File: version.py Project: xiya233/DCRM
 def update_storage(self):
     """
     Update control fields and write to deb files
      This method is executed asynchronously.
     """
     control = self.get_control_dict()
     path = self.storage.name
     if settings.ENABLE_REDIS is True:
         queue = django_rq.get_queue('high')
         queue.enqueue(write_to_package_job, control, path, self.id)
     else:
         write_to_package_job(control, path, self.id)
Code Example #39
File: webhooks.py Project: zhyh329/netbox
def enqueue_webhooks(instance, user, request_id, action):
    """
    Find Webhook(s) assigned to this instance + action and enqueue them
    to be processed
    """
    # Determine whether this type of object supports webhooks
    app_label = instance._meta.app_label
    model_name = instance._meta.model_name
    if model_name not in registry['model_features']['webhooks'].get(
            app_label, []):
        return

    # Retrieve any applicable Webhooks
    content_type = ContentType.objects.get_for_model(instance)
    action_flag = {
        ObjectChangeActionChoices.ACTION_CREATE: 'type_create',
        ObjectChangeActionChoices.ACTION_UPDATE: 'type_update',
        ObjectChangeActionChoices.ACTION_DELETE: 'type_delete',
    }[action]
    webhooks = Webhook.objects.filter(content_types=content_type,
                                      enabled=True,
                                      **{action_flag: True})

    if webhooks.exists():

        # Get the Model's API serializer class and serialize the object
        serializer_class = get_serializer_for_model(instance.__class__)
        serializer_context = {
            'request': None,
        }
        serializer = serializer_class(instance, context=serializer_context)

        # Gather pre- and post-change snapshots
        snapshots = {
            'prechange': getattr(instance, '_prechange_snapshot', None),
            'postchange': serialize_object(instance)
            if action != ObjectChangeActionChoices.ACTION_DELETE else None,
        }

        # Enqueue the webhooks
        webhook_queue = get_queue('default')
        for webhook in webhooks:
            webhook_queue.enqueue("extras.webhooks_worker.process_webhook",
                                  webhook=webhook,
                                  model_name=instance._meta.model_name,
                                  event=action,
                                  data=serializer.data,
                                  snapshots=snapshots,
                                  timestamp=str(timezone.now()),
                                  username=user.username,
                                  request_id=request_id)
Code Example #40
def add_task(request):
    import django_rq
    task = request.GET.get('task')
    parameters = request.GET.get('parameters')
    queue_to_be_added_to = request.GET.get('queue')
    queue = django_rq.get_queue(queue_to_be_added_to)

    if parameters:
        queue.enqueue(getattr(tasks, task), args=(parameters, ), timeout=7200)
    else:
        queue.enqueue(getattr(tasks, task), timeout=7200)
    return HttpResponse('Success')
Code Example #41
def get_filings(request):
    for j in django_rq.get_queue('default').jobs:
        if j.func_name == 'filings.parser.parse_filing' and j.result is None:
            return HttpResponse('parsing filings, please wait a minute and refresh...')

    state = request.GET.get('state') or 'ALL'
    if r.exists(state):
        state_data = json.loads(r.get(state))
        return JsonResponse(state_data, json_dumps_params={'indent': 2})

    django_rq.enqueue(_get_filings_async, state)
    return HttpResponse('generating JSON output, please wait a few seconds and refresh...')
Code Example #42
def run(*args):
    print("Empty queues")

    django_rq.get_queue('bot').empty()
    django_rq.get_queue('listen').empty()

    # Stop existing jobs
    registry = StartedJobRegistry('bot',
                                  connection=django_rq.get_connection('bot'))
    running_ids = registry.get_job_ids()
    if len(running_ids) > 1:
        for i in running_ids:
            current_job = django_rq.get_queue('bot').fetch_job(i)
            print("Delete : ", current_job)
            current_job.delete()
    else:
        for i in running_ids:
            current_job = django_rq.get_queue('bot').fetch_job(i)
            print("Send kill : ", current_job)
            current_job.meta['kill'] = "true"
            current_job.save_meta()

    if args and len(args) > 0 and args[0] == "stop":
        return

    print("Launch bot job")
    print(launch_bot.delay())
Code Example #43
def bulk_send_new_attempt_email(**kwargs):
    """
    Queue RQ job for sending out notifications to users when they
    have been given a new attempt.

    Adds :meth:`bulk_deadline_email` to the RQ-queue.
    """
    kwargs.update({
        'template_name': 'devilry_email/deadline_email/new_attempt.txt',
        'deadline_type': 'new_attempt'
    })
    queue = django_rq.get_queue(name='email')
    queue.enqueue(bulk_deadline_email, **kwargs)
Code Example #44
File: tasks.py Project: EwoutGoet/iati_data
def force_parse_source_by_id(source_id, update_searchable=False):
    try:
        xml_source = Dataset.objects.get(pk=source_id)
        xml_source.process(force_reparse=True)

        queue = django_rq.get_queue("parser")
        if update_searchable and settings.ROOT_ORGANISATIONS:
            queue.enqueue(start_searchable_activities_task,
                          args=(0, ),
                          timeout=300)

    except Dataset.DoesNotExist:
        return False
Code Example #45
 def run(self):
     while True:
         self.logger.info('cron update index start')
         index_queue = django_rq.get_queue(settings.INDEX_QUEUE)
         if index_queue.count < 1:
             index_redis = get_redis_connection('djrq')
             index_count = int(index_redis.get(settings.INDEX_NAME)) if index_redis.get(settings.INDEX_NAME) else 0
             if index_count > 0:
                 self.logger.info('index count is ' + str(index_count) + ', cron update index enqueue')
                 index_redis.set(settings.INDEX_NAME, 0)
                 index_queue.enqueue(warp_update_index)
         self.logger.info('cron update index done, sleep ' + str(settings.INDEX_TIME) + '\n*********************')
         time.sleep(settings.INDEX_TIME)
Code Example #46
    def post(self, request, format=None):

        queue = django_rq.get_queue('high')

        job = queue.enqueue(regimes_clustering_run, request.data['start'],
                            request.data['end'], request.data['capital_base'],
                            request.data['ticker'], request.data['use_clf'],
                            request.data['no_shorts'],
                            request.data['log_channel'])

        # job.id is the job's UUID string; job.key is the raw Redis key
        payload = {'success': True, 'job_id': job.id}

        return Response(payload)
Code Example #47
File: mixins.py Project: brian-lai/django-docker
    def get_context_data(self, *args, **kwargs):
        context = super(QueuedJobsMixin, self).get_context_data(*args, **kwargs)

        # Add a list of job items currently active for the object
        obj = self.object
        lname = generate_object_index(obj)

        # Retrieve a list of jobs
        conn = get_connection()
        queue = django_rq.get_queue()
        context['queued_jobs'] = [queue.fetch_job(job_id) for job_id in conn.lrange(lname, 0, 10)]

        return context
Code Example #48
File: models.py Project: undp/satlomas-back
 def start(self, sync=False):
     if self.state == states.PENDING:
         if sync:
             method = self._get_function_from_string(self.name)
             method(self.pk, sync=True)
         else:
             queue = django_rq.get_queue(self.queue or "default")
             queue.enqueue(self.name, self.pk)
         self.state = states.STARTED
         self.save(update_fields=["state", "updated_at"])
         signals.job_started.send(sender=self.__class__, job=self)
         return True
     return False
Code Example #49
File: base.py Project: jeqka24/CUBA
 def post(self, request, format=None):
     """ Some description for posts"""
     data = self.serialize(request)
     if not isinstance(data, dict):
         return data
     data["domain_name"] = request.META.get('HTTP_HOST', '')
     # if request.POST.get('synchronous_job', False):
     #     return self.worker_class.run(data)
     # else:
     job = django_rq.get_queue("default").enqueue(self.worker_class.run,
                                                  data,
                                                  result_ttl=180)
     return Response({"job_id": job.id}, status=status.HTTP_200_OK)
Code Example #50
File: test_views.py Project: pnuckowski/django-rq
    def test_jobs(self):
        """Jobs in queue are displayed properly"""
        queue = get_queue('default')
        job = queue.enqueue(access_self)
        queue_index = get_queue_index('default')
        response = self.client.get(reverse('rq_jobs', args=[queue_index]))
        self.assertEqual(response.context['jobs'], [job])

        # This page shouldn't fail when job.data is corrupt
        queue.connection.hset(job.key, 'data', 'unpickleable data')
        response = self.client.get(reverse('rq_jobs', args=[queue_index]))
        self.assertEqual(response.status_code, 200)
        self.assertIn('UnpicklingError', response.content.decode('utf-8'))
Code Example #51
 def cancel_rq_job(self):
     """
     Cancels the RQ job corresponding to me.
     """
     try:
         logger.debug(f"cancel_rq_job(): Started: {self}")
         queue = django_rq.get_queue(UPLOAD_FILE_QUEUE_NAME)
         job = queue.fetch_job(self.rq_job_id())
         # NB: cancel() just removes the job from the queue; it won't kill
         # the job if it is already executing
         job.cancel()
         logger.debug(f"cancel_rq_job(): done: {self}")
     except Exception as ex:
         logger.debug(f"cancel_rq_job(): Failed: {ex}, {self}")
Code Example #52
def bulk_send_deadline_moved_email(**kwargs):
    """
    Queue RQ job for sending out notifications to users when their
    deadline is moved.

    Adds :meth:`bulk_deadline_email` to the RQ-queue.
    """
    kwargs.update({
        'template_name': 'devilry_email/deadline_email/deadline_moved.txt',
        'deadline_type': 'moved'
    })
    queue = django_rq.get_queue(name='email')
    queue.enqueue(bulk_deadline_email, **kwargs)
Code Example #53
def healthcheck(request):
    results = {
        'version': __version__,
        'is_database_synchronized': is_database_synchronized(),
        'rq_jobs': len(django_rq.get_queue().jobs),
    }

    status_code = 200

    if not results['is_database_synchronized']:
        status_code = 500

    return JsonResponse(results, status=status_code)
Code Example #54
def dummy_horde(interactive=False, how_many=1000):
    if interactive:
        for i in range(how_many):
            dummy_task(interactive=interactive, index=i + 1)
    else:
        queue = django_rq.get_queue()
        for i in range(how_many):
            queue.enqueue_call(
                func=dummy_task,
                kwargs=dict(interactive=interactive, index=i + 1),
                timeout=60,
                result_ttl=0,
            )
Code Example #55
 def update_user(self):
     Subscription = apps.get_model('bhs.subscription')
     User = apps.get_model('api.user')
     queue = django_rq.get_queue('low')
     try:
         subscription = self.subscriptions.filter(
              items_editable=True).latest('modified')
         queue.enqueue(
             User.objects.update_or_create_from_subscription,
             subscription,
         )
     except Subscription.DoesNotExist:
         pass
Code Example #56
File: utils.py Project: zqiang3/django-cacheback
def enqueue_task(kwargs, task_options=None):
    task_queue = getattr(settings, 'CACHEBACK_TASK_QUEUE', 'celery')

    if task_queue == 'rq' and rq_refresh_cache is not None:
        return django_rq.get_queue(**task_options or {}).enqueue(
            rq_refresh_cache, **kwargs)

    elif task_queue == 'celery' and celery_refresh_cache is not None:
        return celery_refresh_cache.apply_async(kwargs=kwargs,
                                                **task_options or {})

    raise ImproperlyConfigured(
        'Unknown task queue configured: {0}'.format(task_queue))
Code Example #57
File: tasks.py Project: IATI/iati.cloud
def delete_sources_not_found_in_registry_in_x_days(days):
    if int(days) < 6:
        raise Exception(
            "The task queue only allows deletion of sources when not found "
            "for 5+ days")

    for source in Dataset.objects.all():
        current_date = float(datetime.datetime.now().strftime('%s'))
        if source.last_found_in_registry:
            last_found_in_registry = float(
                source.last_found_in_registry.strftime('%s'))
            update_interval_time = 24 * 60 * 60 * int(days)

            if (current_date - update_interval_time) > last_found_in_registry:
                queue = django_rq.get_queue("parser")
                queue.enqueue(delete_source_by_id, args=(source.id,))

        else:
            if not source.added_manually:
                # Old source, delete
                queue = django_rq.get_queue("parser")
                queue.enqueue(delete_source_by_id, args=(source.id,))
Code Example #58
def get_job(job_id):
    """
    Return the job for the specified ID or None if it cannot be found.

    Args:
        job_id(uuid): The ID of the job to retrieve.

    Returns:
        The job with the matching ID or None if no job with the supplied job
        ID could be found.
    """
    queue = get_queue(settings.ASYNC_QUEUE)
    return queue.fetch_job(job_id)
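
A caller typically inspects the returned job's status before reading its result; a short usage sketch (some_job_id is a placeholder):

job = get_job(some_job_id)
if job is not None:
    print(job.get_status())  # e.g. 'queued', 'started', 'finished', 'failed'
    print(job.result)        # None unless the job finished successfully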
Code Example #59
def update(score_abbrev, project_pk, model_abbrev, no_enqueue):
    """
    A subcommand that enqueues (or executes immediately) model score updates,
    controlled by the args. NB: Does NOT exclude those that do not need
    updating according to how ForecastModel.forecasts_changed_at compares to
    ScoreLastUpdate.updated_at.

    :param score_abbrev: if a valid Score abbreviation then only that score is updated. o/w all scores are updated
    :param project_pk: if a valid Project pk then only that project's models are updated. o/w defers to `model_abbrev` arg
    :param model_abbrev: if a valid ForecastModel abbreviation then only that model is updated. o/w all models are updated
    :param no_enqueue: controls whether the update will be immediate in the calling thread (blocks), or enqueued for RQ
    """
    from forecast_repo.settings.base import UPDATE_MODEL_SCORES_QUEUE_NAME  # avoid circular imports

    Score.ensure_all_scores_exist()
    logger.info(
        f"update(): score_abbrev={score_abbrev!r}, project_pk={project_pk}, model_abbrev={model_abbrev!r}, "
        f"no_enqueue={no_enqueue}")

    scores = [get_object_or_404(Score, abbreviation=score_abbrev)
              ] if score_abbrev else Score.objects.all()

    # set models
    project = get_object_or_404(Project, pk=project_pk) if project_pk else None
    model = get_object_or_404(ForecastModel, project__id=project_pk, abbreviation=model_abbrev) \
        if model_abbrev and project_pk else None
    if model:
        models = [model]
    elif project:
        models = project.models.all()
    else:
        models = ForecastModel.objects.all()

    logger.info(
        f"update(): project={project}, scores=({len(scores)}) {scores}, model={model}, "
        f"models=({len(models)}) {models}")
    queue = django_rq.get_queue(UPDATE_MODEL_SCORES_QUEUE_NAME)
    for score in scores:
        logger.info(f"* {score}")
        for forecast_model in models:
            if no_enqueue:
                logger.info(
                    f"** (no enqueue) calculating score={score}, forecast_model={forecast_model}"
                )
                _update_model_scores_worker(score.pk, forecast_model.pk)
            else:
                logger.info(
                    f"** enqueuing score={score}, forecast_model={forecast_model}"
                )
                queue.enqueue(_update_model_scores_worker, score.pk,
                              forecast_model.pk)
    logger.info("update done")
Code Example #60
File: version.py Project: wghub/DCRM
 def batch_hash_update(self, request, queryset):
     """
     :type queryset: QuerySet
     """
     if settings.ENABLE_REDIS is True:
         queue = django_rq.get_queue('high')
         queue.enqueue(hash_update_job, queryset)
         self.message_user(
             request,
             _("Hash updating job has been added to the \"high\" queue."))
     else:
         hash_update_job(queryset)
         self.message_user(request,
                           _("Hash updating job has been finished."))