def get_search(self):
    search = json.loads(self.search)
    if 'after' in search:
        search['after'] = parse_iso8601(search['after'])
    if 'before' in search:
        search['before'] = parse_iso8601(search['before'])
    return search

def get_search(self): search = json.loads(self.search) if "after" in search: search["after"] = parse_iso8601(search["after"]) if "before" in search: search["before"] = parse_iso8601(search["before"]) return search
def derive_search(self): """ Collects and prepares reply search parameters into JSON serializable dict """ params = self.request.GET partner = params.get("partner") after = parse_iso8601(params.get("after")) before = parse_iso8601(params.get("before")) return {"partner": partner, "after": after, "before": before}
def derive_search(self): """ Collects and prepares reply search parameters into JSON serializable dict """ params = self.request.GET partner = params.get('partner') after = parse_iso8601(params.get('after')) before = parse_iso8601(params.get('before')) return {'partner': partner, 'after': after, 'before': before}
def derive_search(self): """ Collects and prepares case search parameters into JSON serializable dict """ params = self.request.GET folder = CaseFolder[params['folder']] assignee = params.get('assignee') after = parse_iso8601(params.get('after')) before = parse_iso8601(params.get('before')) return {'folder': folder, 'assignee': assignee, 'after': after, 'before': before}
def post(self, request, *args, **kwargs):
    of_type = request.json['type']

    # parse dates and adjust max so it's exclusive
    after = parse_iso8601(request.json['after']).date()
    before = parse_iso8601(request.json['before']).date() + timedelta(days=1)

    export = DailyCountExport.create(self.request.org, self.request.user, of_type, after, before)

    daily_count_export.delay(export.pk)

    return JsonResponse({'export_id': export.pk})

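A quick worked example of the exclusive upper bound used above: adding one day means the requested end date is itself included in the export window. The dates below are illustrative only:

from datetime import date, timedelta

# a request with before=2015-06-30 yields an exclusive bound of 2015-07-01,
# so counts from June 30 itself are still covered by the export
requested_before = date(2015, 6, 30)
exclusive_before = requested_before + timedelta(days=1)
assert exclusive_before == date(2015, 7, 1)
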
def derive_search(self): """ Collects and prepares case search parameters into JSON serializable dict """ params = self.request.GET folder = CaseFolder[params["folder"]] assignee = params.get("assignee") after = parse_iso8601(params.get("after")) before = parse_iso8601(params.get("before")) return { "folder": folder, "assignee": assignee, "after": after, "before": before }
def derive_search(self): """ Collects and prepares case search parameters into JSON serializable dict """ params = self.request.GET folder = CaseFolder[params['folder']] assignee = params.get('assignee') after = parse_iso8601(params.get('after')) before = parse_iso8601(params.get('before')) return { 'folder': folder, 'assignee': assignee, 'after': after, 'before': before }
def derive_search(self): """ Collects and prepares message search parameters into JSON serializable dict """ folder = MessageFolder[self.request.GET["folder"]] label_id = self.request.GET.get("label", None) include_archived = str_to_bool(self.request.GET.get("archived", "")) text = self.request.GET.get("text", None) contact_id = self.request.GET.get("contact", None) after = parse_iso8601(self.request.GET.get("after", None)) before = parse_iso8601(self.request.GET.get("before", None)) return { "folder": folder, "label": label_id, "include_archived": include_archived, # only applies to flagged folder "text": text, "contact": contact_id, "after": after, "before": before, }
def org_task(self, org): """ Fetches new and modified flow runs for the given org and creates/updates poll responses. """ from tracpro.orgs_ext.constants import TaskType from tracpro.polls.models import Poll, PollRun, Response client = org.get_temba_client() redis_connection = get_redis_connection() last_time_key = LAST_FETCHED_RUN_TIME_KEY % org.pk last_time = redis_connection.get(last_time_key) if last_time is not None: last_time = parse_iso8601(last_time) else: newest_runs = Response.objects.filter( pollrun__poll__org=org).order_by('-created_on') newest_runs = newest_runs.exclude( pollrun__pollrun_type=PollRun.TYPE_SPOOFED) newest_run = newest_runs.first() last_time = newest_run.created_on if newest_run else None until = timezone.now() total_runs = 0 for poll in Poll.objects.active().by_org(org): poll_runs = client.get_runs(flows=[poll.flow_uuid], after=last_time, before=until) total_runs += len(poll_runs) # convert flow runs into poll responses for run in poll_runs: try: Response.from_run(org, run, poll=poll) except ValueError as e: logger.error("Unable to save run #%d due to error: %s" % (run.id, e.message)) continue logger.info("Fetched %d new and updated runs for org #%d (since=%s)" % (total_runs, org.id, format_iso8601(last_time) if last_time else 'Never')) task_result = dict(time=datetime_to_ms(timezone.now()), counts=dict(fetched=total_runs)) org.set_task_result(TaskType.fetch_runs, task_result) redis_connection.set(last_time_key, format_iso8601(until))
def derive_search(self): """ Collects and prepares message search parameters into JSON serializable dict """ folder = MessageFolder[self.request.GET['folder']] label_id = self.request.GET.get('label', None) include_archived = str_to_bool(self.request.GET.get('archived', '')) text = self.request.GET.get('text', None) contact_id = self.request.GET.get('contact', None) group_ids = parse_csv(self.request.GET.get('groups', ''), as_ints=True) after = parse_iso8601(self.request.GET.get('after', None)) before = parse_iso8601(self.request.GET.get('before', None)) return { 'folder': folder, 'label': label_id, 'include_archived': include_archived, # only applies to flagged folder 'text': text, 'contact': contact_id, 'groups': group_ids, 'after': after, 'before': before }
def fetch_org_runs(org_id): """ Fetches new and modified flow runs for the given org and creates/updates poll responses. """ from tracpro.orgs_ext.constants import TaskType from tracpro.polls.models import Poll, PollRun, Response org = Org.objects.get(pk=org_id) client = org.get_temba_client() redis_connection = get_redis_connection() last_time_key = LAST_FETCHED_RUN_TIME_KEY % org.pk last_time = redis_connection.get(last_time_key) if last_time is not None: last_time = parse_iso8601(last_time) else: newest_runs = Response.objects.filter(pollrun__poll__org=org).order_by("-created_on") newest_runs = newest_runs.exclude(pollrun__pollrun_type=PollRun.TYPE_SPOOFED) newest_run = newest_runs.first() last_time = newest_run.created_on if newest_run else None until = timezone.now() total_runs = 0 for poll in Poll.get_all(org): poll_runs = client.get_runs(flows=[poll.flow_uuid], after=last_time, before=until) total_runs += len(poll_runs) # convert flow runs into poll responses for run in poll_runs: try: Response.from_run(org, run, poll=poll) except ValueError as e: logger.error("Unable to save run #%d due to error: %s" % (run.id, e.message)) continue logger.info( "Fetched %d new and updated runs for org #%d (since=%s)" % (total_runs, org.id, format_iso8601(last_time) if last_time else "Never") ) task_result = dict(time=datetime_to_ms(timezone.now()), counts=dict(fetched=total_runs)) org.set_task_result(TaskType.fetch_runs, task_result) redis_connection.set(last_time_key, format_iso8601(until))
def check_rate_limit(self, org): """Return the next run time if this task has run too recently.""" now = timezone.now() last_run_time = self.cache_get(org, LAST_RUN_TIME) if last_run_time is not None: # Calculate when the task will be eligible to run again. last_run_time = parse_iso8601(last_run_time) if last_run_time else None failure_count = self.cache_get(org, FAILURE_COUNT, default=0) delta = settings.ORG_TASK_TIMEOUT * 2 ** failure_count next_run_time = last_run_time + min(delta, MAX_TIME_BETWEEN_RUNS) if now < next_run_time: # Task has been run too recently. raise ValueError( "Skipping task because rate limit was exceeded. " "Last run time was {}. " "Task has failed {} times recently. " "Task won't be run again before {}.".format( last_run_time, failure_count, next_run_time)) # Set the current time as the last run time. self.cache_set(org, LAST_RUN_TIME, value=format_iso8601(now), timeout=RUNS_TIMEOUT)