Example #1
File: views.py Project: mdheyab/tracpro
        def get_context_data(self, **kwargs):
            context = super(PollCRUDL.Read, self).get_context_data(**kwargs)
            questions = self.object.get_questions()
            pollruns = self.object.get_pollruns(self.request.region, self.request.include_subregions)

            # if we're viewing "All Regions" don't include regional only pollruns
            if not self.request.region:
                pollruns = pollruns.universal()

            window = self.request.POST.get("window", self.request.GET.get("window", None))
            window = Window[window] if window else Window.last_30_days
            window_min, window_max = window.to_range()

            pollruns = pollruns.filter(conducted_on__gte=window_min, conducted_on__lt=window_max)
            pollruns = pollruns.order_by("conducted_on")

            for question in questions:
                question.chart_type, question.chart_data = charts.multiple_pollruns(
                    pollruns, question, self.request.data_regions
                )

            context["window"] = window
            context["window_min"] = datetime_to_ms(window_min)
            context["window_max"] = datetime_to_ms(window_max)
            context["window_options"] = Window.__members__.values()
            context["questions"] = questions
            return context
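
Every example on this page caches or stores the value returned by datetime_to_ms: a datetime converted to milliseconds since the Unix epoch, which keeps the payload JSON-serializable and cheap to compare. For reference, here is a minimal sketch of that conversion and of its inverse ms_to_datetime, assuming the usual UTC epoch-milliseconds semantics (not necessarily the exact helpers these projects import):

import calendar
from datetime import datetime, timezone


def datetime_to_ms(dt):
    # datetime -> milliseconds since the Unix epoch (UTC)
    seconds = calendar.timegm(dt.utctimetuple())
    return seconds * 1000 + dt.microsecond // 1000


def ms_to_datetime(ms):
    # epoch milliseconds -> timezone-aware UTC datetime
    return datetime.fromtimestamp(ms / 1000.0, tz=timezone.utc)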
Example #2
    def fetch_results(self, segment=None):
        from raven.contrib.django.raven_compat.models import client

        cache_time = UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME
        if segment and segment.get('location', "") == "District":
            cache_time = UREPORT_RUN_FETCHED_DATA_CACHE_TIME

        try:
            key = CACHE_POLL_RESULTS_KEY % (self.poll.org.pk, self.poll.pk,
                                            self.pk)
            if segment:
                segment = self.poll.org.substitute_segment(segment)
                key += ":" + slugify(unicode(segment))

            this_time = datetime.now()
            temba_client = self.poll.org.get_temba_client()
            client_results = temba_client.get_results(self.ruleset_uuid,
                                                      segment=segment)
            results = temba_client_flow_results_serializer(client_results)

            cache.set(key, {
                'time': datetime_to_ms(this_time),
                'results': results
            }, cache_time)

            # delete the open ended cache
            cache.delete('open_ended:%d' % self.id)

        except:  # pragma: no cover
            client.captureException()
            import traceback
            traceback.print_exc()
Example #3
    def fetch_results(self, segment=None):
        from raven.contrib.django.raven_compat.models import client

        cache_time = UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME
        if segment and segment.get('location', "") == "District":
            cache_time = UREPORT_RUN_FETCHED_DATA_CACHE_TIME

        try:
            key = CACHE_POLL_RESULTS_KEY % (self.poll.org.pk, self.poll.pk, self.pk)
            if segment:
                segment = self.poll.org.substitute_segment(segment)
                key += ":" + slugify(unicode(segment))

            this_time = datetime.now()
            temba_client = self.poll.org.get_temba_client()
            client_results = temba_client.get_results(self.ruleset_uuid, segment=segment)
            results = temba_client_flow_results_serializer(client_results)

            cache.set(key, {'time': datetime_to_ms(this_time), 'results': results}, cache_time)

            # delete the open ended cache
            cache.delete('open_ended:%d' % self.id)

        except:  # pragma: no cover
            client.captureException()
            import traceback
            traceback.print_exc()
Example #4
def fetch_old_sites_count():
    import requests, re
    from ureport.polls.models import UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

    start = time.time()
    this_time = datetime.now()
    linked_sites = list(getattr(settings, 'PREVIOUS_ORG_SITES', []))

    old_site_values = []

    for site in linked_sites:
        count_link = site.get('count_link', "")
        if count_link:
            try:
                response = requests.get(count_link)
                response.raise_for_status()

                count = int(re.search(r'\d+', response.content).group())
                key = "org:%s:reporters:%s" % (site.get('name').lower(),
                                               'old-site')
                value = {
                    'time': datetime_to_ms(this_time),
                    'results': dict(size=count)
                }
                old_site_values.append(value)
                cache.set(key, value, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)
            except:
                import traceback
                traceback.print_exc()

    # delete the global count cache to force a recalculate at the end
    cache.delete(GLOBAL_COUNT_CACHE_KEY)

    print "Fetch old sites counts took %ss" % (time.time() - start)
    return old_site_values
Example #5
def fetch_flows(org):
    start = time.time()
    #print "Fetching flows for %s" % org.name

    try:
        from ureport.polls.models import CACHE_ORG_FLOWS_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

        this_time = datetime.now()

        temba_client = org.get_temba_client()
        flows = temba_client.get_flows()

        all_flows = dict()
        for flow in flows:
            if flow.rulesets:
                flow_json = dict()
                flow_json['uuid'] = flow.uuid
                flow_json['created_on'] = flow.created_on.strftime('%Y-%m-%d')
                flow_json['name'] = flow.name
                flow_json['participants'] = flow.participants
                flow_json['runs'] = flow.runs
                flow_json['completed_runs'] = flow.completed_runs
                flow_json['rulesets'] = [
                    dict(uuid=elt.uuid, label=elt.label, response_type=elt.response_type) for elt in flow.rulesets]

                all_flows[flow.uuid] = flow_json

        all_flows_key = CACHE_ORG_FLOWS_KEY % org.pk
        cache.set(all_flows_key,
                  {'time': datetime_to_ms(this_time), 'results': all_flows},
                  UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)
    except:
        client.captureException()
        import traceback
        traceback.print_exc()
Example #6
def fetch_flows(org, backend=None):
    from ureport.polls.models import CACHE_ORG_FLOWS_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

    start = time.time()
    logger.info("Fetching flows for %s" % org.name)

    if backend:
        backends = [backend]
    else:
        backends = org.backends.filter(is_active=True)

    this_time = datetime.now()
    org_flows = dict(time=datetime_to_ms(this_time), results=dict())

    for backend_obj in backends:
        backend = org.get_backend(backend_slug=backend_obj.slug)
        try:
            all_flows = backend.fetch_flows(org)
            org_flows["results"] = all_flows

            cache_key = CACHE_ORG_FLOWS_KEY % (org.pk, backend_obj.slug)
            cache.set(cache_key, org_flows, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)

        except Exception:
            client.captureException()
            import traceback

            traceback.print_exc()

    logger.info("Fetch %s flows took %ss" % (org.name, time.time() - start))

    if len(backends):
        return org_flows.get("results", dict())
Example #7
def fetch_old_sites_count():
    import requests
    import re
    from ureport.polls.models import UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

    start = time.time()
    this_time = datetime.now()
    linked_sites = list(getattr(settings, "PREVIOUS_ORG_SITES", []))

    old_site_values = []

    for site in linked_sites:
        count_link = site.get("count_link", "")
        if count_link:
            try:
                response = requests.get(count_link)
                response.raise_for_status()

                count = int(re.search(r"\d+", response.content.decode("utf-8")).group())
                key = "org:%s:reporters:%s" % (site.get("name").lower(), "old-site")
                value = {"time": datetime_to_ms(this_time), "results": dict(size=count)}
                old_site_values.append(value)
                cache.set(key, value, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)
            except Exception:
                import traceback

                traceback.print_exc()

    # delete the global count cache to force a recalculate at the end
    cache.delete(GLOBAL_COUNT_CACHE_KEY)

    logger.info("Fetch old sites counts took %ss" % (time.time() - start))
    return old_site_values
Example #8
def sync_org_contacts(org_id):
    """
    Syncs all contacts for the given org
    """
    from chatpro.orgs_ext import TaskType
    from chatpro.rooms.models import Room
    from .models import Contact

    org = Org.objects.get(pk=org_id)

    logger.info('Starting contact sync task for org #%d' % org.id)

    sync_fields = [org.get_chat_name_field()]
    sync_groups = [r.uuid for r in Room.get_all(org)]

    created, updated, deleted, failed = sync_pull_contacts(org, Contact, fields=sync_fields, groups=sync_groups)

    task_result = dict(time=datetime_to_ms(timezone.now()),
                       counts=dict(created=len(created),
                                   updated=len(updated),
                                   deleted=len(deleted),
                                   failed=len(failed)))
    org.set_task_result(TaskType.sync_contacts, task_result)

    logger.info("Finished contact sync for org #%d (%d created, %d updated, %d deleted, %d failed)"
                % (org.id, len(created), len(updated), len(deleted), len(failed)))
Example #9
def fetch_old_sites_count():
    import requests
    import re
    from ureport.polls.models import UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

    start = time.time()
    this_time = datetime.now()
    linked_sites = list(getattr(settings, "PREVIOUS_ORG_SITES", [])) + list(
        getattr(settings, "OTHER_ORG_COUNT_SITES", [])
    )

    old_site_values = []

    for site in linked_sites:
        count_link = site.get("count_link", "")
        if count_link:
            try:
                response = requests.get(count_link)
                response.raise_for_status()

                count = int(re.search(r"\d+", response.content.decode("utf-8")).group())
                key = "org:%s:reporters:%s" % (site.get("name").lower(), "old-site")
                value = {"time": datetime_to_ms(this_time), "results": dict(size=count)}
                old_site_values.append(value)
                cache.set(key, value, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)
            except Exception:
                import traceback

                traceback.print_exc()

    # delete the global count cache to force a recalculate at the end
    cache.delete(GLOBAL_COUNT_CACHE_KEY)

    logger.info("Fetch old sites counts took %ss" % (time.time() - start))
    return old_site_values
Example #10
def fetch_flows(org, backend=None):
    from ureport.polls.models import CACHE_ORG_FLOWS_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

    start = time.time()
    logger.info("Fetching flows for %s" % org.name)

    if backend:
        backends = [backend]
    else:
        backends = org.backends.filter(is_active=True)

    this_time = datetime.now()
    org_flows = dict(time=datetime_to_ms(this_time), results=dict())

    for backend_obj in backends:
        backend = org.get_backend(backend_slug=backend_obj.slug)
        try:
            all_flows = backend.fetch_flows(org)
            org_flows["results"] = all_flows

            cache_key = CACHE_ORG_FLOWS_KEY % (org.pk, backend_obj.slug)
            cache.set(cache_key, org_flows, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)

        except Exception:
            client.captureException()
            import traceback

            traceback.print_exc()

    logger.info("Fetch %s flows took %ss" % (org.name, time.time() - start))

    if len(backends):
        return org_flows.get("results", dict())
Example #11
File: tasks.py Project: daaray/tracpro
def sync_org_contacts(org_id):
    """
    Syncs all contacts for the given org
    """
    from tracpro.groups.models import Region, Group
    from tracpro.orgs_ext.constants import TaskType
    from .models import Contact

    org = Org.objects.get(pk=org_id)

    logger.info('Starting contact sync task for org #%d' % org.id)

    sync_groups = [r.uuid for r in Region.get_all(org)] + [g.uuid for g in Group.get_all(org)]

    most_recent_contact = Contact.objects.by_org(org).active().exclude(temba_modified_on=None)
    most_recent_contact = most_recent_contact.order_by('-temba_modified_on').first()
    if most_recent_contact:
        last_time = most_recent_contact.temba_modified_on
    else:
        last_time = None

    created, updated, deleted, failed = sync_pull_contacts(
        org, Contact, fields=(), groups=sync_groups, last_time=last_time,
        delete_blocked=True)

    task_result = dict(time=datetime_to_ms(timezone.now()),
                       counts=dict(created=len(created),
                                   updated=len(updated),
                                   deleted=len(deleted),
                                   failed=len(failed)))
    org.set_task_result(TaskType.sync_contacts, task_result)

    logger.info("Finished contact sync for org #%d (%d created, "
                "%d updated, %d deleted, %d failed)" %
                (org.id, len(created), len(updated), len(deleted), len(failed)))
Example #12
def fetch_reporter_group(org):
    start = time.time()
    print "Fetching reporter group for %s" % org.name
    try:
        from ureport.polls.models import CACHE_ORG_REPORTER_GROUP_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

        this_time = datetime.now()

        reporter_group = org.get_config('reporter_group')
        if reporter_group:
            temba_client = org.get_temba_client()
            groups = temba_client.get_groups(name=reporter_group)

            key = CACHE_ORG_REPORTER_GROUP_KEY % (org.pk, slugify(unicode(reporter_group)))
            group_dict = dict()
            if groups:
                group = groups[0]
                group_dict = dict(size=group.size, name=group.name, uuid=group.uuid)
            cache.set(key,
                      {'time': datetime_to_ms(this_time), 'results': group_dict},
                      UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)
    except:
        client.captureException()
        import traceback
        traceback.print_exc()
    # delete the global count cache to force a recalculate at the end
    cache.delete(GLOBAL_COUNT_CACHE_KEY)

    print "Fetch %s reporter group took %ss" % (org.name, time.time() - start)
Example #13
File: tasks.py Project: ewheeler/tracpro
def sync_org_contacts(org_id):
    """
    Syncs all contacts for the given org
    """
    from tracpro.groups.models import Region, Group
    from tracpro.orgs_ext import TaskType
    from .models import Contact

    org = Org.objects.get(pk=org_id)

    logger.info('Starting contact sync task for org #%d' % org.id)

    sync_groups = [r.uuid for r in Region.get_all(org)] + [g.uuid for g in Group.get_all(org)]

    created, updated, deleted, failed = sync_pull_contacts(org, Contact, fields=(), groups=sync_groups)

    task_result = dict(time=datetime_to_ms(timezone.now()),
                       counts=dict(created=len(created),
                                   updated=len(updated),
                                   deleted=len(deleted),
                                   failed=len(failed)))
    org.set_task_result(TaskType.sync_contacts, task_result)

    logger.info("Finished contact sync for org #%d (%d created, %d updated, %d deleted, %d failed)"
                % (org.id, len(created), len(updated), len(deleted), len(failed)))
Example #14
def fetch_old_sites_count():
    import requests, re
    from ureport.polls.models import UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

    start = time.time()
    this_time = datetime.now()
    linked_sites = list(getattr(settings, 'PREVIOUS_ORG_SITES', []))

    for site in linked_sites:
        count_link = site.get('count_link', "")
        if count_link:
            try:
                response = requests.get(count_link)
                response.raise_for_status()

                count = int(re.search(r'\d+', response.content).group())
                key = "org:%s:reporters:%s" % (site.get('name').lower(), 'old-site')
                cache.set(key,
                          {'time': datetime_to_ms(this_time), 'results': dict(size=count)},
                          UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)
            except:
                import traceback
                traceback.print_exc()

    # delete the global count cache to force a recalculate at the end
    cache.delete(GLOBAL_COUNT_CACHE_KEY)

    print "Fetch old sites counts took %ss" % (time.time() - start)
Example #15
def fetch_contact_field_results(org, contact_field, segment):
    from ureport.polls.models import CACHE_ORG_FIELD_DATA_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME
    from ureport.polls.models import UREPORT_RUN_FETCHED_DATA_CACHE_TIME

    start = time.time()
    print "Fetching  %s for %s with segment %s" % (contact_field, org.name, segment)

    cache_time = UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME
    if segment and segment.get('location', "") == "District":
        cache_time = UREPORT_RUN_FETCHED_DATA_CACHE_TIME

    try:
        segment = substitute_segment(org, segment)

        this_time = datetime.now()

        temba_client = org.get_temba_client()
        client_results = temba_client.get_results(contact_field=contact_field, segment=segment)

        results_data = temba_client_flow_results_serializer(client_results)
        cleaned_results_data = results_data

        print "Fetch took %ss" % (time.time() - start)

        key = CACHE_ORG_FIELD_DATA_KEY % (org.pk, slugify(unicode(contact_field)), slugify(unicode(segment)))
        cache.set(key, {'time': datetime_to_ms(this_time), 'results': cleaned_results_data}, cache_time)
    except:
        client.captureException()
        import traceback
        traceback.print_exc()
Example #16
    def org_task(self, org):
        """
        Fetches new and modified flow runs for the given org and creates/updates
        poll responses.
        """
        from tracpro.orgs_ext.constants import TaskType
        from tracpro.polls.models import Poll, PollRun, Response

        client = org.get_temba_client()
        redis_connection = get_redis_connection()
        last_time_key = LAST_FETCHED_RUN_TIME_KEY % org.pk
        last_time = redis_connection.get(last_time_key)

        if last_time is not None:
            last_time = parse_iso8601(last_time)
        else:
            newest_runs = Response.objects.filter(
                pollrun__poll__org=org).order_by('-created_on')
            newest_runs = newest_runs.exclude(
                pollrun__pollrun_type=PollRun.TYPE_SPOOFED)
            newest_run = newest_runs.first()
            last_time = newest_run.created_on if newest_run else None

        until = timezone.now()

        total_runs = 0
        for poll in Poll.objects.active().by_org(org):
            poll_runs = client.get_runs(flows=[poll.flow_uuid],
                                        after=last_time,
                                        before=until)
            total_runs += len(poll_runs)

            # convert flow runs into poll responses
            for run in poll_runs:
                try:
                    Response.from_run(org, run, poll=poll)
                except ValueError as e:
                    logger.error("Unable to save run #%d due to error: %s" %
                                 (run.id, e.message))
                    continue

        logger.info("Fetched %d new and updated runs for org #%d (since=%s)" %
                    (total_runs, org.id,
                     format_iso8601(last_time) if last_time else 'Never'))

        task_result = dict(time=datetime_to_ms(timezone.now()),
                           counts=dict(fetched=total_runs))
        org.set_task_result(TaskType.fetch_runs, task_result)

        redis_connection.set(last_time_key, format_iso8601(until))
Example #17
def fetch_shared_sites_count():
    import requests

    this_time = datetime.now()
    try:
        response = requests.get("https://ureport.in/shared_sites_count/")
        response.raise_for_status()

        value = {"time": datetime_to_ms(this_time), "results": response.json()}
        cache.set("shared_sites", value, None)
        return value["results"]
    except Exception:
        import traceback

        traceback.print_exc()
Example #18
File: tasks.py Project: daaray/tracpro
def fetch_org_runs(org_id):
    """
    Fetches new and modified flow runs for the given org and creates/updates
    poll responses.
    """
    from tracpro.orgs_ext.constants import TaskType
    from tracpro.polls.models import Poll, PollRun, Response

    org = Org.objects.get(pk=org_id)

    client = org.get_temba_client()
    redis_connection = get_redis_connection()
    last_time_key = LAST_FETCHED_RUN_TIME_KEY % org.pk
    last_time = redis_connection.get(last_time_key)

    if last_time is not None:
        last_time = parse_iso8601(last_time)
    else:
        newest_runs = Response.objects.filter(pollrun__poll__org=org).order_by("-created_on")
        newest_runs = newest_runs.exclude(pollrun__pollrun_type=PollRun.TYPE_SPOOFED)
        newest_run = newest_runs.first()
        last_time = newest_run.created_on if newest_run else None

    until = timezone.now()

    total_runs = 0
    for poll in Poll.get_all(org):
        poll_runs = client.get_runs(flows=[poll.flow_uuid], after=last_time, before=until)
        total_runs += len(poll_runs)

        # convert flow runs into poll responses
        for run in poll_runs:
            try:
                Response.from_run(org, run, poll=poll)
            except ValueError as e:
                logger.error("Unable to save run #%d due to error: %s" % (run.id, e.message))
                continue

    logger.info(
        "Fetched %d new and updated runs for org #%d (since=%s)"
        % (total_runs, org.id, format_iso8601(last_time) if last_time else "Never")
    )

    task_result = dict(time=datetime_to_ms(timezone.now()), counts=dict(fetched=total_runs))
    org.set_task_result(TaskType.fetch_runs, task_result)

    redis_connection.set(last_time_key, format_iso8601(until))
Example #19
def process_new_org_unsolicited(org):
    """
    Processes new unsolicited messages for an org in RapidPro
    """
    from .models import Message

    client = org.get_temba_client()

    # when was this task last run?
    last_result = org.get_task_result(TaskType.label_messages)
    if last_result:
        last_time = ms_to_datetime(last_result['time'])
    else:
        # if first time (or Redis bombed...) then we'll fetch back to 3 hours ago
        last_time = timezone.now() - timedelta(hours=3)

    this_time = timezone.now()

    num_messages = 0
    num_labelled = 0

    # grab all un-processed unsolicited messages
    pager = client.pager()
    while True:
        messages = client.get_messages(direction='I',
                                       _types=['I'],
                                       archived=False,
                                       after=last_time,
                                       before=this_time,
                                       pager=pager)
        num_messages += len(messages)
        num_labelled += Message.process_unsolicited(org, messages)

        if not pager.has_more():
            break

    print "Processed %d new unsolicited messages and labelled %d" % (
        num_messages, num_labelled)

    org.set_task_result(
        TaskType.label_messages, {
            'time': datetime_to_ms(this_time),
            'counts': {
                'messages': num_messages,
                'labelled': num_labelled
            }
        })
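
This example also shows the read side of the pattern: the task stores datetime_to_ms(this_time) in its result, and the next run recovers the datetime with ms_to_datetime to decide how far back to fetch, falling back to three hours ago on a cold start. Below is a toy round trip with a plain dict standing in for org.get_task_result(...); the helper definitions are illustrative sketches, not the projects' own code:

from datetime import datetime, timedelta, timezone


def datetime_to_ms(dt):
    # sketch: aware datetime -> epoch milliseconds
    return int(dt.timestamp() * 1000)


def ms_to_datetime(ms):
    # sketch: epoch milliseconds -> aware UTC datetime
    return datetime.fromtimestamp(ms / 1000.0, tz=timezone.utc)


this_time = datetime.now(timezone.utc)
last_result = {'time': datetime_to_ms(this_time), 'counts': {'messages': 0, 'labelled': 0}}

# next run: recover the stored time, or fall back to 3 hours ago if nothing was stored
if last_result:
    last_time = ms_to_datetime(last_result['time'])
else:
    last_time = this_time - timedelta(hours=3)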
Example #20
def fetch_flows(org):
    start = time.time()
    print "Fetching flows for %s" % org.name

    this_time = datetime.now()
    org_flows = dict(time=datetime_to_ms(this_time), results=dict())

    try:
        from ureport.polls.models import CACHE_ORG_FLOWS_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

        temba_client = org.get_temba_client()
        flows = temba_client.get_flows()

        all_flows = dict()
        for flow in flows:
            if flow.rulesets:
                flow_json = dict()
                flow_json['uuid'] = flow.uuid
                flow_json['date_hint'] = flow.created_on.strftime('%Y-%m-%d')
                flow_json['created_on'] = datetime_to_json_date(
                    flow.created_on)
                flow_json['name'] = flow.name
                flow_json['runs'] = flow.runs
                flow_json['archived'] = flow.archived
                flow_json['completed_runs'] = flow.completed_runs
                flow_json['rulesets'] = [
                    dict(uuid=elt.uuid,
                         label=elt.label,
                         response_type=elt.response_type)
                    for elt in flow.rulesets
                ]

                all_flows[flow.uuid] = flow_json

        all_flows_key = CACHE_ORG_FLOWS_KEY % org.pk
        org_flows['results'] = all_flows
        cache.set(all_flows_key, org_flows,
                  UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)

    except:
        client.captureException()
        import traceback
        traceback.print_exc()

    print "Fetch %s flows took %ss" % (org.name, time.time() - start)

    return org_flows.get('results')
Example #21
    def build_boundaries(self):

        this_time = datetime.now()
        temba_client = self.get_temba_client()
        client_boundaries = temba_client.get_boundaries()

        # we now build our cached versions of level 1 (all states) and level 2
        # (all districts for each state) geojson
        start_level = []
        other_levels_by_parent = dict()
        for boundary in client_boundaries:
            if boundary.level == BOUNDARY_START_LEVEL:
                start_level.append(boundary)
            elif boundary.level <= BOUNDARY_END_LEVEL and boundary.parent:
                osm_id = boundary.parent
                if osm_id not in other_levels_by_parent:
                    other_levels_by_parent[osm_id] = []

                districts = other_levels_by_parent[osm_id]
                districts.append(boundary)

        # mini function to convert a list of boundary objects to geojson
        def to_geojson(boundary_list):
            features = [
                dict(type='Feature',
                     geometry=dict(type=b.geometry.type,
                                   coordinates=b.geometry.coordinates),
                     properties=dict(name=b.name, id=b.boundary,
                                     level=b.level)) for b in boundary_list
            ]
            return dict(type='FeatureCollection', features=features)

        boundaries = dict()
        boundaries[BOUNDARY_LEVEL_START_KEY %
                   self.id] = to_geojson(start_level)

        for parent_id in other_levels_by_parent.keys():
            boundaries[BOUNDARY_LEVEL_END_KEY %
                       (self.id, parent_id)] = to_geojson(
                           other_levels_by_parent[parent_id])

        key = BOUNDARY_CACHE_KEY % self.pk
        value = {'time': datetime_to_ms(this_time), 'results': boundaries}
        cache.set(key, value, BOUNDARY_CACHE_TIME)

        return boundaries
Example #22
File: models.py Project: caktus/dash
    def build_boundaries(self):

        this_time = datetime.now()
        temba_client = self.get_temba_client()
        client_boundaries = temba_client.get_boundaries()

        # we now build our cached versions of level 1 (all states) and level 2
        # (all districts for each state) geojson
        states = []
        districts_by_state = dict()
        for boundary in client_boundaries:
            if boundary.level == STATE:
                states.append(boundary)
            elif boundary.level == DISTRICT:
                osm_id = boundary.parent
                if osm_id not in districts_by_state:
                    districts_by_state[osm_id] = []

                districts = districts_by_state[osm_id]
                districts.append(boundary)

        # mini function to convert a list of boundary objects to geojson
        def to_geojson(boundary_list):
            features = [dict(type='Feature',
                             geometry=dict(type=b.geometry.type,
                                           coordinates=b.geometry.coordinates),
                             properties=dict(name=b.name, id=b.boundary, level=b.level))
                        for b in boundary_list]
            return dict(type='FeatureCollection', features=features)

        boundaries = dict()
        boundaries[BOUNDARY_LEVEL_1_KEY % self.id] = to_geojson(states)

        for state_id in districts_by_state.keys():
            boundaries[BOUNDARY_LEVEL_2_KEY % (self.id, state_id)] = to_geojson(
                districts_by_state[state_id])

        key = BOUNDARY_CACHE_KEY % self.pk
        value = {'time': datetime_to_ms(this_time), 'results': boundaries}
        cache.set(key, value, BOUNDARY_CACHE_TIME)

        return boundaries
Example #23
def fetch_flows(org):
    start = time.time()
    print "Fetching flows for %s" % org.name

    this_time = datetime.now()
    org_flows = dict(time=datetime_to_ms(this_time), results=dict())

    try:
        from ureport.polls.models import CACHE_ORG_FLOWS_KEY, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME

        temba_client = org.get_temba_client()
        flows = temba_client.get_flows()

        all_flows = dict()
        for flow in flows:
            if flow.rulesets:
                flow_json = dict()
                flow_json['uuid'] = flow.uuid
                flow_json['date_hint'] = flow.created_on.strftime('%Y-%m-%d')
                flow_json['created_on'] = datetime_to_json_date(flow.created_on)
                flow_json['name'] = flow.name
                flow_json['runs'] = flow.runs
                flow_json['archived'] = flow.archived
                flow_json['completed_runs'] = flow.completed_runs
                flow_json['rulesets'] = [
                    dict(uuid=elt.uuid, label=elt.label, response_type=elt.response_type) for elt in flow.rulesets]

                all_flows[flow.uuid] = flow_json

        all_flows_key = CACHE_ORG_FLOWS_KEY % org.pk
        org_flows['results'] = all_flows
        cache.set(all_flows_key, org_flows, UREPORT_ASYNC_FETCHED_DATA_CACHE_TIME)

    except:
        client.captureException()
        import traceback
        traceback.print_exc()

    print "Fetch %s flows took %ss" % (org.name, time.time() - start)

    return org_flows.get('results')
Example #24
    def sync(self, org):
        recent_contacts = Contact.objects.by_org(org).active()
        recent_contacts = recent_contacts.exclude(temba_modified_on=None)
        recent_contacts = recent_contacts.order_by('-temba_modified_on')

        most_recent = recent_contacts.first()
        sync_regions = [r.uuid for r in Region.get_all(org)]
        sync_groups = [g.uuid for g in Group.get_all(org)]

        created, updated, deleted, failed = sync_pull_contacts(
            org=org, contact_class=Contact, fields=(), delete_blocked=True,
            groups=sync_regions + sync_groups,
            last_time=most_recent.temba_modified_on if most_recent else None)

        org.set_task_result(TaskType.sync_contacts, {
            'time': datetime_to_ms(timezone.now()),
            'counts': {
                'created': len(created),
                'updated': len(updated),
                'deleted': len(deleted),
                'failed': len(failed),
            },
        })
Example #25
def process_new_org_unsolicited(org):
    """
    Processes new unsolicited messages for an org in RapidPro
    """
    from .models import Message

    client = org.get_temba_client()

    # when was this task last run?
    last_result = org.get_task_result(TaskType.label_messages)
    if last_result:
        last_time = ms_to_datetime(last_result['time'])
    else:
        # if first time (or Redis bombed...) then we'll fetch back to 3 hours ago
        last_time = timezone.now() - timedelta(hours=3)

    this_time = timezone.now()

    num_messages = 0
    num_labelled = 0

    # grab all un-processed unsolicited messages
    pager = client.pager()
    while True:
        messages = client.get_messages(direction='I', _types=['I'], archived=False,
                                       after=last_time, before=this_time, pager=pager)
        num_messages += len(messages)
        num_labelled += Message.process_unsolicited(org, messages)

        if not pager.has_more():
            break

    print "Processed %d new unsolicited messages and labelled %d" % (num_messages, num_labelled)

    org.set_task_result(TaskType.label_messages, {'time': datetime_to_ms(this_time),
                                                  'counts': {'messages': num_messages, 'labelled': num_labelled}})
Example #26
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return datetime_to_ms(obj)
        elif isinstance(obj, Decimal):
            return float(obj)
        return json.JSONEncoder.default(self, obj)
Example #27
File: utils.py Project: devartis/tracpro
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return datetime_to_ms(obj)
        elif isinstance(obj, Decimal):
            return float(obj)
        return json.JSONEncoder.default(self, obj)
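
The two default methods above (Examples #26 and #27) are overrides on json.JSONEncoder subclasses, so datetimes serialize as epoch milliseconds and Decimals as plain floats when the encoder is passed to json.dumps. A self-contained usage sketch follows; the MSJSONEncoder name and the inline datetime_to_ms are illustrative stand-ins, not the projects' actual classes:

import calendar
import datetime
import json
from decimal import Decimal


def datetime_to_ms(dt):
    # epoch milliseconds, as sketched after Example #1
    return calendar.timegm(dt.utctimetuple()) * 1000 + dt.microsecond // 1000


class MSJSONEncoder(json.JSONEncoder):
    # hypothetical encoder class wrapping the same default() hook as the examples above
    def default(self, obj):
        if isinstance(obj, datetime.datetime):
            return datetime_to_ms(obj)
        elif isinstance(obj, Decimal):
            return float(obj)
        return json.JSONEncoder.default(self, obj)


payload = {'time': datetime.datetime(2015, 6, 1, 12, 0), 'rate': Decimal('0.75')}
print(json.dumps(payload, cls=MSJSONEncoder))
# {"time": 1433160000000, "rate": 0.75}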