def restore_groups(self):
    """
    Re-adds this contact to every group they were suspended from.
    """
    with self.lock(self.org, self.uuid):
        for suspended in list(self.suspended_groups.all()):
            self.groups.add(suspended)
            self.suspended_groups.remove(suspended)
            get_backend().add_to_group(self.org, self, suspended)
def restore_groups(self):
    """
    Re-adds this contact to the non-dynamic groups they were suspended from.
    """
    with self.lock(self.org, self.uuid):
        for group in list(self.suspended_groups.all()):
            if not group.is_dynamic:
                # dynamic group membership is controlled by the backend, so
                # only static groups are re-added locally and pushed
                self.groups.add(group)
                get_backend().add_to_group(self.org, self, group)
            # NOTE(review): the suspension record is cleared even for dynamic
            # groups — confirm this nesting matches the original formatting
            self.suspended_groups.remove(group)
def create_forwards(cls, org, user, text, urns, original_message):
    """
    Creates a forward of the given message to each URN, pushed to the
    backend as a single broadcast.
    """
    forwards = [
        cls._create(org, user, cls.FORWARD, text, original_message, urn=urn, push=False)
        for urn in urns
    ]
    # push together as a single broadcast
    get_backend().push_outgoing(org, forwards, as_broadcast=True)
    return forwards
def bulk_restore(org, user, messages):
    """
    Un-archives a set of messages locally and on the backend, recording the
    action against the acting user.
    """
    messages = list(messages)
    if not messages:
        return
    pks = [msg.pk for msg in messages]
    org.incoming_messages.filter(org=org, pk__in=pks).update(is_archived=False, modified_on=now())
    get_backend().restore_messages(org, messages)
    MessageAction.create(org, user, messages, MessageAction.RESTORE)
def bulk_archive(org, user, messages):
    """
    Archives a set of messages locally and on the backend, recording the
    action against the acting user.
    """
    messages = list(messages)
    if not messages:
        return
    pks = [msg.pk for msg in messages]
    org.incoming_messages.filter(org=org, pk__in=pks).update(is_archived=True, modified_on=now())
    get_backend().archive_messages(org, messages)
    MessageAction.create(org, user, messages, MessageAction.ARCHIVE)
def bulk_unflag(org, user, messages):
    """
    Un-flags a set of messages locally and on the backend, recording the
    action against the acting user.
    """
    messages = list(messages)
    if not messages:
        return
    pks = [msg.pk for msg in messages]
    org.incoming_messages.filter(org=org, pk__in=pks).update(is_flagged=False, modified_on=now())
    get_backend().unflag_messages(org, messages)
    MessageAction.create(org, user, messages, MessageAction.UNFLAG)
def bulk_unflag(org, user, messages):
    """
    Clears the flagged state of the given messages, both locally and on the
    backend, and records the action.
    """
    messages = list(messages)
    if not messages:
        return
    message_pks = [m.pk for m in messages]
    org.incoming_messages.filter(org=org, pk__in=message_pks).update(is_flagged=False)
    get_backend().unflag_messages(org, messages)
    MessageAction.create(org, user, messages, MessageAction.UNFLAG)
def bulk_archive(org, user, messages):
    """
    Marks the given messages as archived, both locally and on the backend,
    and records the action.
    """
    messages = list(messages)
    if not messages:
        return
    message_pks = [m.pk for m in messages]
    org.incoming_messages.filter(org=org, pk__in=message_pks).update(is_archived=True)
    get_backend().archive_messages(org, messages)
    MessageAction.create(org, user, messages, MessageAction.ARCHIVE)
def bulk_restore(org, user, messages):
    """
    Clears the archived state of the given messages, both locally and on the
    backend, and records the action.
    """
    messages = list(messages)
    if not messages:
        return
    message_pks = [m.pk for m in messages]
    org.incoming_messages.filter(org=org, pk__in=message_pks).update(is_archived=False)
    get_backend().restore_messages(org, messages)
    MessageAction.create(org, user, messages, MessageAction.RESTORE)
def bulk_flag(org, user, messages):
    """
    Flags a set of messages locally and on the backend, recording the action
    against the acting user.
    """
    messages = list(messages)
    if not messages:
        return
    pks = [msg.pk for msg in messages]
    org.incoming_messages.filter(org=org, pk__in=pks).update(is_flagged=True, modified_on=now())
    get_backend().flag_messages(org, messages)
    MessageAction.create(org, user, messages, MessageAction.FLAG)
def bulk_unlabel(org, user, messages, label):
    """
    Removes the given label from a set of messages, un-labelling on the
    backend too when the label is synced, and records the action.
    """
    messages = list(messages)
    if not messages:
        return
    for msg in messages:
        msg.unlabel(label)
    if label.is_synced:
        get_backend().unlabel_messages(org, messages, label)
    MessageAction.create(org, user, messages, MessageAction.UNLABEL, label)
def bulk_unlabel(org, user, messages, label):
    """
    Removes the given label from a set of messages, touching their
    modified_on timestamps, and syncing to the backend if the label is synced.
    """
    messages = list(messages)
    if not messages:
        return
    for msg in messages:
        msg.unlabel(label)
    pks = [msg.pk for msg in messages]
    org.incoming_messages.filter(org=org, pk__in=pks).update(modified_on=now())
    if label.is_synced:
        get_backend().unlabel_messages(org, messages, label)
    MessageAction.create(org, user, messages, MessageAction.UNLABEL, label)
def create_bulk_replies(cls, org, user, text, messages):
    """
    Creates a reply to each of the given incoming messages, pushed to the
    backend as a single broadcast.

    Raises ValueError if no messages are given.
    """
    if not messages:
        raise ValueError("Must specify at least one message to reply to")
    replies = [
        cls._create(org, user, cls.BULK_REPLY, text, incoming, contact=incoming.contact, push=False)
        for incoming in messages
    ]
    # push together as a single broadcast
    get_backend().push_outgoing(org, replies, as_broadcast=True)
    return replies
def suspend_groups(self):
    """
    Removes this contact from the org's suspend-from groups, remembering them
    so membership can be restored later.
    """
    with self.lock(self.org, self.uuid):
        if self.suspended_groups.all():
            raise ValueError("Can't suspend from groups as contact is already suspended from groups")
        current_groups = list(self.groups.all())
        suspend_pks = {g.pk for g in Group.get_suspend_from(self.org)}
        for group in current_groups:
            if group.pk not in suspend_pks:
                continue
            self.groups.remove(group)
            self.suspended_groups.add(group)
            get_backend().remove_from_group(self.org, self, group)
def suspend_groups(self):
    """
    Suspends this contact from the org's suspend-from groups, recording them
    so they can be re-added later.
    """
    with self.lock(self.org, self.uuid):
        if self.suspended_groups.all():  # pragma: no cover
            raise ValueError("Can't suspend from groups as contact is already suspended from groups")
        current = list(self.groups.all())
        to_suspend = set(Group.get_suspend_from(self.org))
        for group in current:
            if group not in to_suspend:
                continue
            self.groups.remove(group)
            self.suspended_groups.add(group)
            get_backend().remove_from_group(self.org, self, group)
def bulk_unlabel(org, user, messages, label):
    """
    Strips the given label from the messages, bumps their modified_on
    timestamps, and syncs the removal to the backend for synced labels.
    """
    messages = list(messages)
    if not messages:
        return
    for msg in messages:
        msg.unlabel(label)
    message_pks = [m.pk for m in messages]
    org.incoming_messages.filter(org=org, pk__in=message_pks).update(modified_on=now())
    if label.is_synced:
        get_backend().unlabel_messages(org, messages, label)
    MessageAction.create(org, user, messages, MessageAction.UNLABEL, label)
def get_timeline(self, after, before, merge_from_backend):
    """
    Gets this contact's timeline of messages and actions between the given
    times, as a list of {'time', 'type', 'item'} dicts sorted by time.
    Backend messages are merged in only when merge_from_backend is set.
    """
    local_outgoing = self.outgoing_messages.filter(created_on__gte=after, created_on__lte=before)
    local_outgoing = local_outgoing.select_related('case', 'contact', 'created_by').order_by('-created_on')
    local_incoming = self.incoming_messages.filter(created_on__gte=after, created_on__lte=before)
    local_incoming = local_incoming.select_related('case', 'contact').prefetch_related('labels')
    local_incoming = local_incoming.order_by('-created_on')
    # merge local incoming and outgoing
    local_messages = chain(local_outgoing, local_incoming)
    messages = [{'time': msg.created_on, 'type': 'M', 'item': msg.as_json()} for msg in local_messages]
    if merge_from_backend:
        # if this is the initial request, fetch additional messages from the backend
        backend = get_backend()
        backend_messages = backend.fetch_contact_messages(self.org, self.contact, after, before)
        # add any backend messages that don't exist locally (matched on
        # broadcast id to avoid duplicating local outgoing messages)
        if backend_messages:
            local_broadcast_ids = {o.backend_broadcast_id for o in local_outgoing if o.backend_broadcast_id}
            for msg in backend_messages:
                if msg['id'] not in local_broadcast_ids:
                    messages.append({'time': msg['time'], 'type': 'M', 'item': msg})
    # fetch actions in chronological order
    actions = self.actions.filter(created_on__gte=after, created_on__lte=before)
    actions = actions.select_related('assignee', 'created_by').order_by('pk')
    actions = [{'time': a.created_on, 'type': 'A', 'item': a.as_json()} for a in actions]
    # merge actions and messages and sort by time
    return sorted(messages + actions, key=lambda event: event['time'])
def _create(cls, org, user, activity, text, reply_to, contact=None, urn=None, case=None, push=True):
    """
    Creates and saves a new outgoing message, optionally pushing it to the
    backend immediately.

    Raises ValueError if text is empty or no recipient (contact or URN) is
    given.
    """
    if not text:
        raise ValueError("Message text cannot be empty")
    if not contact and not urn:  # pragma: no cover
        raise ValueError("Message must have a recipient")

    msg = cls.objects.create(
        org=org,
        partner=user.get_partner(org),
        activity=activity,
        text=text,
        contact=contact,
        urn=urn,
        reply_to=reply_to,
        case=case,
        created_by=user,
    )
    if push:
        get_backend().push_outgoing(org, [msg])
    return msg
def handle(self, *args, **options):
    """
    Pulls all fields, groups and contacts for the specified org after
    interactive confirmation.
    """
    org_id = int(options['org_id'])
    try:
        org = Org.objects.get(pk=org_id)
    except Org.DoesNotExist:
        raise CommandError("No such org with id %d" % org_id)

    prompt = """You have requested to pull all contacts, groups and fields for org '%s' (#%d). Are you sure you want to do this? Type 'yes' to continue, or 'no' to cancel: """ % (org.name, org.pk)

    # raw_input() was removed in Python 3; input() is the equivalent and is
    # what the sibling pull-messages command already uses
    if input(prompt).lower() != 'yes':
        self.stdout.write("Operation cancelled")
        return

    def progress_callback(num_synced):
        # periodic feedback during the (potentially long) contact pull
        self.stdout.write(" > Synced %d contacts..." % num_synced)

    backend = get_backend()

    created, updated, deleted, ignored = backend.pull_fields(org)
    self.stdout.write("Finished field pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))

    created, updated, deleted, ignored = backend.pull_groups(org)
    self.stdout.write("Finished group pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))

    created, updated, deleted, ignored = backend.pull_contacts(org, None, timezone.now(), progress_callback)
    self.stdout.write("Finished contact pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))
def suspend_groups(self):
    """
    Takes this contact out of the org's suspend-from groups, tracking the
    removals so they can be undone later.
    """
    with self.lock(self.org, self.uuid):
        if self.suspended_groups.all():  # pragma: no cover
            raise ValueError("Can't suspend from groups as contact is already suspended from groups")

        memberships = list(self.groups.all())
        suspendable = set(Group.get_suspend_from(self.org))

        for group in memberships:
            if group in suspendable:
                self.groups.remove(group)
                self.suspended_groups.add(group)
                get_backend().remove_from_group(self.org, self, group)
def get_timeline(self, after, before, merge_from_backend):
    """
    Gets this contact's timeline of messages and actions between the given
    times, as a list of TimelineItem objects. Backend messages are merged in
    only when merge_from_backend is set.
    """
    local_outgoing = self.outgoing_messages.filter(created_on__gte=after, created_on__lte=before)
    local_outgoing = local_outgoing.select_related('case', 'contact', 'created_by').order_by('-created_on')
    local_incoming = self.incoming_messages.filter(created_on__gte=after, created_on__lte=before)
    local_incoming = local_incoming.select_related('case', 'contact').prefetch_related('labels')
    local_incoming = local_incoming.order_by('-created_on')
    # merge local incoming and outgoing
    timeline = [TimelineItem(msg) for msg in chain(local_outgoing, local_incoming)]
    if merge_from_backend:
        # if this is the initial request, fetch additional messages from the backend
        backend = get_backend()
        backend_messages = backend.fetch_contact_messages(self.org, self.contact, after, before)
        # add any backend messages that don't exist locally (matched on
        # broadcast id to avoid duplicating local outgoing messages)
        if backend_messages:
            local_broadcast_ids = {o.backend_broadcast_id for o in local_outgoing if o.backend_broadcast_id}
            for msg in backend_messages:
                if msg.backend_broadcast_id not in local_broadcast_ids:
                    timeline.append(TimelineItem(msg))
    # fetch and append actions
    actions = self.actions.filter(created_on__gte=after, created_on__lte=before)
    actions = actions.select_related('assignee', 'user_assignee', 'created_by')
    timeline += [TimelineItem(a) for a in actions]
    # sort timeline by reverse chronological order
    # NOTE(review): sorted() without reverse=True is ascending by time, which
    # contradicts the comment above — confirm the intended order
    return sorted(timeline, key=lambda item: item.get_time())
def create_forwards(cls, org, user, text, urns, original_message):
    """
    Forwards the given message to each of the given URNs, sending them all
    as one broadcast.
    """
    forwards = [cls._create(org, user, cls.FORWARD, text, original_message, urn=u, push=False) for u in urns]

    # send together as one broadcast rather than one push per URN
    get_backend().push_outgoing(org, forwards, as_broadcast=True)
    return forwards
def pull_messages(org, since, until):
    """
    Pulls new unsolicited messages for an org
    """
    from casepro.backend import get_backend
    backend = get_backend()

    # on a first run there is no watermark, so only fetch back one hour
    if not since:
        since = until - timedelta(hours=1)

    l_created, l_updated, l_deleted, _ = backend.pull_labels(org)
    m_created, m_updated, m_deleted, _ = backend.pull_messages(org, since, until)

    return {
        'labels': {'created': l_created, 'updated': l_updated, 'deleted': l_deleted},
        'messages': {'created': m_created, 'updated': m_updated, 'deleted': m_deleted}
    }
def get_or_create_from_urn(cls, org, urn, name=None):
    """
    Gets an existing contact or creates a new contact. Used when opening a
    case without an initial message. New contacts are pushed to the backend.
    """
    normalized = URN.normalize(urn)
    contact = cls.objects.filter(urns__contains=[normalized]).first()
    if contact:
        return contact

    # no local match — validate the URN before creating a real (non-stub) contact
    URN.validate(normalized)
    contact = cls.objects.create(org=org, name=name, urns=[normalized], is_stub=False)
    get_backend().push_contact(org, contact)
    return contact
def create_bulk_replies(cls, org, user, text, messages):
    """
    Replies to every given incoming message with the same text, sending all
    replies as one broadcast.

    Raises ValueError if no messages are given.
    """
    if not messages:
        raise ValueError("Must specify at least one message to reply to")

    replies = []
    for original in messages:
        reply = cls._create(org, user, cls.BULK_REPLY, text, original, contact=original.contact, push=False)
        replies.append(reply)

    # one broadcast rather than a separate push per reply
    get_backend().push_outgoing(org, replies, as_broadcast=True)
    return replies
def handle_messages(org, since, until):
    """
    Processes all unhandled messages for an org: messages replying to an open
    case are attached to it and archived; otherwise keyword labels are applied.
    Returns counts of messages processed/labelled/attached.
    """
    # NOTE(review): since/until are unused in this body — confirm whether
    # they were meant to constrain the unhandled query
    from casepro.backend import get_backend
    from casepro.cases.models import Case, Label
    from .models import Message

    backend = get_backend()
    labelled, unlabelled, case_replies = [], [], []

    # fetch all unhandled messages who now have full contacts
    unhandled = list(Message.get_unhandled(org).filter(contact__is_stub=False).select_related('contact'))
    if unhandled:
        labels_by_keyword = Label.get_keyword_map(org)
        label_matches = defaultdict(list)  # messages that match each label

        for msg in unhandled:
            open_case = Case.get_open_for_contact_on(org, msg.contact, msg.created_on)
            if open_case:
                msg.case = open_case
                msg.is_archived = True
                msg.save(update_fields=('case', 'is_archived'))
                case_replies.append(msg)
            else:
                # only apply labels if there isn't a currently open case for this contact
                matched_labels = msg.auto_label(labels_by_keyword)
                if matched_labels:
                    labelled.append(msg)
                    for label in matched_labels:
                        label_matches[label].append(msg)
                else:
                    unlabelled.append(msg)

        # add labels to matching messages
        for label, matched_msgs in six.iteritems(label_matches):
            if matched_msgs:
                # TODO check for pointless re-labelling
                for msg in matched_msgs:
                    msg.labels.add(label)
                backend.label_messages(org, matched_msgs, label)

        # archive messages which are case replies on the backend
        if case_replies:
            backend.archive_messages(org, case_replies)

    # mark all of these messages as handled
    Message.objects.filter(pk__in=[m.pk for m in unhandled]).update(is_handled=True)

    return {'messages': len(unhandled), 'labelled': len(labelled), 'case_replies': len(case_replies)}
def handle_messages(org):
    """
    Processes all unhandled messages for an org: replies to open cases are
    attached to those cases and archived on the backend; other messages are
    run through the org's rules. Returns counts of the work done.
    """
    from casepro.backend import get_backend
    from casepro.cases.models import Case
    from casepro.rules.models import Rule
    from .models import Message

    backend = get_backend()
    case_replies = []
    num_rules_matched = 0

    # fetch all unhandled messages who now have full contacts
    unhandled = Message.get_unhandled(org).filter(contact__is_stub=False)
    unhandled = list(unhandled.select_related('contact').prefetch_related('contact__groups'))
    if unhandled:
        rules = Rule.get_all(org)
        rule_processor = Rule.BatchProcessor(org, rules)

        for msg in unhandled:
            open_case = Case.get_open_for_contact_on(org, msg.contact, msg.created_on)

            # only apply rules if there isn't a currently open case for this contact
            if open_case:
                open_case.add_reply(msg)
                case_replies.append(msg)
            else:
                rules_matched, actions_deferred = rule_processor.include_messages(msg)
                num_rules_matched += rules_matched

        # archive messages which are case replies on the backend
        if case_replies:
            backend.archive_messages(org, case_replies)

        # rule actions are batched and applied once at the end
        rule_processor.apply_actions()

    # mark all of these messages as handled
    Message.objects.filter(pk__in=[m.pk for m in unhandled]).update(is_handled=True)

    return {'handled': len(unhandled), 'rules_matched': num_rules_matched, 'case_replies': len(case_replies)}
def handle(self, *args, **options):
    """
    Pulls and labels messages for an org over the requested time window
    (--days/--weeks), after interactive confirmation.
    """
    org_id = int(options['org_id'])
    try:
        org = Org.objects.get(pk=org_id)
    except Org.DoesNotExist:
        raise CommandError("No such org with id %d" % org_id)

    days, weeks, as_handled = options['days'], options['weeks'], options['as_handled']
    if not (days or weeks):
        raise CommandError("Must provide at least one of --days or --weeks")

    now = timezone.now()
    since = now - relativedelta(days=days, weeks=weeks)

    prompt = """You have requested to pull and label messages for org '%s' (#%d), since %s. Are you sure you want to do this? DO NOT RUN THIS COMMAND WHILST BACKGROUND SYNCING IS RUNNING Type 'yes' to continue, or 'no' to cancel: """ % (org.name, org.pk, since.strftime('%b %d, %Y %H:%M'))

    if input(prompt).lower() != 'yes':
        self.stdout.write("Operation cancelled")
        return

    def progress_callback(num_synced):
        # periodic feedback during the (potentially long) message pull
        self.stdout.write(" > Synced %d messages..." % num_synced)

    backend = get_backend()

    created, updated, deleted, ignored = backend.pull_labels(org)
    self.stdout.write("Finished label pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))

    created, updated, deleted, ignored = backend.pull_messages(org, since, now, as_handled, progress_callback)
    self.stdout.write("Finished message pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))
def pull_messages(org, since, until):
    """
    Pulls new unsolicited messages for an org
    """
    from casepro.backend import get_backend
    backend = get_backend()

    # if we're running for the first time, then we'll fetch back to 1 hour ago
    if not since:
        since = until - timedelta(hours=1)

    counts = {}

    created, updated, deleted, _ignored = backend.pull_labels(org)
    counts['labels'] = {'created': created, 'updated': updated, 'deleted': deleted}

    created, updated, deleted, _ignored = backend.pull_messages(org, since, until)
    counts['messages'] = {'created': created, 'updated': updated, 'deleted': deleted}

    return counts
def get_timeline(self, after, before, merge_from_backend):
    """
    Gets this contact's timeline of messages and actions between the given
    times, as a list of TimelineItem objects. Backend messages are merged in
    only when merge_from_backend is set.
    """
    local_outgoing = self.outgoing_messages.filter(created_on__gte=after, created_on__lte=before)
    local_outgoing = local_outgoing.select_related('case', 'contact', 'created_by').order_by('-created_on')
    local_incoming = self.incoming_messages.filter(created_on__gte=after, created_on__lte=before)
    local_incoming = local_incoming.select_related('case', 'contact').prefetch_related('labels')
    local_incoming = local_incoming.order_by('-created_on')

    # merge local incoming and outgoing
    timeline = [TimelineItem(msg) for msg in chain(local_outgoing, local_incoming)]

    if merge_from_backend:
        # if this is the initial request, fetch additional messages from the backend
        backend = get_backend()
        backend_messages = backend.fetch_contact_messages(self.org, self.contact, after, before)

        # add any backend messages that don't exist locally (matched on
        # broadcast id to avoid duplicating local outgoing messages)
        if backend_messages:
            local_broadcast_ids = {o.backend_broadcast_id for o in local_outgoing if o.backend_broadcast_id}
            for msg in backend_messages:
                if msg.backend_broadcast_id not in local_broadcast_ids:
                    timeline.append(TimelineItem(msg))

    # fetch and append actions
    actions = self.actions.filter(created_on__gte=after, created_on__lte=before)
    actions = actions.select_related('assignee', 'user_assignee', 'created_by')
    timeline += [TimelineItem(a) for a in actions]

    # sort timeline by reverse chronological order
    # NOTE(review): sorted() without reverse=True is ascending by time, which
    # contradicts the comment above — confirm the intended order
    return sorted(timeline, key=lambda item: item.get_time())
def pull_contacts(org, since, until):
    """
    Fetches updated contacts from RapidPro and updates local contacts accordingly
    """
    from casepro.backend import get_backend
    backend = get_backend()

    if not since:
        # logger.warn is a deprecated alias of logger.warning; use lazy %-args
        # so formatting only happens if the record is emitted
        logger.warning("First time run for org #%d. Will sync all contacts", org.pk)

    fields_created, fields_updated, fields_deleted, ignored = backend.pull_fields(org)
    groups_created, groups_updated, groups_deleted, ignored = backend.pull_groups(org)
    contacts_created, contacts_updated, contacts_deleted, ignored = backend.pull_contacts(org, since, until)

    return {
        'fields': {'created': fields_created, 'updated': fields_updated, 'deleted': fields_deleted},
        'groups': {'created': groups_created, 'updated': groups_updated, 'deleted': groups_deleted},
        'contacts': {'created': contacts_created, 'updated': contacts_updated, 'deleted': contacts_deleted}
    }
def handle_messages(org):
    """
    Processes all unhandled messages for an org: replies to open cases are
    attached to those cases and archived on the backend; other messages are
    run through the org's rules. Returns counts of the work done.
    """
    from casepro.backend import get_backend
    from casepro.cases.models import Case
    from casepro.rules.models import Rule
    from .models import Message

    backend = get_backend()
    case_replies = []
    num_rules_matched = 0

    # fetch all unhandled messages who now have full contacts
    unhandled = Message.get_unhandled(org).filter(contact__is_stub=False)
    unhandled = list(unhandled.select_related('contact').prefetch_related('contact__groups'))
    if unhandled:
        rules = Rule.get_all(org)
        rule_processor = Rule.BatchProcessor(org, rules)

        for msg in unhandled:
            open_case = Case.get_open_for_contact_on(org, msg.contact, msg.created_on)

            # only apply rules if there isn't a currently open case for this contact
            if open_case:
                open_case.add_reply(msg)
                case_replies.append(msg)
            else:
                rules_matched, actions_deferred = rule_processor.include_messages(msg)
                num_rules_matched += rules_matched

        # archive messages which are case replies on the backend
        if case_replies:
            backend.archive_messages(org, case_replies)

        # rule actions are batched and applied once at the end
        rule_processor.apply_actions()

    # mark all of these messages as handled
    Message.objects.filter(pk__in=[m.pk for m in unhandled]).update(is_handled=True)

    return {'handled': len(unhandled), 'rules_matched': num_rules_matched, 'case_replies': len(case_replies)}
def get_timeline(self, after, before, merge_from_backend):
    """
    Gets this contact's timeline of messages and actions between the given
    times, as a list of {'time', 'type', 'item'} dicts sorted by time. When
    merge_from_backend is set, the message timeline comes from the backend,
    annotated with local sender info where available.
    """
    messages = []
    local_outgoing = self.outgoing_messages.filter(created_on__gte=after, created_on__lte=before)
    local_outgoing = local_outgoing.select_related('case__contact')

    if merge_from_backend:
        # if this is the initial request, get a more complete timeline from the backend
        backend = get_backend()
        backend_messages = backend.fetch_contact_messages(self.org, self.contact, after, before)

        local_by_backend_id = {o.backend_id: o for o in local_outgoing}
        for msg in backend_messages:
            # annotate with sender from local message if there is one
            local = local_by_backend_id.pop(msg['broadcast'], None)
            msg['sender'] = local.created_by.as_json() if local else None
            messages.append({'time': msg['time'], 'type': 'M', 'item': msg})

        # local outgoing not returned by the backend are added as-is
        for msg in local_by_backend_id.values():
            messages.append({'time': msg.created_on, 'type': 'M', 'item': msg.as_json()})
    else:
        # otherwise just merge local outgoing and incoming messages
        for msg in local_outgoing:
            messages.append({'time': msg.created_on, 'type': 'M', 'item': msg.as_json()})

        # NOTE(review): local incoming placed inside the else branch per the
        # comment above — confirm this nesting against version control
        local_incoming = self.incoming_messages.filter(created_on__gte=after, created_on__lte=before)
        local_incoming = local_incoming.select_related('contact')
        for msg in local_incoming:
            messages.append({'time': msg.created_on, 'type': 'M', 'item': msg.as_json()})

    # fetch actions in chronological order
    actions = self.actions.filter(created_on__gte=after, created_on__lte=before)
    actions = actions.select_related('assignee', 'created_by').order_by('pk')
    actions = [{'time': a.created_on, 'type': 'A', 'item': a.as_json()} for a in actions]

    # merge actions and messages and sort by time
    return sorted(messages + actions, key=lambda event: event['time'])
def handle(self, *args, **options):
    """
    Pulls and labels messages for an org over the requested time window
    (--days/--weeks), after interactive confirmation.
    """
    org_id = int(options['org_id'])
    try:
        org = Org.objects.get(pk=org_id)
    except Org.DoesNotExist:
        raise CommandError("No such org with id %d" % org_id)

    days, weeks, as_handled = options['days'], options['weeks'], options['as_handled']
    if not (days or weeks):
        raise CommandError("Must provide at least one of --days or --weeks")

    now = timezone.now()
    since = now - relativedelta(days=days, weeks=weeks)

    prompt = """You have requested to pull and label messages for org '%s' (#%d), since %s. Are you sure you want to do this? DO NOT RUN THIS COMMAND WHILST BACKGROUND SYNCING IS RUNNING Type 'yes' to continue, or 'no' to cancel: """ % (org.name, org.pk, since.strftime('%b %d, %Y %H:%M'))

    # raw_input() was removed in Python 3; input() is the equivalent and is
    # what this command's other version already uses
    if input(prompt).lower() != 'yes':
        self.stdout.write("Operation cancelled")
        return

    def progress_callback(num_synced):
        # periodic feedback during the (potentially long) message pull
        self.stdout.write(" > Synced %d messages..." % num_synced)

    backend = get_backend()

    created, updated, deleted, ignored = backend.pull_labels(org)
    self.stdout.write("Finished label pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))

    created, updated, deleted, ignored = backend.pull_messages(org, since, now, as_handled, progress_callback)
    self.stdout.write("Finished message pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))
def handle(self, *args, **options):
    """
    Pulls all fields, groups and contacts for the specified org after
    interactive confirmation.
    """
    org_id = int(options['org_id'])
    try:
        org = Org.objects.get(pk=org_id)
    except Org.DoesNotExist:
        raise CommandError("No such org with id %d" % org_id)

    prompt = """You have requested to pull all contacts, groups and fields for org '%s' (#%d). Are you sure you want to do this? Type 'yes' to continue, or 'no' to cancel: """ % (org.name, org.pk)

    if input(prompt).lower() != 'yes':
        self.stdout.write("Operation cancelled")
        return

    def progress_callback(num_synced):
        # periodic feedback during the (potentially long) contact pull
        self.stdout.write(" > Synced %d contacts..." % num_synced)

    backend = get_backend()

    created, updated, deleted, ignored = backend.pull_fields(org)
    self.stdout.write("Finished field pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))

    created, updated, deleted, ignored = backend.pull_groups(org)
    self.stdout.write("Finished group pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))

    created, updated, deleted, ignored = backend.pull_contacts(org, None, timezone.now(), progress_callback)
    self.stdout.write("Finished contact pull (%d created, %d updated, %d deleted, %d ignored)" % (created, updated, deleted, ignored))
def pull_contacts(org, since, until):
    """
    Fetches updated contacts from RapidPro and updates local contacts accordingly
    """
    from casepro.backend import get_backend
    backend = get_backend()

    if not since:
        # logger.warn is a deprecated alias of logger.warning; use lazy %-args
        # so formatting only happens if the record is emitted
        logger.warning("First time run for org #%d. Will sync all contacts", org.pk)

    fields_created, fields_updated, fields_deleted, ignored = backend.pull_fields(org)
    groups_created, groups_updated, groups_deleted, ignored = backend.pull_groups(org)
    contacts_created, contacts_updated, contacts_deleted, ignored = backend.pull_contacts(org, since, until)

    return {
        'fields': {'created': fields_created, 'updated': fields_updated, 'deleted': fields_deleted},
        'groups': {'created': groups_created, 'updated': groups_updated, 'deleted': groups_deleted},
        'contacts': {'created': contacts_created, 'updated': contacts_updated, 'deleted': contacts_deleted}
    }
def apply_to(self, org, messages):
    """
    Archives the given messages, locally and on the backend.
    """
    pks = [msg.pk for msg in messages]
    Message.objects.filter(pk__in=pks).update(is_archived=True)
    get_backend().archive_messages(org, messages)
def apply_to(self, org, messages):
    """
    Marks the given messages as archived locally, then archives them on the
    backend.
    """
    message_ids = [m.pk for m in messages]
    Message.objects.filter(pk__in=message_ids).update(is_archived=True)
    get_backend().archive_messages(org, messages)
# project URL configuration: app URLconfs plus backend-contributed patterns
urlpatterns = [
    url(r'', include('casepro.cases.urls')),
    url(r'', include('casepro.contacts.urls')),
    url(r'', include('casepro.msg_board.urls')),
    url(r'', include('casepro.msgs.urls')),
    url(r'', include('casepro.rules.urls')),
    url(r'', include('casepro.profiles.urls')),
    url(r'', include('casepro.orgs_ext.urls')),
    url(r'^pods/', include('casepro.pods.urls')),
    url(r'^stats/', include('casepro.statistics.urls')),
    url(r'^users/', include('dash.users.urls')),
    url(r'^i18n/', include('django.conf.urls.i18n')),
    url(r'^comments/', include('django_comments.urls')),
    url(r'^partials/(?P<template>[a-z0-9\-_]+)\.html$', PartialTemplate.as_view(), name='utils.partial_template')
]

# the configured backend may contribute its own URL patterns
backend_urls = get_backend().get_url_patterns() or []
urlpatterns += backend_urls

if settings.DEBUG:  # pragma: no cover
    try:
        import debug_toolbar
        urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls)))
    except ImportError:
        # debug toolbar is optional in development
        pass

    # in development, serve media and static files from Django itself
    urlpatterns = [
        url(r'^media/(?P<path>.*)$', static.serve, {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
        url(r'', include('django.contrib.staticfiles.urls'))
    ] + urlpatterns
def archive_messages(self):
    """
    Archives all of this contact's incoming messages, locally and on the
    backend.
    """
    self.incoming_messages.update(is_archived=True)
    get_backend().archive_contact_messages(self.org, self)
def expire_flows(self):
    """
    Asks the backend to stop any active flow runs for this contact.
    """
    get_backend().stop_runs(self.org, self)
def apply_to(self, org, messages):
    """
    Applies this action's label to the given messages, syncing to the backend
    when the label is synced.
    """
    for message in messages:
        message.label(self.label)

    if self.label.is_synced:
        get_backend().label_messages(org, messages, self.label)
def expire_flows(self):
    """
    Asks the backend to stop any active flow runs for this contact.
    """
    get_backend().stop_runs(self.org, self)
raise ValueError("Message text cannot be empty") if not contact and not urn: # pragma: no cover raise ValueError("Message must have a recipient") msg = cls.objects.create(org=org, partner=user.get_partner(org), activity=activity, text=text, contact=contact, urn=urn, reply_to=reply_to, case=case, created_by=user) if push: get_backend().push_outgoing(org, [msg]) return msg @classmethod def get_replies(cls, org): return org.outgoing_messages.filter(activity__in=cls.REPLY_ACTIVITIES) @classmethod def search(cls, org, user, search): text = search.get('text') contact_id = search.get('contact') queryset = org.outgoing_messages.all() partner = user.get_partner(org)
def update_label_uuid(sender, instance, **kwargs):
    """
    Signal handler: pushes a synced label that doesn't yet have a UUID to the
    backend, which assigns one.
    """
    needs_push = instance.is_synced and not instance.uuid
    if needs_push:
        get_backend().push_label(instance.org, instance)
url(r'', include('casepro.msg_board.urls')), url(r'', include('casepro.msgs.urls')), url(r'', include('casepro.rules.urls')), url(r'', include('casepro.profiles.urls')), url(r'', include('casepro.orgs_ext.urls')), url(r'^pods/', include('casepro.pods.urls')), url(r'^stats/', include('casepro.statistics.urls')), url(r'^users/', include('dash.users.urls')), url(r'^i18n/', include('django.conf.urls.i18n')), url(r'^comments/', include('django_comments.urls')), url(r'^partials/(?P<template>[a-z0-9\-_]+)\.html$', PartialTemplate.as_view(), name='utils.partial_template') ] backend_urls = get_backend().get_url_patterns() or [] urlpatterns += backend_urls if settings.DEBUG: # pragma: no cover try: import debug_toolbar urlpatterns.append(url(r'^__debug__/', include(debug_toolbar.urls))) except ImportError: pass urlpatterns = [ url(r'^media/(?P<path>.*)$', static.serve, { 'document_root': settings.MEDIA_ROOT, 'show_indexes': True }), url(r'', include('django.contrib.staticfiles.urls'))
def apply_to(self, org, messages):
    """
    Labels each of the given messages with this action's label; synced labels
    are also applied on the backend.
    """
    for msg in messages:
        msg.label(self.label)

    if not self.label.is_synced:
        return
    get_backend().label_messages(org, messages, self.label)
def archive_messages(self):
    """
    Archives all of this contact's incoming messages, locally and on the
    backend.
    """
    self.incoming_messages.update(is_archived=True)
    get_backend().archive_contact_messages(self.org, self)
def update_label_uuid(sender, instance, **kwargs):
    """
    Signal handler: pushes a synced label that doesn't yet have a UUID to the
    backend, which assigns one.
    """
    if instance.is_synced and not instance.uuid:
        get_backend().push_label(instance.org, instance)