Example #1
def _histo_data_non_cumulative(domain_list, histogram_type, start_date,
        end_date, interval, filters):
    def _get_active_length():
        # TODO - add to configs
        return 90 if histogram_type == 'active_cases' else 30

    timestamps = daterange(interval, iso_string_to_date(start_date),
                           iso_string_to_date(end_date))
    histo_data = {}
    for domain_name_data in domain_list:
        display_name = domain_name_data['display_name']
        domain_data = []
        for timestamp in timestamps:
            past_30_days = _histo_data(
                [domain_name_data],
                histogram_type,
                (timestamp - relativedelta(
                        days=_get_active_length())).isoformat(),
                timestamp.isoformat(),
                filters
            )
            domain_data.append(
                get_data_point(
                    sum(point['count']
                        for point in past_30_days[display_name]),
                    timestamp
                )
            )
        histo_data.update({
            display_name: domain_data
        })
    return histo_data
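Every example in this list calls iso_string_to_date (and often json_format_date) without showing the helpers themselves; in CommCare HQ they appear to come from dimagi.utils.parsing. A minimal stand-in for running the snippets in isolation (the real helpers are likely stricter about validation):

import datetime

def iso_string_to_date(s):
    # parse a 'YYYY-MM-DD' string into a datetime.date
    return datetime.datetime.strptime(s, '%Y-%m-%d').date()

def json_format_date(d):
    # serialize a date (or datetime) back to ISO 'YYYY-MM-DD'
    return d.strftime('%Y-%m-%d')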
Example #2
    def update_context(self):
        self.context["datespan_name"] = self.name

        range = self.request.GET.get('range', None)
        if range:
            dates = str(range).split(_(' to '))
            self.request.datespan.startdate = datetime.datetime.combine(
                iso_string_to_date(dates[0]), datetime.time())
            self.request.datespan.enddate = datetime.datetime.combine(
                iso_string_to_date(dates[1]), datetime.time())

        self.datespan = DateSpan.since(self.default_days, timezone=self.timezone, inclusive=self.inclusive)
        if self.request.datespan.is_valid():
            self.datespan.startdate = self.request.datespan.startdate
            self.datespan.enddate = self.request.datespan.enddate
        self.context['timezone'] = self.timezone.zone
        self.context['datespan'] = self.datespan

        report_labels = json.dumps({
            'year_to_date': _('Year to Date'), 'last_month': _('Last Month'),
            'last_quarter': _('Last Quarter'), 'last_two_quarters': _('Last Two Quarters'),
            'last_three_quarters': _('Last Three Quarters'), 'last_year': _('Last Year'),
            'last_two_years': _('Last Two Years'), 'last_three_years': _('Last Three Years'),
            'last_four_years': _('Last Four Years')
        })

        self.context['report_labels'] = report_labels
        self.context['separator'] = _(' to ')
Example #3
    # nested helper defined inside a method; `self` comes from the enclosing scope
    def date_or_nothing(param):
        if param:
            if self.compare_as_string:
                return iso_string_to_date(param)
            else:
                return datetime.combine(iso_string_to_date(param), time())
        else:
            return None
Example #4
    def handle(self, start, end, *args, **options):
        start = iso_string_to_date(start)
        end = iso_string_to_date(end)
        print("[1] Get all form ids by domain for date range")
        all_form_ids_by_domain = use_json_cache_file(
            filename='all_form_ids_received_{}_to_{}_by_domain.json'.format(start, end),
            fn=lambda: generate_all_form_ids_by_domain(start, end)
        )
        print("[2] Get form ids by domain missing from ES")
        missing_form_ids_by_domain = {}
        for domain, form_ids in all_form_ids_by_domain.items():
            missing_form_ids_by_domain[domain] = use_json_cache_file(
                filename='missing_form_ids_{}_to_{}__{}.json'.format(start, end, domain),
                fn=lambda: get_form_ids_missing_from_elasticsearch(form_ids)
            )
        print("[3] Get all case ids by domain for date range")
        all_case_ids_by_domain = use_json_cache_file(
            filename='all_case_ids_last_modified_{}_to_{}_by_domain.json'.format(start, end),
            fn=lambda: get_all_case_ids_by_domain(start, end)
        )
        print("[4] Get case ids by domain missing from ES")
        missing_case_ids_by_domain = {}
        for domain, case_ids in all_case_ids_by_domain.items():
            if case_ids:
                missing_case_ids_by_domain[domain] = use_json_cache_file(
                    filename='missing_case_ids_{}_to_{}__{}.json'.format(start, end, domain),
                    fn=lambda: get_case_ids_missing_from_elasticsearch(case_ids)
                )

        print("[5] Get all the _revs for these docs")
        case_metadata = use_json_cache_file(
            filename='missing_case_metadata_{}_to_{}.json'.format(start, end),
            fn=lambda: prepare_metadata(missing_case_ids_by_domain)
        )
        form_metadata = use_json_cache_file(
            filename='missing_form_metadata_{}_to_{}.json'.format(start, end),
            fn=lambda: prepare_metadata(missing_form_ids_by_domain)
        )

        print("[6] Publish changes for docs missing from ES!")
        interleaved_changes = (change for pair in zip_longest(
            iter_case_changes(case_metadata),
            iter_form_changes(form_metadata),
        ) for change in pair if change is not None)
        for changes in chunked(interleaved_changes, 100):
            for change in changes:
                publish_change(change)
            time.sleep(1)
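The command above leans on use_json_cache_file to make each expensive step resumable across runs. A minimal sketch of what such a helper presumably does (the real implementation likely adds error handling and logging):

import json
import os

def use_json_cache_file(filename, fn):
    # return the cached JSON if the file exists; otherwise compute, cache, and return
    if os.path.exists(filename):
        with open(filename) as f:
            return json.load(f)
    result = fn()
    with open(filename, 'w') as f:
        json.dump(result, f)
    return result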
Example #5
def get_sample_doc_and_indicators(fake_time_now):
    date_opened = "2014-06-21"
    sample_doc = dict(
        _id='some-doc-id',
        opened_on=date_opened,
        owner_id='some-user-id',
        doc_type="CommCareCase",
        domain='user-reports',
        type='ticket',
        category='bug',
        tags='easy-win public',
        is_starred='yes',
        estimate=2.3,
        priority=4,
    )
    expected_indicators = {
        'doc_id': 'some-doc-id',
        'repeat_iteration': 0,
        'date': iso_string_to_date(date_opened),
        'owner': 'some-user-id',
        'count': 1,
        'category_bug': 1, 'category_feature': 0, 'category_app': 0, 'category_schedule': 0,
        'tags_easy-win': 1, 'tags_potential-dupe': 0, 'tags_roadmap': 0, 'tags_public': 1,
        'is_starred': 1,
        'estimate': Decimal(2.3),
        'priority': 4,
        'inserted_at': fake_time_now,
    }
    return sample_doc, expected_indicators
Example #6
def num_periods_late(product_case, schedule, *schedule_args):
    last_reported = iso_string_to_date(getattr(product_case, 'last_reported', '2000-01-01')[:10])

    class DueDateStream(object):
        """mimic an array of due dates to perform a binary search"""

        def __getitem__(self, i):
            return self.normalize(self.due_date(i + 1))

        def __len__(self):
            """highest number of periods late before we stop caring"""
            max_horizon = 30. * 365.2425 / self.period_length() # arbitrary upper limit -- 30 years
            return math.ceil(max_horizon)

        def due_date(self, n):
            return {
                'weekly': due_date_weekly,
                'monthly': due_date_monthly,
            }[schedule](*schedule_args, past_period=n)

        def period_length(self, n=100):
            """get average length of reporting period"""
            return (self.due_date(0) - self.due_date(n)).days / float(n)

        def normalize(self, dt):
            """convert dates into a numerical scale (where greater == more in the past)"""
            return -(dt - date(2000, 1, 1)).days

    stream = DueDateStream()
    # find the earliest due date that is on or after the most-recent report date,
    # and return how many reporting periods back it occurs
    return bisect.bisect_right(stream, stream.normalize(last_reported))
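Example #6's DueDateStream works because bisect only requires __getitem__ and __len__, so a "virtual" sorted sequence of normalized due dates can be binary-searched without ever materializing it. A self-contained sketch of the same trick with plain integers (the class and the numbers are illustrative, not from the original code):

import bisect

class EveryNDays(object):
    """Virtual ascending sequence: item i is the normalized due date i + 1 periods back."""

    def __init__(self, period_days, horizon=1000):
        self.period_days = period_days
        self.horizon = horizon

    def __getitem__(self, i):
        # larger value == further in the past, so the sequence is ascending
        return (i + 1) * self.period_days

    def __len__(self):
        return self.horizon

# a report whose normalized age is 75 days is 2 thirty-day periods late
stream = EveryNDays(30)
print(bisect.bisect_right(stream, 75))  # -> 2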
Example #7
    def handle(self, *args, **options):
        if len(args) not in [2, 3]:
            raise CommandError('Usage is copy_domain %s' % self.args)

        sourcedb = Database(args[0])
        domain = args[1].strip()
        simulate = options['simulate']
        exclude_attachments = options['exclude_attachments']
        self.run_multi_process = options['run_multi_process']

        since = json_format_date(iso_string_to_date(options['since'])) if options['since'] else None

        if options['list_types']:
            self.list_types(sourcedb, domain, since)
            sys.exit(0)

        if simulate:
            print "\nSimulated run, no data will be copied.\n"

        if options['postgres_db'] and options['postgres_password']:
            settings.DATABASES[options['postgres_db']]['PASSWORD'] = options['postgres_password']

        self.targetdb = Database(args[2]) if len(args) == 3 else get_db()

        try:
            domain_doc = Domain.get_by_name(domain)
        except ResourceNotFound:
            domain_doc = None

        if domain_doc is None:
            self.copy_domain(sourcedb, domain)

        if options['doc_types']:
            doc_types = options['doc_types'].split(',')
            for type in doc_types:
                startkey = [x for x in [domain, type, since] if x is not None]
                endkey = [x for x in [domain, type, {}] if x is not None]
                self.copy_docs(sourcedb, domain, simulate, startkey, endkey, doc_type=type, since=since,
                               postgres_db=options['postgres_db'], exclude_attachments=exclude_attachments)
        elif options['id_file']:
            path = options['id_file']
            if not os.path.isfile(path):
                print "Path '%s' does not exist or is not a file" % path
                sys.exit(1)

            with open(path) as input:
                doc_ids = [line.rstrip('\n') for line in input]

            if not doc_ids:
                print "Path '%s' does not contain any document ID's" % path
                sys.exit(1)

            self.copy_docs(sourcedb, domain, simulate, doc_ids=doc_ids, postgres_db=options['postgres_db'],
                           exclude_attachments=exclude_attachments)
        else:
            startkey = [domain]
            endkey = [domain, {}]
            exclude_types = DEFAULT_EXCLUDE_TYPES + options['doc_types_exclude'].split(',')
            self.copy_docs(sourcedb, domain, simulate, startkey, endkey, exclude_types=exclude_types,
                           postgres_db=options['postgres_db'], exclude_attachments=exclude_attachments)
Example #8
    def headers(self):
        startdate = self.datespan.startdate
        enddate = self.datespan.enddate

        column_headers = []
        group_by = self.group_by[:-2]
        for place in group_by:
            column_headers.append(DataTablesColumn(place.capitalize()))
        column_headers.append(DataTablesColumn("Disease"))

        prev_month = startdate.month
        month_columns = [startdate.strftime(USER_MONTH_FORMAT)]
        for n, day in enumerate(self.daterange(startdate, enddate)):
            day_obj = iso_string_to_date(day)
            month = day_obj.month
            day_column = DataTablesColumn("Day%(n)s (%(day)s)" % {'n':n+1, 'day': day})

            if month == prev_month:
                month_columns.append(day_column)
            else:
                month_group = DataTablesColumnGroup(*month_columns)
                column_headers.append(month_group)
                month_columns = [day_obj.strftime(USER_MONTH_FORMAT), day_column]
                prev_month = month
        
        month_group = DataTablesColumnGroup(*month_columns)
        column_headers.append(month_group)

        return DataTablesHeader(*column_headers)
Example #9
    def rows(self):
        startdate = self.datespan.startdate
        enddate = self.datespan.enddate

        old_data = self.data
        rows = []
        for loc_key in self.keys:
            selected_disease = self.request.GET.get('test_type_disease', '')
            selected_disease = selected_disease.split(':') if selected_disease else None
            diseases = [selected_disease[0]] if selected_disease else self.diseases["ids"]
            for disease in diseases:
                row = [capitalize_fn(x) for x in loc_key]
                disease_names = self.diseases["names"]
                index = self.diseases['ids'].index(disease)
                row.append(disease_names[index])
                for n, day in enumerate(self.daterange(startdate, enddate)):
                    temp_key = list(loc_key)
                    temp_key.append(iso_string_to_date(day))
                    temp_key.append(disease)
                    keymap = old_data.get(tuple(temp_key), None)
                    day_count = (keymap["day_count"] if keymap else None)
                    row.append(format_datatables_data(day_count or self.no_value, day_count or 0))
                rows.append(row)

        self.total_row = calculate_total_row(rows)
        self.total_row[0] = 'Total'
        return rows
Example #10
def datestring_minus_days(datestring, days):
    """
    returns e.g. '2015-04-08T00:00:00' (date + zeroed out time)
    """
    # todo: should this return '2015-04-08' instead?
    date = datetime.datetime.combine(
        iso_string_to_date(datestring[:10]), datetime.time())
    return (date - datetime.timedelta(days=days)).isoformat()
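A quick, runnable approximation of what Example #10 produces, with strptime standing in for iso_string_to_date:

import datetime

def datestring_minus_days_sketch(datestring, days):
    # same idea as above; strptime is a stand-in for iso_string_to_date
    day = datetime.datetime.strptime(datestring[:10], '%Y-%m-%d')
    return (day - datetime.timedelta(days=days)).isoformat()

print(datestring_minus_days_sketch('2015-04-15T09:30:00', 7))  # '2015-04-08T00:00:00'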
Example #11
    def date(self):
        from_req = self.request.GET.get("date")
        if from_req:
            try:
                return iso_string_to_date(from_req)
            except ValueError:
                pass

        return datetime.date.today()
Example #12
def chw_calendar_submit_report(request, username, interval=7):
    """Calendar view of submissions by CHW, overlaid with their scheduled visits, and whether they made them or not."""
    return_context = {}
    return_context['username'] = username
    total_interval = interval
    if 'interval' in request.GET:
        try:
            total_interval = int(request.GET['interval'])
        except ValueError:
            pass

    #secret date ranges
    if 'enddate' in request.GET:
        end_date_str = request.GET.get('enddate', json_format_date(datetime.utcnow()))
        end_date = iso_string_to_date(end_date_str)
    else:
        end_date = datetime.utcnow().date()

    if 'startdate' in request.GET:
        #if there's a startdate, trump interval
        start_date_str = request.GET.get('startdate', json_format_date(datetime.utcnow()))
        start_date = iso_string_to_date(start_date_str)
        total_interval = (end_date - start_date).days

    ret, patients, total_scheduled, total_visited = get_schedule_tally(username,
                                                                       total_interval,
                                                                       override_date=end_date)

    if len(ret) > 0:
        return_context['date_arr'] = ret
        return_context['total_scheduled'] = total_scheduled
        return_context['total_visited'] = total_visited
        return_context['start_date'] = ret[0][0]
        return_context['end_date'] = ret[-1][0]
    else:
        return_context['total_scheduled'] = 0
        return_context['total_visited'] = 0

    return return_context
Example #13
    def report_context(self):
        ret = {}
        if 'dot_patient' not in self.request.GET or self.request.GET.get('dot_patient') == "":
            self.report_template_path = "pact/dots/dots_report_nopatient.html"
            return ret
        submit_id = self.request.GET.get('submit_id', None)
        ret['dot_case_id'] = self.request.GET['dot_patient']
        casedoc = PactPatientCase.get(ret['dot_case_id'])
        ret['patient_case'] = casedoc
        start_date_str = self.request.GET.get('startdate',
                                              json_format_date(datetime.utcnow() - timedelta(days=7)))
        end_date_str = self.request.GET.get('enddate', json_format_date(datetime.utcnow()))

        start_date = datetime.combine(iso_string_to_date(start_date_str), time())
        end_date = datetime.combine(iso_string_to_date(end_date_str), time())

        ret['startdate'] = start_date_str
        ret['enddate'] = end_date_str

        dcal = DOTCalendarReporter(casedoc, start_date=start_date, end_date=end_date, submit_id=submit_id)
        ret['dot_calendar'] = dcal

        unique_visits = dcal.unique_xforms()
        xform_es = ReportXFormES(PACT_DOMAIN)

        q = xform_es.base_query(size=len(unique_visits))
        lvisits = list(unique_visits)
        if len(lvisits) > 0:
            q['filter']['and'].append({"ids": {"values": lvisits}})
            #todo double check pactid/caseid matches
        q['sort'] = {'received_on': 'desc'}
        res = xform_es.run_query(q)

        #ugh, not storing all form data by default - need to get?
        ret['sorted_visits'] = [DOTSubmission.wrap(x['_source']) for x in
                                filter(lambda x: x['_source']['xmlns'] == XMLNS_DOTS_FORM,
                                       res['hits']['hits'])]
        return ret
Example #14
    def get_date(cls, request, date_type):
        date_str = cls.get_date_str(request, date_type)
        if date_str is not None:
            try:
                return datetime.datetime.combine(iso_string_to_date(date_str), datetime.time())
            except ValueError:
                if date_type == cls.START_DATE:
                    return datetime.datetime.today() - datetime.timedelta(days=cls.default_days)
                elif date_type == cls.END_DATE:
                    return datetime.datetime.today()
                else:
                    return None
        else:
            return None
Example #15
    def rows(self):
        """
            Override this method to create a functional tabular report.
            Returns 2D list of rows.
            [['row1'],[row2']]
        """
        case_id = self.request.GET.get('dot_patient', '')
        start_date_str = self.request.GET.get('startdate',
                                              json_format_date(datetime.utcnow() - timedelta(days=7)))
        end_date_str = self.request.GET.get('enddate', json_format_date(datetime.utcnow()))

        if case_id == '':
            mode = 'all'
        else:
            mode = ''

        start_datetime = datetime.combine(iso_string_to_date(start_date_str),
                                          time())
        end_datetime = datetime.combine(iso_string_to_date(end_date_str),
                                        time())
        for num, obs in enumerate(self.tabular_data(mode, case_id, start_datetime, end_datetime)):
            dict_obj = obs.to_json()
            row = [
                dict_obj[x.prop_name].encode('utf-8')
                if isinstance(dict_obj[x.prop_name], unicode)
                else dict_obj[x.prop_name]
                for x in self.headers
            ]
            yield row
Example #16
def transform_date(item):
    # postgres crashes on empty strings, but is happy to take null dates
    if item:
        if isinstance(item, string_types):
            try:
                return iso_string_to_date(item)
            except ValueError:
                try:
                    return iso_string_to_datetime(item, strict=True).date()
                except ValueError:
                    return None
        elif isinstance(item, datetime):
            return item.date()
        elif isinstance(item, date):
            return item
    return None
Example #17
def transform_datetime(item):
    if item:
        if isinstance(item, string_types):
            try:
                return iso_string_to_datetime(item, strict=True)
            except ValueError:
                try:
                    parsed_item = iso_string_to_date(item)
                    return datetime.combine(parsed_item, time(0, 0, 0))
                except ValueError:
                    pass
        elif isinstance(item, datetime):
            return item
        elif isinstance(item, date):
            return datetime.combine(item, time(0, 0, 0))

    return None
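Examples #16 and #17 share a pattern: try one parser, fall back to the other, and return None rather than raise on bad input. A hedged, self-contained approximation of Example #16's fallback chain, using datetime.fromisoformat as a stand-in for the project's parsers (the real iso_string_to_datetime with strict=True is likely pickier):

from datetime import date, datetime

def transform_date_sketch(item):
    # empty strings and None fall through to the final return
    if isinstance(item, str) and item:
        try:
            return date.fromisoformat(item)                 # e.g. '2014-06-21'
        except ValueError:
            try:
                return datetime.fromisoformat(item).date()  # e.g. '2014-06-21T13:45:00'
            except ValueError:
                return None
    if isinstance(item, datetime):
        return item.date()
    if isinstance(item, date):
        return item
    return None

print(transform_date_sketch('2014-06-21'))           # 2014-06-21
print(transform_date_sketch('2014-06-21T13:45:00'))  # 2014-06-21
print(transform_date_sketch('not a date'))           # None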
Example #18
    def _runCreateUserFromRegistrationTest(self):
        """
        test creating of couch user from a registration xmlns.
        this is more of an integration test than a unit test.
        """
        couch_user, created = CommCareUser.create_or_update_from_xform(self.xform)
        self.assertEqual(couch_user.user_id, self.uuid)
        # czue: removed lxml reference
        # uuid = ET.fromstring(xml).findtext(".//{http://openrosa.org/user/registration}uuid")
        couch_user = CommCareUser.get_by_user_id(self.xform.form['uuid'])

        self.assertNotEqual(couch_user, None)
        self.assertEqual(couch_user.username, format_username(self.username, self.domain))
        self.assertEqual(couch_user.domain, self.domain)
        self.assertEqual(couch_user.user_id, self.uuid)
        date = iso_string_to_date(self.date_string)
        self.assertEqual(couch_user.created_on, force_to_datetime(date))
        self.assertEqual(couch_user.device_ids[0], self.registering_device_id)

        django_user = couch_user.get_django_user()
        self.assertEqual(couch_user.user_id, CouchUser.from_django_user(django_user).user_id)
Example #19
def date_or_nothing(param):
    if param:
        return datetime.combine(iso_string_to_date(param), time())
    else:
        return None
Example #20
    def get_date(self):
        return datetime.combine(
            iso_string_to_date(self.request_params.get('date')),
            time()
        )
Example #21
    def handle(self, sourcedb, domain, targetdb, **options):
        self.exclude_dbs = (
            # these have data we don't want to copy
            'receiverwrapper',
            'auditcare',
            'fluff-bihar',
            'fluff-mc',
            'fluff-cvsu',
            'mvp-indicators',
            'm4change',
            # todo: missing domain/docs, but probably want to add back
            'meta',
        )
        self.source_couch = source_couch = self._get_couch_database_configs_from_string(
            sourcedb)
        simulate = options['simulate']
        exclude_attachments = options['exclude_attachments']
        self.run_multi_process = options['run_multi_process']

        since = json_format_date(iso_string_to_date(
            options['since'])) if options['since'] else None

        if options['list_types']:
            for sourcedb_name, sourcedb in self.iter_source_dbs():
                self.list_types(sourcedb, domain, since)
            sys.exit(0)

        if simulate:
            print("\nSimulated run, no data will be copied.\n")

        if options['postgres_db'] and options['postgres_password']:
            settings.DATABASES[options['postgres_db']]['PASSWORD'] = options[
                'postgres_password']

        self.target_couch = self._get_couch_database_configs_from_string(
            targetdb)

        try:
            domain_doc = Domain.get_by_name(domain)
        except ResourceNotFound:
            domain_doc = None

        if domain_doc is None:
            self.copy_domain(source_couch, domain)

        if options['doc_types']:
            doc_types = options['doc_types'].split(',')
            for doc_type in doc_types:
                sourcedb = source_couch.get_db_for_doc_type(doc_type)
                startkey = [
                    x for x in [domain, doc_type, since] if x is not None
                ]
                endkey = [x for x in [domain, doc_type, {}] if x is not None]
                self.copy_docs(sourcedb,
                               domain,
                               simulate,
                               startkey,
                               endkey,
                               doc_type=doc_type,
                               since=since,
                               postgres_db=options['postgres_db'],
                               exclude_attachments=exclude_attachments)
        elif options['id_file']:
            path = options['id_file']
            if not os.path.isfile(path):
                print("Path '%s' does not exist or is not a file" % path)
                sys.exit(1)

            with open(path) as input:
                doc_ids = [line.rstrip('\n') for line in input]

            if not doc_ids:
                print("Path '%s' does not contain any document ID's" % path)
                sys.exit(1)

            for sourcedb_name, sourcedb in self.iter_source_dbs():
                self.copy_docs(sourcedb,
                               domain,
                               simulate,
                               doc_ids=doc_ids,
                               postgres_db=options['postgres_db'],
                               exclude_attachments=exclude_attachments)
        else:
            startkey = [domain]
            endkey = [domain, {}]
            exclude_types = DEFAULT_EXCLUDE_TYPES + options[
                'doc_types_exclude'].split(',')
            for sourcedb_name, sourcedb in self.iter_source_dbs():
                self.copy_docs(sourcedb,
                               domain,
                               simulate,
                               startkey,
                               endkey,
                               exclude_types=exclude_types,
                               postgres_db=options['postgres_db'],
                               exclude_attachments=exclude_attachments)
Example #22
    def date(self, d):
        return datetime.combine(iso_string_to_date(d), time())
Example #23
    def get_date(self):
        return datetime.combine(
            iso_string_to_date(self.request_params.get('date')), time())
Example #24
    def handle(self, *args, **options):
        if len(args) not in [2, 3]:
            raise CommandError('Usage is copy_domain %s' % self.args)
        self.exclude_dbs = (
            # these have data we don't want to copy
            'receiverwrapper', 'couchlog', 'auditcare', 'fluff-bihar', 'fluff-opm',
            'fluff-mc', 'fluff-cvsu', 'mvp-indicators', 'm4change',
            # todo: missing domain/docs, but probably want to add back
            'meta',
        )
        self.source_couch = source_couch = CouchConfig(args[0])
        domain = args[1].strip()
        simulate = options['simulate']
        exclude_attachments = options['exclude_attachments']
        self.run_multi_process = options['run_multi_process']

        since = json_format_date(iso_string_to_date(options['since'])) if options['since'] else None

        if options['list_types']:
            for sourcedb_name, sourcedb in self.iter_source_dbs():
                self.list_types(sourcedb, domain, since)
            sys.exit(0)

        if simulate:
            print "\nSimulated run, no data will be copied.\n"

        if options['postgres_db'] and options['postgres_password']:
            settings.DATABASES[options['postgres_db']]['PASSWORD'] = options['postgres_password']

        self.targetdb = CouchConfig(args[2]) if len(args) == 3 else CouchConfig()

        try:
            domain_doc = Domain.get_by_name(domain)
        except ResourceNotFound:
            domain_doc = None

        if domain_doc is None:
            self.copy_domain(source_couch, domain)

        if options['doc_types']:
            doc_types = options['doc_types'].split(',')
            for doc_type in doc_types:
                sourcedb = source_couch.get_db_for_doc_type(doc_type)
                startkey = [x for x in [domain, doc_type, since] if x is not None]
                endkey = [x for x in [domain, doc_type, {}] if x is not None]
                self.copy_docs(sourcedb, domain, simulate, startkey, endkey, doc_type=doc_type, since=since,
                               postgres_db=options['postgres_db'], exclude_attachments=exclude_attachments)
        elif options['id_file']:
            path = options['id_file']
            if not os.path.isfile(path):
                print "Path '%s' does not exist or is not a file" % path
                sys.exit(1)

            with open(path) as input:
                doc_ids = [line.rstrip('\n') for line in input]

            if not doc_ids:
                print "Path '%s' does not contain any document ID's" % path
                sys.exit(1)

            for sourcedb_name, sourcedb in self.iter_source_dbs():
                self.copy_docs(sourcedb, domain, simulate, doc_ids=doc_ids, postgres_db=options['postgres_db'],
                               exclude_attachments=exclude_attachments)
        else:
            startkey = [domain]
            endkey = [domain, {}]
            exclude_types = DEFAULT_EXCLUDE_TYPES + options['doc_types_exclude'].split(',')
            for sourcedb_name, sourcedb in self.iter_source_dbs():
                self.copy_docs(sourcedb, domain, simulate, startkey, endkey, exclude_types=exclude_types,
                               postgres_db=options['postgres_db'], exclude_attachments=exclude_attachments)
Example #25
    def date(self, d):
        return datetime.combine(iso_string_to_date(d), time())
Example #26
def date_or_nothing(param):
    if param:
        return datetime.combine(iso_string_to_date(param), time())
    else:
        return None

Example #27
def _get_new_date_value(case, plain_date_props, adjust_date_props):
    def _is_date(value):
        return value and (isinstance(value, datetime.date)
                          or re_date.match(value))

    found_prop, found_value = None, None
    for prop in plain_date_props + adjust_date_props:
        value = case.get_case_property(prop)
        if _is_date(value):
            found_prop, found_value = prop, value
            break

    if found_prop in adjust_date_props:
        date_value = iso_string_to_date(found_value) if isinstance(
            found_value, str) else found_value
        adjusted_date_value = date_value + datetime.timedelta(days=15)
        return json_format_date(adjusted_date_value)
    return found_value
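
A hypothetical way to exercise the 15-day adjustment in _get_new_date_value (as defined above) in isolation; FakeCase, the regex, and the helper stand-ins below are assumptions for the sketch, not part of the original script:

import datetime
import re

# stand-ins for module-level names the function above expects
re_date = re.compile(r'^\d{4}-\d{2}-\d{2}')

def iso_string_to_date(s):
    return datetime.datetime.strptime(s, '%Y-%m-%d').date()

def json_format_date(d):
    return d.strftime('%Y-%m-%d')

class FakeCase(object):
    """Hypothetical stub exposing only get_case_property."""

    def __init__(self, props):
        self.props = props

    def get_case_property(self, name):
        return self.props.get(name)

case = FakeCase({'edd': '2021-03-01'})
print(_get_new_date_value(case, [], ['edd']))  # -> '2021-03-16'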


INACTIVE_LOCATION_IDS = [
    '9074edfe555043fd8f16825a6236a313',
    '62c7aa16b77140b98ed1e4d09ae0b756',
    'de98c400ca394099be014e632d3e342e',
    '21a9e05ff2ee4e468d7d69b66f537d06',
    '38eec5e54108404d86779e4d5735f42e',
    '6840086194254414b89854125f5c84d1',
    '0ff93180c3a44e8f860213f9f15c46a1',
    'f36d29ae1c29442988f27cdb87e6cb9f',