Example #1
    def handle(self, csv_file, **options):
        self.domain = 'icds-cas'
        self.case_accessor = CaseAccessors(self.domain)
        with open(csv_file, "w", encoding='utf-8') as output_file:
            field_names = [
                'case_id', 'owner_id', 'modified_on', 'server_modified_on',
                'add', 'edd', 'ccs_phase', 'num_pnc_visits',
                'current_schedule_phase'
            ]

            csv_writer = csv.DictWriter(output_file,
                                        field_names,
                                        extrasaction='ignore')
            csv_writer.writeheader()

            for ccs_case in self._get_cases():
                properties = copy.deepcopy(ccs_case.case_json)

                # Only export cases still in schedule phase 2 that do not
                # yet have an actual delivery date ('add') recorded.
                if 'add' in properties:
                    continue

                if properties.get('current_schedule_phase') != '2':
                    continue

                properties.update({
                    'case_id': ccs_case.case_id,
                    'owner_id': ccs_case.owner_id,
                    'modified_on': ccs_case.modified_on,
                    'server_modified_on': ccs_case.server_modified_on,
                })
                csv_writer.writerow(properties)
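
Examples #1 and #7 pass extrasaction='ignore' to csv.DictWriter because the case dicts carry many more properties than the columns being exported. A minimal standalone sketch of what that flag changes (the values here are made up):

import csv
import io

buf = io.StringIO()
writer = csv.DictWriter(buf, ['case_id', 'owner_id'], extrasaction='ignore')
writer.writeheader()
# 'edd' is not in the field list; with the default extrasaction='raise'
# this row would raise ValueError instead of being written.
writer.writerow({'case_id': 'abc123', 'owner_id': 'xyz', 'edd': '2020-01-01'})
print(buf.getvalue())
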
Example #2
    def handle(self, **options):
        domain = options['domain']
        usernames = options['usernames'].split(',')
        app_id = options['app_id']
        # Look each user up once; stop at the first unknown username.
        users = [CouchUser.get_by_username(username) for username in usernames]
        for username, user in zip(usernames, users):
            if not user:
                print("User '{}' not found".format(username))
                return
        if app_id:
            try:
                get_current_app_doc(domain, app_id)
            except ResourceNotFound:
                print("App '{}' not found".format(app_id))
                return

        headers, rows = _get_headers_and_rows(domain, users, app_id)
        totals_row = _calculate_totals_row(headers, rows)

        filename = "restore_timings_{}.csv".format(
            get_timestamp_for_filename())
        with open(filename, 'w') as f:
            writer = csv.DictWriter(f, headers)
            writer.writeheader()
            writer.writerows(rows)
            writer.writerow(totals_row)
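
A general note on these commands: the csv module documentation recommends opening the destination file with newline='' so the writer's own '\r\n' line endings are not translated again by text mode (on Windows this otherwise produces blank-looking rows). Several of the examples on this page omit it. A minimal sketch, with a made-up filename:

import csv

# newline='' lets the csv writer control line endings itself.
with open('restore_timings_example.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, ['username', 'seconds'])
    writer.writeheader()
    writer.writerow({'username': 'demo_user', 'seconds': 1.25})
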
Example #3
    def handle(self, domains, file_name, **options):
        blob_db = get_blob_db()

        with open(file_name, 'w', encoding='utf-8') as csv_file:
            field_names = ['domain', 'archived', 'form_id', 'received_on']
            csv_writer = csv.DictWriter(csv_file, field_names)
            csv_writer.writeheader()
            for domain in domains:
                self.stdout.write("Handling domain %s" % domain)
                form_db = FormAccessors(domain)
                form_ids = form_db.get_all_form_ids_in_domain()
                form_ids.extend(
                    form_db.get_all_form_ids_in_domain('XFormArchived'))
                for form in with_progress_bar(form_db.iter_forms(form_ids),
                                              len(form_ids)):
                    # Couch and SQL forms expose attachments differently,
                    # but the missing-blob check and the CSV row are the same.
                    if isinstance(form, CouchForm):
                        meta = form.blobs.get(ATTACHMENT_NAME)
                    elif isinstance(form, XFormInstanceSQL):
                        meta = form.get_attachment_meta(ATTACHMENT_NAME)
                    else:
                        raise TypeError("unexpected form type: %s" %
                                        type(form).__name__)
                    if not meta or not blob_db.exists(key=meta.key):
                        self.write_row(csv_writer, domain, form.is_archived,
                                       form.received_on, form.form_id)
Example #4
def create_result_file():
    # 'headers' and 'payloads' are defined in the surrounding code.
    _, temp_file_path = tempfile.mkstemp()
    with open(temp_file_path, 'w') as csvfile:
        headers.append('payload_id')
        writer = csv.DictWriter(csvfile, fieldnames=headers)
        writer.writeheader()
        for payload_id, payload in payloads.items():
            row = payload
            row['payload_id'] = payload_id
            writer.writerow(row)
    return temp_file_path
    def handle(self, add_on_name, *args, **options):
        add_to_toggle_slug = options.get('add_to_toggle')
        add_to_toggle = None
        if add_to_toggle_slug:
            add_to_toggle = find_static_toggle(add_to_toggle_slug)
            if not add_to_toggle:
                raise CommandError('Toggle %s not found.' % add_to_toggle_slug)
        with open("apps_with_feature_%s.csv" % add_on_name, "w",
                  encoding='utf-8') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=[
                'domain', 'application_id', 'app_name',
                'all_add_ons_enabled', 'status'
            ])
            writer.writeheader()
            for domain_obj in self._iter_domains(options):
                for application_id in get_app_ids_in_domain(domain_obj.name):
                    application = Application.get(application_id)
                    if application.is_remote_app():
                        continue
                    all_add_ons_enabled = toggles.ENABLE_ALL_ADD_ONS.enabled(
                        domain_obj.name)
                    if add_on_name not in application.add_ons and not all_add_ons_enabled:
                        continue
                    # The file is opened in text mode with encoding='utf-8',
                    # so plain str values are written directly; encoding them
                    # to bytes first would emit b'...' literals in Python 3.
                    writer.writerow({
                        'domain': domain_obj.name,
                        'application_id': application.get_id,
                        'app_name': application.name,
                        'all_add_ons_enabled': all_add_ons_enabled,
                        'status': application.add_ons.get(add_on_name),
                    })
                    if add_to_toggle:
                        add_to_toggle.set(domain_obj.name, True,
                                          NAMESPACE_DOMAIN)
Example #6
    def test_can_write(self):
        with io.StringIO() as csv_stream:
            csv_writer = csv.DictWriter(
                csv_stream, ['name', 'size', 'nothing', 'date_of_birth'])
            data = {
                'name': 'Alice',
                'size': 167.5,
                'nothing': None,
                'date_of_birth': '1983-11-27',
            }
            csv_writer.writeheader()
            csv_writer.writerow(data)
            content = csv_stream.getvalue().replace('\r\n', '\n').replace('\r', '\n')
        self.assertEqual(
            'name,size,nothing,date_of_birth\nAlice,167.5,,1983-11-27\n',
            content)
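
The test above shows that None is written as an empty field. Worth knowing: on a round trip through csv.DictReader the value comes back as an empty string, not None. A short sketch:

import csv
import io

buf = io.StringIO()
writer = csv.DictWriter(buf, ['name', 'nothing'])
writer.writeheader()
writer.writerow({'name': 'Alice', 'nothing': None})

buf.seek(0)
row = next(csv.DictReader(buf))
print(repr(row['nothing']))    # '' (empty string)
print(row['nothing'] is None)  # False: None does not survive the round trip
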
Example #7
    def handle(self, domain, csv_file, **options):
        self.domain = domain
        if not should_use_sql_backend(domain):
            print("This domain doesn't use SQL backend, exiting!")
            return

        current_date = self.first_form_received_on()
        if not current_date:
            print("No submissions in this domain yet, exiting!")
            return

        with open(csv_file, "w", encoding='utf-8') as output_file:
            field_names = ('date', 'doc_type', 'in_sql', 'in_es', 'diff')

            csv_writer = csv.DictWriter(output_file,
                                        field_names,
                                        extrasaction='ignore')
            csv_writer.writeheader()

            while current_date <= datetime.today():
                cases_in_sql = self._get_sql_cases_modified_on_date(
                    current_date)
                cases_in_es = self._get_es_cases_modified_on_date(current_date)
                properties = {
                    "date": current_date,
                    "doc_type": "CommCareCase",
                    "in_sql": cases_in_sql,
                    "in_es": cases_in_es,
                    "diff": cases_in_sql - cases_in_es,
                }
                csv_writer.writerow(properties)
                print(properties)

                forms_in_sql = self._get_sql_forms_received_on_date(
                    current_date)
                forms_in_es = self._get_es_forms_received_on_date(current_date)
                properties = {
                    "date": current_date,
                    "doc_type": "XFormInstance",
                    "in_sql": forms_in_sql,
                    "in_es": forms_in_es,
                    "diff": forms_in_sql - forms_in_es
                }
                csv_writer.writerow(properties)
                print(properties)

                current_date += relativedelta(months=1)
Example #8
    def handle(self, domain, data_source_id, *args, **kwargs):
        config, _ = get_datasource_config(data_source_id, domain)
        adapter = get_indicator_adapter(config, load_source='find_datasource_mismatches')
        q = adapter.get_query_object()
        document_store = get_document_store_for_doc_type(
            domain, config.referenced_doc_type, load_source="find_datasource_mismatches")
        bad_rows = []
        for row in with_progress_bar(q, length=q.count()):
            adapter.track_load()
            doc_id = row.doc_id
            doc = document_store.get_document(doc_id)

            current_rows = config.get_all_values(doc)
            if len(current_rows) > 1:
                raise ValueError("this command doesn't work for datasources returning multiple rows per doc")

            try:
                current_row = current_rows[0]
            except IndexError:
                # No row was produced for this doc; nothing to compare.
                continue

            # don't compare the 'inserted_at' columns
            current_row = [val for val in current_row if val.column.database_column_name != 'inserted_at']

            for val in current_row:
                try:
                    inserted_value = getattr(row, val.column.database_column_name)
                    if (inserted_value != val.value
                       or row.inserted_at.replace(tzinfo=pytz.utc) < parse_datetime(doc['server_modified_on'])):
                        bad_rows.append({
                            'doc_id': row.doc_id,
                            'column_name': val.column.database_column_name,
                            'inserted_at': row.inserted_at.isoformat(),
                            'server_modified_on': doc['server_modified_on'],
                            'stored_value': inserted_value,
                            'desired_value': val.value,
                            'message': ('column mismatch'
                                        if inserted_value != val.value else "modified date early"),
                        })
                except AttributeError:
                    bad_rows.append({
                        'doc_id': row.doc_id,
                        'column_name': val.column.database_column_name,
                        'inserted_at': 'missing',
                        'server_modified_on': doc['server_modified_on'],
                        'stored_value': 'missing',
                        'desired_value': val.value,
                        'message': 'doc missing',
                    })

        filename = 'datasource_mismatches_{}_{}.csv'.format(
            data_source_id[-8:],
            datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")
        )
        with open(filename, 'w', encoding='utf-8') as f:
            headers = ['doc_id', 'column_name', 'inserted_at', 'server_modified_on',
                       'stored_value', 'desired_value', 'message']
            writer = csv.DictWriter(f, headers)
            writer.writeheader()
            writer.writerows(bad_rows)

        print("Found {} mismatches. Check {} for more details".format(len(bad_rows), filename))