def parse_users(group_memoizer, domain, user_data_model, location_cache):
    """Build spreadsheet headers and row data for all CommCare users in a domain.

    :param group_memoizer: GroupMemoizer used to resolve group ids to names
    :param domain: domain name whose users are exported
    :param user_data_model: custom user-data model; splits each user's data
        into model-defined and uncategorized keys
    :param location_cache: mapping-like cache; ``get(location_id)`` returns the
        location SMS code for the 'location-sms-code' column
    :returns: ``(user_headers, row_generator)`` where each row aligns with
        ``user_headers``
    """
    def _get_group_names(user):
        # Generator instead of map(lambda id: ...) — the original lambda
        # shadowed the builtin `id`. Sorted so columns are stable per export.
        return sorted(
            (group_memoizer.get(group_id).name
             for group_id in Group.by_user(user, wrap=False)),
            key=alphanumeric_sort_key,
        )

    def _make_user_dict(user, group_names, location_cache):
        model_data, uncategorized_data = (
            user_data_model.get_model_and_uncategorized(user.user_data)
        )
        return {
            'data': model_data,
            'uncategorized_data': uncategorized_data,
            'group': group_names,
            'name': user.full_name,
            'password': "******",  # dummy display string for passwords
            'phone-number': user.phone_number,
            'email': user.email,
            'username': user.raw_username,
            'language': user.language,
            'user_id': user._id,
            'is_active': str(user.is_active),
            'location-sms-code': location_cache.get(user.location_id),
        }

    user_data_keys = set()
    user_groups_length = 0
    user_dicts = []
    for user in get_all_commcare_users_by_domain(domain):
        group_names = _get_group_names(user)
        user_dicts.append(_make_user_dict(user, group_names, location_cache))
        user_data_keys.update(user.user_data.keys() if user.user_data else [])
        user_groups_length = max(user_groups_length, len(group_names))

    user_headers = [
        'username', 'password', 'name', 'phone-number', 'email',
        'language', 'user_id', 'is_active',
    ]
    # Location column only for domains with the LOCATIONS privilege.
    if domain_has_privilege(domain, privileges.LOCATIONS):
        user_headers.append('location-sms-code')
    user_data_fields = [f.slug for f in user_data_model.fields]
    user_headers.extend(build_data_headers(user_data_fields))
    user_headers.extend(build_data_headers(
        user_data_keys.difference(set(user_data_fields)),
        header_prefix='uncategorized_data'
    ))
    user_headers.extend(json_to_headers(
        {'group': range(1, user_groups_length + 1)}
    ))

    def _user_rows():
        for user_dict in user_dicts:
            row = dict(flatten_json(user_dict))
            yield [row.get(header) or '' for header in user_headers]

    return user_headers, _user_rows()
def _extend_headers(prefix, headers):
    # Append flattened '<prefix>: <header>' columns to the enclosing
    # tab_headers list; each header maps to a None placeholder value.
    placeholder_map = dict.fromkeys(headers)
    tab_headers.extend(json_to_headers({prefix: placeholder_map}))
def dump_users_and_groups(response, domain):
    """Write an Excel 2007 export of a domain's users and groups to ``response``.

    Sheets: 'users', 'groups', and 'locations' (the latter only when
    commtrack is enabled for the domain).

    :param response: file-like HTTP response; receives the workbook contents
    :param domain: domain name whose users and groups are exported
    :raises GroupNameError: if any group in the domain has a blank name
    """
    file = StringIO()
    writer = Excel2007ExportWriter()

    users = CommCareUser.by_domain(domain)
    user_data_keys = set()
    user_groups_length = 0
    user_dicts = []
    group_data_keys = set()
    group_dicts = []
    group_memoizer = GroupMemoizer(domain=domain)
    # load groups manually instead of calling group_memoizer.load_all()
    # so that we can detect blank groups
    blank_groups = set()
    for group in Group.by_domain(domain):
        if group.name:
            group_memoizer.add_group(group)
        else:
            blank_groups.add(group)
    if blank_groups:
        raise GroupNameError(blank_groups=blank_groups)

    for user in users:
        data = user.user_data
        # Generator instead of map(lambda id: ...) to avoid shadowing the
        # builtin `id`.
        group_names = sorted(
            (group_memoizer.get(group_id).name
             for group_id in Group.by_user(user, wrap=False)),
            key=alphanumeric_sort_key,
        )
        # exclude password and user_id
        user_dicts.append({
            'data': data,
            'group': group_names,
            'name': user.full_name,
            # dummy display string for passwords
            'password': "******",
            'phone-number': user.phone_number,
            'email': user.email,
            'username': user.raw_username,
            'language': user.language,
            'user_id': user._id,
        })
        user_data_keys.update(user.user_data.keys() if user.user_data else [])
        user_groups_length = max(user_groups_length, len(group_names))

    sorted_groups = sorted(
        group_memoizer.groups,
        key=lambda group: alphanumeric_sort_key(group.name)
    )
    for group in sorted_groups:
        group_dicts.append({
            'id': group.get_id,
            'name': group.name,
            'case-sharing': group.case_sharing,
            'reporting': group.reporting,
            'data': group.metadata,
        })
        group_data_keys.update(group.metadata.keys() if group.metadata else [])

    # include obscured password column for adding new users
    user_headers = [
        'username', 'password', 'name', 'phone-number', 'email',
        'language', 'user_id'
    ]
    user_headers.extend(json_to_headers(
        {'data': {key: None for key in user_data_keys}}
    ))
    user_headers.extend(json_to_headers(
        {'group': range(1, user_groups_length + 1)}
    ))

    group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
    group_headers.extend(json_to_headers(
        {'data': {key: None for key in group_data_keys}}
    ))

    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    commtrack_enabled = Domain.get_by_name(domain).commtrack_enabled
    if commtrack_enabled:
        headers.append(
            ('locations', [['username', 'location-sms-code',
                            'location name (optional)']]))

    writer.open(
        header_table=headers,
        file=file,
    )

    def get_user_rows():
        for user_dict in user_dicts:
            row = dict(flatten_json(user_dict))
            yield [row.get(header) or '' for header in user_headers]

    def get_group_rows():
        for group_dict in group_dicts:
            row = dict(flatten_json(group_dict))
            yield [row.get(header) or '' for header in group_headers]

    rows = [
        ('users', get_user_rows()),
        ('groups', get_group_rows()),
    ]
    if commtrack_enabled:
        rows.append(('locations', get_location_rows(domain)))

    writer.write(rows)
    writer.close()
    response.write(file.getvalue())
def dump_users_and_groups(response, domain):
    """Write an Excel 2007 export of a domain's users and groups to ``response``.

    Produces a 'users' sheet and a 'groups' sheet.

    :param response: file-like HTTP response; receives the workbook contents
    :param domain: domain name whose users and groups are exported
    :raises GroupNameError: if any group in the domain has a blank name
    """
    file = StringIO()
    writer = Excel2007ExportWriter()

    users = CommCareUser.by_domain(domain)
    user_data_keys = set()
    user_groups_length = 0
    user_dicts = []
    group_data_keys = set()
    group_dicts = []
    group_memoizer = GroupMemoizer(domain=domain)
    # load groups manually instead of calling group_memoizer.load_all()
    # so that we can detect blank groups
    blank_groups = set()
    for group in Group.by_domain(domain):
        if group.name:
            group_memoizer.add_group(group)
        else:
            blank_groups.add(group)
    if blank_groups:
        raise GroupNameError(blank_groups=blank_groups)

    for user in users:
        data = user.user_data
        # Generator instead of map(lambda id: ...) to avoid shadowing the
        # builtin `id`.
        group_names = sorted(
            (group_memoizer.get(group_id).name
             for group_id in Group.by_user(user, wrap=False)),
            key=alphanumeric_sort_key,
        )
        # exclude password and user_id
        user_dicts.append({
            'data': data,
            'group': group_names,
            'name': user.full_name,
            'phone-number': user.phone_number,
            'username': user.raw_username,
            'language': user.language,
        })
        # Guard against a None user_data, consistent with the other
        # dump_users_and_groups variant in this file.
        user_data_keys.update(user.user_data.keys() if user.user_data else [])
        user_groups_length = max(user_groups_length, len(group_names))

    sorted_groups = sorted(
        group_memoizer.groups,
        key=lambda group: alphanumeric_sort_key(group.name)
    )
    for group in sorted_groups:
        group_dicts.append({
            'id': group.get_id,
            'name': group.name,
            'case-sharing': group.case_sharing,
            'reporting': group.reporting,
            'data': group.metadata,
        })
        group_data_keys.update(group.metadata.keys() if group.metadata else [])

    # include blank password column for adding new users
    user_headers = ['username', 'password', 'name', 'phone-number', 'language']
    user_headers.extend(json_to_headers(
        {'data': {key: None for key in user_data_keys}}
    ))
    user_headers.extend(json_to_headers(
        {'group': range(1, user_groups_length + 1)}
    ))

    group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
    group_headers.extend(json_to_headers(
        {'data': {key: None for key in group_data_keys}}
    ))

    writer.open(
        header_table=[
            ('users', [user_headers]),
            ('groups', [group_headers]),
        ],
        file=file,
    )

    def get_user_rows():
        for user_dict in user_dicts:
            row = dict(flatten_json(user_dict))
            yield [row.get(header) or '' for header in user_headers]

    def get_group_rows():
        for group_dict in group_dicts:
            row = dict(flatten_json(group_dict))
            yield [row.get(header) or '' for header in group_headers]

    writer.write([
        ('users', get_user_rows()),
        ('groups', get_group_rows()),
    ])
    writer.close()
    response.write(file.getvalue())
def build_data_headers(keys, header_prefix='data'):
    """Flatten ``keys`` into spreadsheet headers nested under ``header_prefix``."""
    placeholder_map = dict.fromkeys(keys)
    return json_to_headers({header_prefix: placeholder_map})
def parse_users(group_memoizer, users, user_data_fields):
    """Build spreadsheet headers and row data for the given users.

    :param group_memoizer: GroupMemoizer used to resolve group ids to names
    :param users: iterable of CommCare users to export
    :param user_data_fields: custom-data field slugs defined by the domain;
        user_data keys not in this collection go to 'uncategorized_data'
    :returns: ``(user_headers, row_generator)`` where each row aligns with
        ``user_headers``
    """
    # Set for O(1) membership tests inside the per-user loop (the original
    # did an O(n) list scan per key); the original list still drives header
    # ordering below.
    known_fields = set(user_data_fields)

    def _get_group_names(user):
        # Generator instead of map(lambda id: ...) to avoid shadowing the
        # builtin `id`.
        return sorted(
            (group_memoizer.get(group_id).name
             for group_id in Group.by_user(user, wrap=False)),
            key=alphanumeric_sort_key,
        )

    def _parse_custom_data(user):
        # Split user_data into model-defined vs. uncategorized keys.
        if not user.user_data:
            return {}, {}
        model_data = {}
        uncategorized_data = {}
        for k, v in user.user_data.items():
            if k in known_fields:
                model_data[k] = v
            else:
                uncategorized_data[k] = v
        return model_data, uncategorized_data

    def _make_user_dict(user, group_names):
        model_data, uncategorized_data = _parse_custom_data(user)
        return {
            'data': model_data,
            'uncategorized_data': uncategorized_data,
            'group': group_names,
            'name': user.full_name,
            'password': "******",  # dummy display string for passwords
            'phone-number': user.phone_number,
            'email': user.email,
            'username': user.raw_username,
            'language': user.language,
            'user_id': user._id,
        }

    user_data_keys = set()
    user_groups_length = 0
    user_dicts = []
    for user in users:
        group_names = _get_group_names(user)
        user_dicts.append(_make_user_dict(user, group_names))
        user_data_keys.update(user.user_data.keys() if user.user_data else [])
        user_groups_length = max(user_groups_length, len(group_names))

    user_headers = [
        'username', 'password', 'name', 'phone-number', 'email',
        'language', 'user_id'
    ]
    user_headers.extend(build_data_headers(user_data_fields))
    user_headers.extend(build_data_headers(
        user_data_keys.difference(known_fields),
        header_prefix='uncategorized_data'
    ))
    user_headers.extend(json_to_headers(
        {'group': range(1, user_groups_length + 1)}
    ))

    def _user_rows():
        for user_dict in user_dicts:
            row = dict(flatten_json(user_dict))
            yield [row.get(header) or '' for header in user_headers]

    return user_headers, _user_rows()