def parse_groups(groups):
    """Build spreadsheet headers and rows for the given groups.

    Returns ``(headers, rows)`` where *headers* is the list of column
    names and *rows* is a generator yielding one list of cell values per
    group, in alphanumeric order of group name.
    """
    def _as_export_dict(group):
        # Flat record of the fields exported for each group.
        return {
            'id': group.get_id,
            'name': group.name,
            'case-sharing': group.case_sharing,
            'reporting': group.reporting,
            'data': group.metadata,
        }

    ordered_groups = sorted(
        groups,
        key=lambda g: alphanumeric_sort_key(g.name)
    )
    exported = [_as_export_dict(g) for g in ordered_groups]

    # Collect every custom-data key seen across all groups so the sheet
    # gets one column per key.
    data_keys = set()
    for g in ordered_groups:
        if g.metadata:
            data_keys.update(g.metadata.keys())

    headers = ['id', 'name', 'case-sharing?', 'reporting?']
    headers.extend(build_data_headers(data_keys))

    def _rows():
        for record in exported:
            flat = dict(flatten_json(record))
            # Missing/falsy cells are written as blank strings.
            yield [flat.get(column) or '' for column in headers]

    return headers, _rows()
def _get_groups(self):
    """Return the groups to display, alphanumerically sorted by name."""
    couch_user = self.request.couch_user
    if couch_user.is_commcare_user():
        unsorted_groups = Group.by_user(couch_user)
    else:
        # For web users, fall back to every group in the domain
        # (the original code questioned whether this is right).
        unsorted_groups = Group.by_domain(self.domain)
    return sorted(unsorted_groups, key=lambda g: alphanumeric_sort_key(g.name))
def _get_groups(self):
    """Groups visible to the requesting user, in alphanumeric name order."""
    user = self.request.couch_user
    # CommCare users see their own groups; web users see the whole
    # domain's groups (the original code questioned whether this is right).
    groups = (
        Group.by_user(user)
        if user.is_commcare_user()
        else Group.by_domain(self.domain)
    )
    return sorted(groups, key=lambda g: alphanumeric_sort_key(g.name))
def _get_sorted_groups(domain):
    """All groups in *domain*, alphanumerically sorted by name.

    A group with a ``None`` name sorts as the empty string instead of
    raising inside the sort key.
    """
    def by_name(group):
        return alphanumeric_sort_key(group.name or '')

    return sorted(Group.by_domain(domain), key=by_name)
def dump_users_and_groups(response, domain):
    """Write an Excel (xlsx) export of a domain's users and groups to *response*.

    Builds a 'users' sheet and a 'groups' sheet (plus a 'locations' sheet
    when commtrack is enabled for the domain), then writes the finished
    workbook bytes into ``response``.

    :param response: file-like object (e.g. an HTTP response) written to
        via ``response.write(...)``.
    :param domain: domain name whose users/groups are exported.
    :raises GroupNameError: if any group in the domain has a blank name.
    """
    file = StringIO()
    writer = Excel2007ExportWriter()

    users = CommCareUser.by_domain(domain)
    user_data_keys = set()
    user_groups_length = 0  # widest group membership seen across all users
    user_dicts = []
    group_data_keys = set()
    group_dicts = []
    group_memoizer = GroupMemoizer(domain=domain)
    # load groups manually instead of calling group_memoizer.load_all()
    # so that we can detect blank groups
    blank_groups = set()
    for group in Group.by_domain(domain):
        if group.name:
            group_memoizer.add_group(group)
        else:
            blank_groups.add(group)

    # Fail fast: a nameless group cannot be represented in the export.
    if blank_groups:
        raise GroupNameError(blank_groups=blank_groups)

    for user in users:
        data = user.user_data
        group_names = sorted(map(lambda id: group_memoizer.get(id).name,
                                 Group.by_user(user, wrap=False)),
                             key=alphanumeric_sort_key)
        # NOTE(review): real password and internal ids are not exported;
        # the password column carries a dummy placeholder (see below) and
        # 'user_id' is included read-only for round-tripping.
        user_dicts.append({
            'data': data,
            'group': group_names,
            'name': user.full_name,
            # dummy display string for passwords
            'password': "******",
            'phone-number': user.phone_number,
            'email': user.email,
            'username': user.raw_username,
            'language': user.language,
            'user_id': user._id,
        })
        # Guarded: user_data may be None/empty for some users.
        user_data_keys.update(user.user_data.keys() if user.user_data else {})
        user_groups_length = max(user_groups_length, len(group_names))

    sorted_groups = sorted(group_memoizer.groups, key=lambda group: alphanumeric_sort_key(group.name))
    for group in sorted_groups:
        group_dicts.append({
            'id': group.get_id,
            'name': group.name,
            'case-sharing': group.case_sharing,
            'reporting': group.reporting,
            'data': group.metadata,
        })
        # Guarded: metadata may be None/empty for some groups.
        group_data_keys.update(group.metadata.keys() if group.metadata else {})

    # include obscured password column for adding new users
    user_headers = [
        'username', 'password', 'name', 'phone-number', 'email', 'language', 'user_id'
    ]
    # One 'data ...' column per custom user-data key seen above.
    user_headers.extend(
        json_to_headers(
            {'data': dict([(key, None) for key in user_data_keys])}))
    # One 'group N' column per membership slot, up to the widest user.
    user_headers.extend(
        json_to_headers({'group': range(1, user_groups_length + 1)}))

    group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
    group_headers.extend(
        json_to_headers(
            {'data': dict([(key, None) for key in group_data_keys])}))

    headers = [
        ('users', [user_headers]),
        ('groups', [group_headers]),
    ]
    commtrack_enabled = Domain.get_by_name(domain).commtrack_enabled
    if commtrack_enabled:
        headers.append(
            ('locations', [['username', 'location-sms-code', 'location name (optional)']]))
    writer.open(
        header_table=headers,
        file=file,
    )

    def get_user_rows():
        # Flatten each nested user dict so keys line up with user_headers;
        # missing/falsy cells become blank strings.
        for user_dict in user_dicts:
            row = dict(flatten_json(user_dict))
            yield [row.get(header) or '' for header in user_headers]

    def get_group_rows():
        # Same flattening for groups against group_headers.
        for group_dict in group_dicts:
            row = dict(flatten_json(group_dict))
            yield [row.get(header) or '' for header in group_headers]
    rows = [
        ('users', get_user_rows()),
        ('groups', get_group_rows()),
    ]
    if commtrack_enabled:
        rows.append(('locations', get_location_rows(domain)))
    writer.write(rows)
    writer.close()
    response.write(file.getvalue())
def _get_sorted_groups(domain):
    """Return the domain's groups in alphanumeric name order."""
    groups = list(Group.by_domain(domain))
    # ``name or ''`` guards against groups whose name is None.
    groups.sort(key=lambda g: alphanumeric_sort_key(g.name or ''))
    return groups
def dump_users_and_groups(response, domain):
    """Write an Excel (xlsx) export of a domain's users and groups to *response*.

    Builds a 'users' sheet and a 'groups' sheet and writes the finished
    workbook bytes into ``response``.

    :param response: file-like object (e.g. an HTTP response) written to
        via ``response.write(...)``.
    :param domain: domain name whose users/groups are exported.
    :raises GroupNameError: if any group in the domain has a blank name.
    """
    file = StringIO()
    writer = Excel2007ExportWriter()

    users = CommCareUser.by_domain(domain)
    user_data_keys = set()
    user_groups_length = 0  # widest group membership seen across all users
    user_dicts = []
    group_data_keys = set()
    group_dicts = []
    group_memoizer = GroupMemoizer(domain=domain)
    # load groups manually instead of calling group_memoizer.load_all()
    # so that we can detect blank groups
    blank_groups = set()
    for group in Group.by_domain(domain):
        if group.name:
            group_memoizer.add_group(group)
        else:
            blank_groups.add(group)
    # Fail fast: a nameless group cannot be represented in the export.
    if blank_groups:
        raise GroupNameError(blank_groups=blank_groups)

    for user in users:
        data = user.user_data
        group_names = sorted(map(
            lambda id: group_memoizer.get(id).name,
            Group.by_user(user, wrap=False)
        ), key=alphanumeric_sort_key)
        # password and user_id are deliberately not exported
        user_dicts.append({
            'data': data,
            'group': group_names,
            'name': user.full_name,
            'phone-number': user.phone_number,
            'username': user.raw_username,
            'language': user.language,
        })
        # FIX: user_data may be None/empty; the unguarded .keys() raised.
        user_data_keys.update(user.user_data.keys() if user.user_data else [])
        user_groups_length = max(user_groups_length, len(group_names))

    sorted_groups = sorted(group_memoizer.groups,
                           key=lambda group: alphanumeric_sort_key(group.name))
    for group in sorted_groups:
        group_dicts.append({
            'id': group.get_id,
            'name': group.name,
            'case-sharing': group.case_sharing,
            'reporting': group.reporting,
            'data': group.metadata,
        })
        # FIX: metadata may be None/empty; the unguarded .keys() raised.
        group_data_keys.update(group.metadata.keys() if group.metadata else [])

    # include blank password column for adding new users
    user_headers = ['username', 'password', 'name', 'phone-number', 'language']
    # One 'data ...' column per custom user-data key seen above.
    user_headers.extend(json_to_headers(
        {'data': dict([(key, None) for key in user_data_keys])}
    ))
    # One 'group N' column per membership slot, up to the widest user.
    user_headers.extend(json_to_headers(
        {'group': range(1, user_groups_length + 1)}
    ))
    group_headers = ['id', 'name', 'case-sharing?', 'reporting?']
    group_headers.extend(json_to_headers(
        {'data': dict([(key, None) for key in group_data_keys])}
    ))
    writer.open(
        header_table=[
            ('users', [user_headers]),
            ('groups', [group_headers]),
        ],
        file=file,
    )

    def get_user_rows():
        # Flatten each nested user dict so keys line up with user_headers;
        # missing/falsy cells become blank strings.
        for user_dict in user_dicts:
            row = dict(flatten_json(user_dict))
            yield [row.get(header) or '' for header in user_headers]

    def get_group_rows():
        # Same flattening for groups against group_headers.
        for group_dict in group_dicts:
            row = dict(flatten_json(group_dict))
            yield [row.get(header) or '' for header in group_headers]

    writer.write([
        ('users', get_user_rows()),
        ('groups', get_group_rows()),
    ])
    writer.close()
    response.write(file.getvalue())
def _get_groups(self):
    """Groups for the requesting user, alphanumerically sorted by name."""
    def by_name(group):
        return alphanumeric_sort_key(group.name)

    user_groups = groups_for_user(self.request.couch_user, self.domain)
    return sorted(user_groups, key=by_name)
def _get_groups(self):
    """Return the user's groups in alphanumeric name order."""
    unsorted_groups = groups_for_user(self.request.couch_user, self.domain)
    return sorted(
        unsorted_groups,
        key=lambda g: alphanumeric_sort_key(g.name),
    )