Beispiel #1
0
def update_items(fields_patches, domain, data_type_id, transaction):
    """Apply field-level patches to every data item of a fixture data type.

    :param fields_patches: mapping of field name -> patch dict; a patch may
        contain "update" (rename the field to the given name), "remove"
        (drop the field), or "is_new" (create the field with an empty
        FieldList).
    :param domain: domain whose fixture items are updated
    :param data_type_id: id of the FixtureDataType whose items are patched
    :param transaction: transaction object used to save each modified item
    """
    data_items = FixtureDataItem.by_data_type(domain, data_type_id)
    for item in data_items:
        fields = item.fields
        updated_fields = {}
        # Each item consumes its own copy of the patches.
        patches = deepcopy(fields_patches)
        # Snapshot the keys: `fields` is mutated (pop) inside the loop,
        # which raises RuntimeError on Python 3 when iterating the live
        # view.
        for old_field in list(fields.keys()):
            patch = patches.pop(old_field, {})
            if not any(patch):
                # No patch for this field -- carry it over unchanged.
                updated_fields[old_field] = fields.pop(old_field)
            if "update" in patch:
                # Rename the field, keeping its current value.
                new_field_name = patch["update"]
                updated_fields[new_field_name] = fields.pop(old_field)
            if "remove" in patch:
                # Removed fields are simply not copied into updated_fields.
                continue
                # destroy_field(field_to_delete, transaction)
        # Whatever is left in `patches` refers to fields the item does not
        # have yet; snapshot the keys here too since we pop while iterating.
        for new_field_name in list(patches.keys()):
            patch = patches.pop(new_field_name, {})
            if "is_new" in patch:
                updated_fields[new_field_name] = FieldList(
                    field_list=[]
                )
        # Plain attribute assignment instead of setattr with a literal name.
        item.fields = updated_fields
        transaction.save(item)
    # NOTE(review): the result below is unused -- looks like dead code;
    # kept in case by_data_type has a caching side effect. Confirm and
    # remove.
    data_items = FixtureDataItem.by_data_type(domain, data_type_id)
Beispiel #2
0
def update_items(fields_patches, domain, data_type_id, transaction):
    """Apply field-level patches to every data item of a fixture data type.

    :param fields_patches: mapping of field name -> patch dict; a patch may
        contain "update" (rename the field to the given name), "remove"
        (drop the field), or "is_new" (create the field with an empty
        FieldList).
    :param domain: domain whose fixture items are updated
    :param data_type_id: id of the FixtureDataType whose items are patched
    :param transaction: transaction object used to save each modified item
    """
    data_items = FixtureDataItem.by_data_type(domain, data_type_id)
    for item in data_items:
        fields = item.fields
        updated_fields = {}
        # Each item consumes its own copy of the patches.
        patches = deepcopy(fields_patches)
        # Snapshot the keys: `fields` is mutated (pop) inside the loop,
        # which raises RuntimeError on Python 3 when iterating the live
        # view.
        for old_field in list(fields.keys()):
            patch = patches.pop(old_field, {})
            if not any(patch):
                # No patch for this field -- carry it over unchanged.
                updated_fields[old_field] = fields.pop(old_field)
            if "update" in patch:
                # Rename the field, keeping its current value.
                new_field_name = patch["update"]
                updated_fields[new_field_name] = fields.pop(old_field)
            if "remove" in patch:
                # Removed fields are simply not copied into updated_fields.
                continue
                # destroy_field(field_to_delete, transaction)
        # Whatever is left in `patches` refers to fields the item does not
        # have yet; snapshot the keys here too since we pop while iterating.
        for new_field_name in list(patches.keys()):
            patch = patches.pop(new_field_name, {})
            if "is_new" in patch:
                updated_fields[new_field_name] = FieldList(
                    field_list=[]
                )
        # Plain attribute assignment instead of setattr with a literal name.
        item.fields = updated_fields
        transaction.save(item)
    # NOTE(review): the result below is unused -- looks like dead code;
    # kept in case by_data_type has a caching side effect. Confirm and
    # remove.
    data_items = FixtureDataItem.by_data_type(domain, data_type_id)
Beispiel #3
0
def download_item_lists(request, domain):
    """Export all fixture data types of *domain* and their items as an
    Excel (XLS 2007) file response.

    Builds one "types" table (name, tag, field columns) plus one table per
    data type whose rows are the items with their group names and
    usernames, padded to the widest group/user membership seen.
    """
    data_types = FixtureDataType.by_domain(domain)
    data_type_schemas = []
    max_fields = 0
    # per-data-type maxima; reset at the end of each iteration below
    max_groups = 0
    max_users = 0
    # overall maxima across every data type, used to size the header rows
    mmax_groups = 0
    mmax_users = 0
    data_tables = []
    

    for data_type in data_types:
        type_schema = [data_type.name, data_type.tag]
        fields = [field for field in data_type.fields]
        type_id = data_type.get_id
        data_table_of_type = []
        # First pass: find the widest group/user membership for this type.
        for item_row in FixtureDataItem.by_data_type(domain, type_id):
            group_len = len(item_row.get_groups())
            max_groups = group_len if group_len>max_groups else max_groups
            user_len = len(item_row.get_users())
            max_users = user_len if user_len>max_users else max_users
        # Second pass: emit one row per item, padding groups/users so every
        # row in this table has the same number of columns.
        for item_row in FixtureDataItem.by_data_type(domain, type_id):
            groups = [group.name for group in item_row.get_groups()] + ["" for x in range(0,max_groups-len(item_row.get_groups()))]
            users = [user.raw_username for user in item_row.get_users()] + ["" for x in range(0, max_users-len(item_row.get_users()))]
            data_row = tuple([str(_id_from_doc(item_row)),"N"]+
                             [item_row.fields[field] for field in fields]+
                             groups + users)
            data_table_of_type.append(data_row)
        type_schema.extend(fields)
        data_type_schemas.append(tuple(type_schema))
        if max_fields<len(type_schema):
            max_fields = len(type_schema)
        data_tables.append((data_type.tag,tuple(data_table_of_type)))
        # Fold this type's maxima into the global maxima, then reset.
        mmax_users = max_users if max_users>mmax_users else mmax_users
        mmax_groups = max_groups if max_groups>mmax_groups else mmax_groups
        max_users = 0
        max_groups = 0

    # max_fields counts name+tag+fields, so there are max_fields-2 field
    # columns; range(1, max_fields - 1) yields exactly that many headers.
    type_headers = ["name", "tag"] + ["field %d" % x for x in range(1, max_fields - 1)]
    type_headers = ("types", tuple(type_headers))
    table_headers = [type_headers]    
    for type_schema in data_type_schemas:
        item_header = (type_schema[1], tuple(["UID", DELETE_HEADER] +
                                             ["field: " + x for x in type_schema[2:]] +
                                             ["group %d" % x for x in range(1, mmax_groups + 1)] +
                                             ["user %d" % x for x in range(1, mmax_users + 1)]))
        table_headers.append(item_header)

    table_headers = tuple(table_headers)
    type_rows = ("types", tuple(data_type_schemas))
    data_tables = tuple([type_rows]+data_tables)
    
    fd, path = tempfile.mkstemp()
    with os.fdopen(fd, 'w') as temp:
        export_raw((table_headers), (data_tables), temp)
    # NOTE(review): `format` shadows the builtin; the temp file is never
    # deleted and open(path) is never explicitly closed -- presumably
    # export_response takes ownership of the handle; confirm.
    format = Format.XLS_2007
    return export_response(open(path), format, "%s_fixtures" % domain)
Beispiel #4
0
def _get_all_fixture_items(domain, fixture_id):
    """
    :return: returns a dict mapped like
    defaultdict(dict,
            {u'99DOTS': {u'directly_observed_dose': u'13',
              u'manual': u'18',
              u'missed_dose': u'15',
              u'missing_data': u'16',
              u'self_administered_dose': u'17',
              u'unobserved_dose': u'14'},
             u'enikshay': {u'directly_observed_dose': u'1',
              u'manual': u'6',
              u'missed_dose': u'3',
              u'missing_data': u'4',
              u'self_administered_dose': u'5',
              u'unobserved_dose': u'2'},
            ...
            so one can use result[u'99DOTS'][u'missed_dose'] => 15
    """
    if fixture_id:
        all_items = FixtureDataItem.by_data_type(domain, fixture_id)
        result = defaultdict(dict)
        for item in all_items:
            source = item.fields['adherence_source'].field_list[0].field_value
            value = item.fields['adherence_value'].field_list[0].field_value
            ledger_value = item.fields['ledger_value'].field_list[
                0].field_value
            result[source][value] = ledger_value
        return result
    else:
        return defaultdict(dict)
Beispiel #5
0
    def get_data_from_table(self, table_name=None, fields_and_values: Optional[Tuple] = None):
        """
        Collect rows from one of the known tables, optionally filtered.

        :param table_name: name of the table to read; may be omitted only
            when exactly one table is registered
        :param fields_and_values: tuple of (column, tuple-of-values) pairs
            used to filter the rows, e.g.
            (
                ('age', ('7', '14', '21')),
                ('name', ('Mary', 'Steve'))
            )

        :return: all rows of the table, or only those matching the filter
        :raises AttributeError: when no usable table_name is given
        :raises ValueError: when table_name is not a registered table
        """
        if table_name is None and len(self.tables) == 1:
            # Exactly one table registered -- use it implicitly.
            (table_name,) = self.tables
        elif not table_name:
            raise AttributeError('table_name must be specified')
        elif table_name not in self.tables:
            raise ValueError(f"table_name '{table_name}' not in {self.tables}")

        records = FixtureDataItem.by_data_type(self.domain, self.tables[table_name])
        if not fields_and_values:
            return records

        filters = dict(fields_and_values)
        return [record for record in records if self._is_record_valid(record, filters)]
Beispiel #6
0
    def filter_context(self):
        """Build the template context for this fixture-backed drilldown filter.

        Loads the root-level fixture items; when the request carries a
        ``fixture_id`` of the form ``"<type>:<id>"``, walks the selection's
        lineage and attaches each level's children so the UI can render the
        currently open path.
        """
        root_fdis = [self.fdi_to_json(f) for f in FixtureDataItem.by_data_type(self.domain, self.data_types(0).get_id)]

        f_id = self.request.GET.get('fixture_id', None)
        selected_fdi_type = f_id.split(':')[0] if f_id else None
        selected_fdi_id = f_id.split(':')[1] if f_id else None

        if selected_fdi_id:
            # (removed unused local `index = 0`)
            lineage = self.generate_lineage(selected_fdi_type, selected_fdi_id)
            parent = {'children': root_fdis}
            for i, fdi in enumerate(lineage[:-1]):
                # Find this ancestor among the current level's children.
                this_fdi = [f for f in parent['children'] if f['id'] == fdi.get_id][0]
                next_h = self.hierarchy[i+1]
                this_fdi['children'] = [self.fdi_to_json(f) for f in FixtureDataItem.by_field_value(self.domain,
                                        self.data_types(i+1), next_h["parent_ref"], fdi.fields_without_attributes[next_h["references"]])]
                parent = this_fdi

        return {
            'api_root': self.api_root,
            'control_name': self.label,
            'control_slug': self.slug,
            'selected_fdi_id': selected_fdi_id,
            'fdis': json.dumps(root_fdis),
            'hierarchy': self.full_hierarchy
        }
Beispiel #7
0
    def filter_context(self):
        """Build the template context for this fixture-backed drilldown filter.

        Loads the root-level fixture items; when the request carries a
        ``fixture_id`` of the form ``"<type>:<id>"``, walks the selection's
        lineage and attaches each level's children so the UI can render the
        currently open path.
        """
        root_fdis = [self.fdi_to_json(f) for f in FixtureDataItem.by_data_type(self.domain, self.data_types(0).get_id)]

        f_id = self.request.GET.get("fixture_id", None)
        selected_fdi_type = f_id.split(":")[0] if f_id else None
        selected_fdi_id = f_id.split(":")[1] if f_id else None

        if selected_fdi_id:
            # (removed unused local `index = 0`)
            lineage = self.generate_lineage(selected_fdi_type, selected_fdi_id)
            parent = {"children": root_fdis}
            for i, fdi in enumerate(lineage[:-1]):
                # Find this ancestor among the current level's children.
                this_fdi = [f for f in parent["children"] if f["id"] == fdi.get_id][0]
                next_h = self.hierarchy[i + 1]
                this_fdi["children"] = [
                    self.fdi_to_json(f)
                    for f in FixtureDataItem.by_field_value(
                        self.domain, self.data_types(i + 1), next_h["parent_ref"], fdi.fields[next_h["references"]]
                    )
                ]
                parent = this_fdi

        return {
            "api_root": self.api_root,
            "control_name": self.label,
            "control_slug": self.slug,
            "selected_fdi_id": selected_fdi_id,
            "fdis": json.dumps(root_fdis),
            "hierarchy": self.full_hierarchy,
        }
Beispiel #8
0
    def drilldown_map(self):
        """Build the disease -> tests drilldown options from fixture data.

        :return: list of dicts with ``val``/``text`` for each disease, each
            carrying its matching tests under ``next``.
        """
        diseases = []
        disease_fixtures = FixtureDataItem.by_data_type(
            self.domain, 
            FixtureDataType.by_domain_tag(self.domain, "diseases").one()
        )
        # Hoisted out of the loop: the "test" data type is the same for
        # every disease, so look it up once instead of once per iteration.
        test_data_type = FixtureDataType.by_domain_tag(self.domain, "test").one()
        for d in disease_fixtures:
            disease = dict(
                val="%(name)s:%(uid)s" % {'name': d.fields_without_attributes["disease_id"], 'uid': d.get_id}, 
                text=d.fields_without_attributes["disease_name"]
            )
            tests = []
            test_fixtures = FixtureDataItem.by_field_value(
                self.domain, 
                test_data_type,
                "disease_id",
                d.fields_without_attributes["disease_id"]
            )
            for t in test_fixtures:
                tests.append(dict(
                    val="%(name)s:%(uid)s" % {'name': t.fields_without_attributes["test_name"], 'uid': t.get_id}, 
                    text=t.fields_without_attributes["visible_test_name"])
                )
            disease['next'] = tests
            diseases.append(disease)

        return diseases
Beispiel #9
0
 def test_types(self):
     """Return the name of every "test" fixture item in this domain."""
     fixture_type = FixtureDataType.by_domain_tag(self.domain, "test").one()
     names = []
     for item in FixtureDataItem.by_data_type(self.domain, fixture_type):
         names.append(item.fields_without_attributes["test_name"])
     return names
Beispiel #10
0
def data_types(request, domain, data_type_id):
    """REST-style view for FixtureDataType resources.

    With a data_type_id: GET returns the type, PUT updates its tag/name/
    fields, DELETE removes the type together with all of its data items.
    Without an id: POST creates a new type, GET lists the domain's types.
    Any other method/id combination yields a 400 response.
    """
    
    if data_type_id:
        data_type = FixtureDataType.get(data_type_id)

        # NOTE(review): asserts are stripped under `python -O`; these act
        # as doc-type/domain access checks -- consider raising explicitly.
        assert(data_type.doc_type == FixtureDataType._doc_type)
        assert(data_type.domain == domain)

        if request.method == 'GET':
            return json_response(strip_json(data_type))

        elif request.method == 'PUT':
            # Parse the payload into a throwaway instance, then copy the
            # editable attributes onto the stored document.
            new = FixtureDataType(domain=domain, **_to_kwargs(request))
            for attr in 'tag', 'name', 'fields':
                setattr(data_type, attr, getattr(new, attr))
            data_type.save()
            return json_response(strip_json(data_type))

        elif request.method == 'DELETE':
            # Deleting a type also deletes every item of that type.
            for item in FixtureDataItem.by_data_type(domain, data_type.get_id):
                item.delete()
            data_type.delete()
            return json_response({})

    elif data_type_id is None:

        if request.method == 'POST':
            data_type = FixtureDataType(domain=domain, **_to_kwargs(request))
            data_type.save()
            return json_response(strip_json(data_type))

        elif request.method == 'GET':
            return json_response([strip_json(x) for x in FixtureDataType.by_domain(domain)])

    return HttpResponseBadRequest()
Beispiel #11
0
 def get_users_per_dctl(cls):
     """Map each DCTL id to the (unwrapped) users owning that fixture item."""
     mapping = dict()
     dctl_type = FixtureDataType.by_domain_tag(cls.domain, 'dctl').first()
     type_id = dctl_type.get_id if dctl_type else None
     for item in FixtureDataItem.by_data_type(cls.domain, type_id):
         mapping[item.fields_without_attributes.get("id")] = item.get_users(wrap=False)
     return mapping
Beispiel #12
0
    def filter_context(self):
        """Build the template context for this fixture-backed drilldown filter.

        Loads the root-level fixture items; when the request carries a
        ``fixture_id`` of the form ``"<type>:<id>"``, walks the selection's
        lineage and attaches each level's children so the UI can render the
        currently open path.
        """
        root_fdis = [self.fdi_to_json(f) for f in FixtureDataItem.by_data_type(self.domain, self.data_types(0).get_id)]

        f_id = self.request.GET.get('fixture_id', None)
        selected_fdi_type = f_id.split(':')[0] if f_id else None
        selected_fdi_id = f_id.split(':')[1] if f_id else None

        if selected_fdi_id:
            # (removed unused local `index = 0`)
            lineage = self.generate_lineage(selected_fdi_type, selected_fdi_id)
            parent = {'children': root_fdis}
            for i, fdi in enumerate(lineage[:-1]):
                # Find this ancestor among the current level's children.
                this_fdi = [f for f in parent['children'] if f['id'] == fdi.get_id][0]
                next_h = self.hierarchy[i+1]
                this_fdi['children'] = [self.fdi_to_json(f) for f in FixtureDataItem.by_field_value(self.domain,
                                        self.data_types(i+1), next_h["parent_ref"], fdi.fields_without_attributes[next_h["references"]])]
                parent = this_fdi

        return {
            'api_root': self.api_root,
            'control_name': self.label,
            'control_slug': self.slug,
            'selected_fdi_id': selected_fdi_id,
            'fdis': json.dumps(root_fdis),
            'hierarchy': self.full_hierarchy
        }
def get_user_site_map(domain):
    """Map each owning user's _id to the site_ids of their 'site' fixtures."""
    mapping = defaultdict(list)
    site_type = FixtureDataType.by_domain_tag(domain, 'site').first()
    for site_fixture in FixtureDataItem.by_data_type(domain, site_type.get_id):
        for owner in site_fixture.get_users():
            mapping[owner._id].append(site_fixture.fields_without_attributes['site_id'])
    return mapping
Beispiel #14
0
 def diseases(self):
     """Return the ids and names of all disease fixture items for the domain.

     The query result is materialized once with list() so that both
     comprehensions see the same rows even if by_data_type returns a
     one-shot iterable (iterating it twice would leave "names" empty).
     """
     disease_fixtures = list(FixtureDataItem.by_data_type(
         self.domain, FixtureDataType.by_domain_tag(self.domain, "diseases").one()
     ))
     return {
         "ids": [d.fields["disease_id"] for d in disease_fixtures],
         "names": [d.fields["disease_name"] for d in disease_fixtures],
     }
def get_user_site_map(domain):
    """Map each owning user's _id to the site_ids of their 'site' fixtures."""
    result = defaultdict(list)
    site_type = FixtureDataType.by_domain_tag(domain, 'site').first()
    for site_fixture in FixtureDataItem.by_data_type(domain, site_type.get_id):
        for owner in site_fixture.get_users():
            result[owner._id].append(site_fixture.fields['site_id'])
    return result
Beispiel #16
0
 def copy_data_items(old_type_id, new_type_id):
     # Copy every data item of the old type and re-point the copy at the
     # newly created type. `self`, `new_domain_name` and `user` come from
     # the enclosing scope.
     for data_item in FixtureDataItem.by_data_type(self.name, old_type_id):
         copied = self.copy_component(
             data_item.doc_type, data_item._id, new_domain_name, user=user)
         copied.data_type_id = new_type_id
         copied.save()
 def diseases(self):
     """Return the ids and names of all disease fixture items for the domain.

     The query result is materialized once with list() so that both
     comprehensions see the same rows even if by_data_type returns a
     one-shot iterable (iterating it twice would leave "names" empty).
     """
     disease_fixtures = list(FixtureDataItem.by_data_type(
         self.domain, 
         FixtureDataType.by_domain_tag(self.domain, "diseases").one()
     ))
     return {
         "ids": [d.fields_without_attributes["disease_id"] for d in disease_fixtures],
         "names": [d.fields_without_attributes["disease_name"] for d in disease_fixtures]
     }
Beispiel #18
0
    def copy_fixtures(self):
        """Copy every fixture data type, and its items, into the new domain."""
        from corehq.apps.fixtures.models import FixtureDataItem
        from corehq.apps.fixtures.dbaccessors import get_fixture_data_types_in_domain

        for data_type in get_fixture_data_types_in_domain(self.existing_domain):
            old_id, new_id = self.save_couch_copy(data_type, self.new_domain)
            # Re-parent each copied item under the freshly copied type.
            for data_item in FixtureDataItem.by_data_type(self.existing_domain, old_id):
                data_item.data_type_id = new_id
                self.save_couch_copy(data_item, self.new_domain)
Beispiel #19
0
    def __call__(self, user, version, last_sync=None):
        """Generate one <fixture> XML element per fixture data type for the
        given CommCareUser's restore payload.

        Global types get all of their items; non-global types get only the
        items returned by FixtureDataItem.by_user for this user.

        NOTE(review): Python 2 only -- `data_types.values() +
        global_types.values()` raises TypeError on Python 3 dict views.
        """
        assert isinstance(user, CommCareUser)

        # All fixture types in the domain keyed by id; global types are the
        # subset every user receives.
        all_types = dict([(t._id, t)
                          for t in FixtureDataType.by_domain(user.domain)])
        global_types = dict([(id, t) for id, t in all_types.items()
                             if t.is_global])

        items_by_type = defaultdict(list)

        def _set_cached_type(item, data_type):
            # set the cached version used by the object so that it doesn't
            # have to do another db trip later
            item._data_type = data_type

        for global_fixture in global_types.values():
            items = list(
                FixtureDataItem.by_data_type(user.domain, global_fixture))
            _ = [_set_cached_type(item, global_fixture) for item in items]
            items_by_type[global_fixture._id] = items

        # Items owned by the user for non-global types.
        other_items = FixtureDataItem.by_user(user)
        data_types = {}

        for item in other_items:
            if item.data_type_id in global_types:
                continue  # was part of the global type so no need to add here
            if item.data_type_id not in data_types:
                try:
                    data_types[item.data_type_id] = all_types[
                        item.data_type_id]
                except (AttributeError, KeyError):
                    # item references a type we cannot resolve; skip it
                    continue
            items_by_type[item.data_type_id].append(item)
            _set_cached_type(item, data_types[item.data_type_id])

        fixtures = []
        all_types = data_types.values() + global_types.values()
        for data_type in all_types:
            # One <fixture id="<self.id>:<tag>" user_id="..."> element
            # wrapping a <tag>_list of the type's items, sorted by sort_key.
            xFixture = ElementTree.Element('fixture',
                                           attrib={
                                               'id':
                                               ':'.join(
                                                   (self.id, data_type.tag)),
                                               'user_id':
                                               user.user_id
                                           })
            xItemList = ElementTree.Element('%s_list' % data_type.tag)
            xFixture.append(xItemList)
            for item in sorted(items_by_type[data_type.get_id],
                               key=lambda x: x.sort_key):
                xItemList.append(item.to_xml())
            fixtures.append(xFixture)
        return fixtures
Beispiel #20
0
    def copy_fixtures(self):
        """Copy every fixture data type, and its items, into the new domain."""
        from corehq.apps.fixtures.models import FixtureDataItem
        from corehq.apps.fixtures.dbaccessors import get_fixture_data_types_in_domain

        data_types = get_fixture_data_types_in_domain(self.existing_domain)
        for data_type in data_types:
            old_id, new_id = self.save_couch_copy(data_type, self.new_domain)
            items = FixtureDataItem.by_data_type(self.existing_domain, old_id)
            # Re-parent each copied item under the freshly copied type.
            for data_item in items:
                data_item.data_type_id = new_id
                self.save_couch_copy(data_item, self.new_domain)
Beispiel #21
0
    def __call__(self, restore_user, version, last_sync=None, app=None):
        """Generate <fixture> elements for every fixture data type during an
        OTA restore.

        Global types get all of their items; other types get the items the
        restoring user owns; types with no items still emit an empty
        fixture element.

        NOTE(review): Python 2 only -- `dict.values() + dict.values()` and
        `iteritems()` do not exist on Python 3 dicts.
        """
        assert isinstance(restore_user, OTARestoreUser)

        # All fixture types in the domain keyed by id; global types are the
        # subset every user receives.
        all_types = dict([
            (t._id, t) for t in FixtureDataType.by_domain(restore_user.domain)
        ])
        global_types = dict([(id, t) for id, t in all_types.items()
                             if t.is_global])

        items_by_type = defaultdict(list)

        def _set_cached_type(item, data_type):
            # set the cached version used by the object so that it doesn't
            # have to do another db trip later
            item._data_type = data_type

        for global_fixture in global_types.values():
            items = list(
                FixtureDataItem.by_data_type(restore_user.domain,
                                             global_fixture))
            _ = [_set_cached_type(item, global_fixture) for item in items]
            items_by_type[global_fixture._id] = items

        # Items owned by the restoring user for non-global types.
        other_items = restore_user.get_fixture_data_items()
        data_types = {}

        for item in other_items:
            if item.data_type_id in global_types:
                continue  # was part of the global type so no need to add here
            if item.data_type_id not in data_types:
                try:
                    data_types[item.data_type_id] = all_types[
                        item.data_type_id]
                except (AttributeError, KeyError):
                    # item references a type we cannot resolve; skip it
                    continue
            items_by_type[item.data_type_id].append(item)
            _set_cached_type(item, data_types[item.data_type_id])

        fixtures = []
        all_types_to_sync = data_types.values() + global_types.values()
        for data_type in all_types_to_sync:
            fixtures.append(
                self._get_fixture_element(
                    data_type.tag, restore_user.user_id,
                    sorted(items_by_type[data_type.get_id],
                           key=lambda x: x.sort_key)))
        # Emit an empty fixture for every type that had no items at all, so
        # stale data on the device gets cleared.
        for data_type_id, data_type in all_types.iteritems():
            if data_type_id not in global_types and data_type_id not in data_types:
                fixtures.append(
                    self._get_fixture_element(data_type.tag,
                                              restore_user.user_id, []))
        return fixtures
Beispiel #22
0
def get_unique_combinations(domain, place_types=None, place=None):
    """Return the distinct place combinations for a domain.

    :param domain: domain whose place fixtures are queried
    :param place_types: ordered place-type tags; the last entry is treated
        as the base type whose fixture items are enumerated
    :param place: optional (place_type, fixture_id) pair restricting the
        combinations to a single place
    :return: list of dicts mapping each place type to its id string (plus
        "gps" for the base type), or [] when no place_types are given
    """
    if not place_types:
        return []
    if place:
        place_type = place[0]
        place = FixtureDataItem.get(place[1])
        place_name = place.fields_without_attributes[place_type + '_id']

    place_data_types = {}
    for pt in place_types:
        place_data_types[pt] = FixtureDataType.by_domain_tag(domain, pt).one()

    # The base type is the most specific one (last entry of place_types);
    # list(reversed(...)) replaces the redundant identity comprehension.
    relevant_types = list(reversed(place_types))
    base_type = relevant_types[0] if relevant_types else ""
    fdis = FixtureDataItem.by_data_type(
        domain, place_data_types[base_type].get_id) if base_type else []

    combos = []
    for fdi in fdis:
        if place:
            if base_type == place_type:
                # Base-level restriction: keep only the selected place.
                if fdi.fields_without_attributes[base_type +
                                                 '_id'] != place_name:
                    continue
            else:
                # Higher-level restriction: match the ancestor's id.
                rel_type_name = fdi.fields_without_attributes.get(
                    place_type + "_id", "")
                if not rel_type_name:
                    logging.error(
                        "GSID Reports Error: fixture_id: %s -- place_type: %s"
                        % (fdi.get_id, place_type))
                    continue
                if rel_type_name.lower() != place_name:
                    continue
        comb = {}
        for pt in place_types:
            if base_type == pt:
                comb[pt] = str(fdi.fields_without_attributes[pt + '_id'])
                comb["gps"] = str(fdi.fields_without_attributes["gps"])
            else:
                p_id = fdi.fields_without_attributes.get(pt + "_id", None)
                if p_id:
                    if place and pt == place_type and p_id != place_name:
                        continue
                    comb[pt] = str(p_id)
                else:
                    comb[pt] = None
        combos.append(comb)
    return combos
Beispiel #23
0
    def copy_fixtures(self):
        """Copy fixture types, their items, and the calendar fixture
        settings from the existing domain into the new one."""
        from corehq.apps.fixtures.models import FixtureDataItem
        from corehq.apps.fixtures.dbaccessors import get_fixture_data_types_in_domain

        for data_type in get_fixture_data_types_in_domain(self.existing_domain):
            old_id, new_id = self.save_couch_copy(data_type, self.new_domain)
            # Re-parent each copied item under the freshly copied type.
            for data_item in FixtureDataItem.by_data_type(self.existing_domain, old_id):
                data_item.data_type_id = new_id
                self.save_couch_copy(data_item, self.new_domain)

        # TODO: FixtureOwnership - requires copying users & groups

        fixture_settings = CalendarFixtureSettings.for_domain(self.existing_domain)
        self.save_sql_copy(fixture_settings, self.new_domain)
Beispiel #24
0
    def copy_fixtures(self):
        """Copy fixture types, their items, and the calendar fixture
        settings from the existing domain into the new one."""
        from corehq.apps.fixtures.models import FixtureDataItem
        from corehq.apps.fixtures.dbaccessors import get_fixture_data_types_in_domain

        data_types = get_fixture_data_types_in_domain(self.existing_domain)
        for data_type in data_types:
            old_id, new_id = self.save_couch_copy(data_type, self.new_domain)
            items = FixtureDataItem.by_data_type(self.existing_domain, old_id)
            # Re-parent each copied item under the freshly copied type.
            for data_item in items:
                data_item.data_type_id = new_id
                self.save_couch_copy(data_item, self.new_domain)

        # TODO: FixtureOwnership - requires copying users & groups

        fixture_settings = CalendarFixtureSettings.for_domain(self.existing_domain)
        self.save_sql_copy(fixture_settings, self.new_domain)
Beispiel #25
0
 def getFacilities(cls, domain=None):
     """Build a nested region -> district -> site mapping from the domain's
     'site' fixture items."""
     cls.domain = domain or cls.domain
     site_type = FixtureDataType.by_domain_tag(cls.domain, 'site').first()
     facilities = dict()
     for fixture in FixtureDataItem.by_data_type(cls.domain, site_type.get_id):
         values = fixture.fields_without_attributes
         region = values.get("region_id")
         district = values.get("district_id")
         site = values.get("site_number")
         # Create each nesting level lazily, on first sight of its key.
         if region not in facilities:
             facilities[region] = dict(name=values.get("region_name"), districts=dict())
         districts = facilities[region]["districts"]
         if district not in districts:
             districts[district] = dict(name=values.get("district_name"), sites=dict())
         sites = districts[district]["sites"]
         if site not in sites:
             sites[site] = dict(name=values.get("site_name"))
     return facilities
def item_lists(user, version=V2, last_sync=None):
    """Build the list of <fixture> XML elements for a user's restore.

    Global types get all of their items; other types get the items owned
    by the user. Non-CommCareUser callers are unwrapped via `_hq_user`.

    NOTE(review): legacy Python 2 code (`dict.values() + dict.values()`);
    only the removed-in-Py3 `dict.has_key` was modernized to `in`, which
    behaves identically on both versions.
    """
    if isinstance(user, CommCareUser):
        pass
    elif hasattr(user, "_hq_user") and user._hq_user is not None:
        user = user._hq_user
    else:
        return []

    # All fixture types in the domain keyed by id; global types are the
    # subset every user receives.
    all_types = dict([(t._id, t) for t in FixtureDataType.by_domain(user.domain)])
    global_types = dict([(id, t) for id, t in all_types.items() if t.is_global])

    items_by_type = defaultdict(list)

    def _set_cached_type(item, data_type):
        # set the cached version used by the object so that it doesn't
        # have to do another db trip later
        item._data_type = data_type

    for global_fixture in global_types.values():
        items = list(FixtureDataItem.by_data_type(user.domain, global_fixture))
        _ = [_set_cached_type(item, global_fixture) for item in items]
        items_by_type[global_fixture._id] = items

    other_items = FixtureDataItem.by_user(user)
    data_types = {}

    for item in other_items:
        if item.data_type_id in global_types:
            continue  # was part of the global type so no need to add here
        if item.data_type_id not in data_types:
            try:
                data_types[item.data_type_id] = all_types[item.data_type_id]
            except (AttributeError, KeyError):
                # item references a type we cannot resolve; skip it
                continue
        items_by_type[item.data_type_id].append(item)
        _set_cached_type(item, data_types[item.data_type_id])

    fixtures = []
    all_types = data_types.values() + global_types.values()
    for data_type in all_types:
        xFixture = ElementTree.Element('fixture', attrib={'id': 'item-list:%s' % data_type.tag, 'user_id': user.user_id})
        xItemList = ElementTree.Element('%s_list' % data_type.tag)
        xFixture.append(xItemList)
        for item in sorted(items_by_type[data_type.get_id], key=lambda x: x.sort_key):
            xItemList.append(item.to_xml())
        fixtures.append(xFixture)
    return fixtures
Beispiel #27
0
    def __call__(self, restore_state):
        """Emit a fixture element (and, for indexed types, a schema element)
        for every data type in the domain, sorted by tag.

        NOTE(review): Python 2 only -- `iteritems()` and the tuple-unpacking
        lambda parameter below are invalid on Python 3.
        """
        restore_user = restore_state.restore_user

        # All fixture types in the domain keyed by id; global types are the
        # subset every user receives.
        all_types = {
            t._id: t
            for t in FixtureDataType.by_domain(restore_user.domain)
        }
        global_types = {id: t for id, t in all_types.items() if t.is_global}

        items_by_type = defaultdict(list)

        def _set_cached_type(item, data_type):
            # set the cached version used by the object so that it doesn't
            # have to do another db trip later
            item._data_type = data_type

        for global_fixture in global_types.values():
            items = FixtureDataItem.by_data_type(restore_user.domain,
                                                 global_fixture)
            _ = [_set_cached_type(item, global_fixture) for item in items]
            items_by_type[global_fixture._id] = items

        if set(all_types) - set(global_types):
            # only query ownership models if there are non-global types
            other_items = restore_user.get_fixture_data_items()

            for item in other_items:
                if item.data_type_id in global_types:
                    continue  # was part of the global type so no need to add here
                try:
                    _set_cached_type(item, all_types[item.data_type_id])
                except (AttributeError, KeyError):
                    # item references a type we cannot resolve; skip it
                    continue
                items_by_type[item.data_type_id].append(item)

        fixtures = []
        types_sorted_by_tag = sorted(all_types.iteritems(),
                                     key=lambda (id_, type_): type_.tag)
        for data_type_id, data_type in types_sorted_by_tag:
            if data_type.is_indexed:
                fixtures.append(self._get_schema_element(data_type))
            # Types without items still emit an (empty) fixture element.
            items = sorted(items_by_type.get(data_type_id, []),
                           key=lambda x: x.sort_key)
            fixtures.append(
                self._get_fixture_element(data_type, restore_user.user_id,
                                          items))
        return fixtures
Beispiel #28
0
def data_items(request, domain, data_type_id, data_item_id):
    """REST-style view over fixture data items.

    POST   (no item id)  -- create a new item under ``data_type_id``.
    GET    (no item id)  -- list the type's items, sorted by ``sort_key``.
    GET    (item id)     -- fetch one item; 404 if it does not exist.
    PUT    (item id)     -- replace the item's ``fields``.
    DELETE (item id)     -- recursively delete the item.
    Any other combination returns HTTP 400.
    """

    def prepare_item(item):
        # Serialize the item, optionally embedding groups/users when the
        # caller asks for them via ?groups=true / ?users=true.
        serialized = strip_json(item, disallow=['data_type_id'])
        if request.GET.get('groups') == 'true':
            serialized['groups'] = [strip_json(g) for g in item.get_groups()]
        if request.GET.get('users') == 'true':
            serialized['users'] = [prepare_user(u) for u in item.get_users()]
        return serialized

    method = request.method
    if method == 'POST' and data_item_id is None:
        created = FixtureDataItem(domain=domain, data_type_id=data_type_id,
                                  **_to_kwargs(request))
        created.save()
        return json_response(strip_json(created, disallow=['data_type_id']))
    elif method == 'GET' and data_item_id is None:
        rows = sorted(FixtureDataItem.by_data_type(domain, data_type_id),
                      key=lambda row: row.sort_key)
        return json_response([prepare_item(row) for row in rows])
    elif method == 'GET' and data_item_id:
        try:
            item = FixtureDataItem.get(data_item_id)
        except ResourceNotFound:
            raise Http404()
        assert(item.domain == domain and item.data_type.get_id == data_type_id)
        return json_response(prepare_item(item))
    elif method == 'PUT' and data_item_id:
        # Only ``fields`` is copied from the posted body onto the stored doc.
        original = FixtureDataItem.get(data_item_id)
        posted = FixtureDataItem(domain=domain, **_to_kwargs(request))
        original.fields = posted.fields
        original.save()
        return json_response(strip_json(original, disallow=['data_type_id']))
    elif method == 'DELETE' and data_item_id:
        item = FixtureDataItem.get(data_item_id)
        assert(item.domain == domain and item.data_type.get_id == data_type_id)
        with CouchTransaction() as transaction:
            item.recursive_delete(transaction)
        return json_response({})
    else:
        return HttpResponseBadRequest()
Beispiel #29
0
def data_items(request, domain, data_type_id, data_item_id):
    """REST-style dispatcher for fixture data items.

    POST   (no item id)  -- create a new item under ``data_type_id``.
    GET    (no item id)  -- list the type's items, sorted by ``sort_key``.
    GET    (item id)     -- fetch one item; 404 if it does not exist.
    PUT    (item id)     -- replace the item's ``fields``.
    DELETE (item id)     -- recursively delete the item.
    Any other combination returns HTTP 400.
    """

    def prepare_item(item):
        # Serialize an item, optionally embedding its groups/users when
        # requested via ?groups=true / ?users=true.
        ret = strip_json(item, disallow=['data_type_id'])
        if request.GET.get('groups') == 'true':
            ret['groups'] = []
            for group in item.get_groups():
                ret['groups'].append(strip_json(group))
        if request.GET.get('users') == 'true':
            ret['users'] = []
            for user in item.get_users():
                ret['users'].append(prepare_user(user))
        return ret

    if request.method == 'POST' and data_item_id is None:
        o = FixtureDataItem(domain=domain, data_type_id=data_type_id, **_to_kwargs(request))
        o.save()
        return json_response(strip_json(o, disallow=['data_type_id']))
    elif request.method == 'GET' and data_item_id is None:
        return json_response([
            prepare_item(x)
            for x in sorted(FixtureDataItem.by_data_type(domain, data_type_id),
                            key=lambda x: x.sort_key)
        ])
    elif request.method == 'GET' and data_item_id:
        try:
            o = FixtureDataItem.get(data_item_id)
        except ResourceNotFound:
            raise Http404()
        # NOTE(review): assert is stripped under ``python -O``; presumably a
        # domain/type mismatch should be an explicit 404 -- confirm.
        assert(o.domain == domain and o.data_type.get_id == data_type_id)
        return json_response(prepare_item(o))
    elif request.method == 'PUT' and data_item_id:
        original = FixtureDataItem.get(data_item_id)
        new = FixtureDataItem(domain=domain, **_to_kwargs(request))
        # only ``fields`` is copied over; other posted attributes are ignored
        for attr in 'fields',:
            setattr(original, attr, getattr(new, attr))
        original.save()
        return json_response(strip_json(original, disallow=['data_type_id']))
    elif request.method == 'DELETE' and data_item_id:
        o = FixtureDataItem.get(data_item_id)
        assert(o.domain == domain and o.data_type.get_id == data_type_id)
        with CouchTransaction() as transaction:
            o.recursive_delete(transaction)
        return json_response({})
    else:
        return HttpResponseBadRequest()
Beispiel #30
0
    def _get_facilities(cls, domain=None):
        """Bucket the 'site' fixture rows of ``domain`` (default
        ``cls.domain``) into ``{'ihf': [...], 'chf': [...]}`` keyed on each
        row's ``ihf_chf`` field; rows with any other value are dropped."""
        domain = domain or cls.domain
        buckets = {'ihf': [], 'chf': []}
        site_type = FixtureDataType.by_domain_tag(domain, 'site').first()
        for row in FixtureDataItem.by_data_type(domain, site_type.get_id):
            fields = row.fields_without_attributes
            category = fields.get("ihf_chf", "").lower()
            if category == 'ifh':  # typo in some test data
                category = 'ihf'
            if category in buckets:
                buckets[category].append(fields)
            # else: site fixture item without an IHF/CHF value -- skip it
        return buckets
Beispiel #31
0
    def obj_create(self, bundle, request=None, **kwargs):
        """Tastypie create hook for fixture data items.

        Validates that ``data_type_id`` was posted and refers to an existing
        lookup table, then saves a new FixtureDataItem in the request's
        domain, appended at the end of the table's sort order.
        """
        if 'data_type_id' not in bundle.data:
            raise BadRequest("data_type_id must be specified")
        data_type_id = bundle.data['data_type_id']

        # the referenced lookup table must exist
        try:
            FixtureDataType.get(data_type_id)
        except ResourceNotFound:
            raise NotFound('Lookup table not found')

        domain = kwargs['domain']
        existing_count = len(FixtureDataItem.by_data_type(domain, data_type_id))
        item = FixtureDataItem(bundle.data)
        item.domain = domain
        item.sort_key = existing_count + 1  # append after current rows
        bundle.obj = item
        item.save()
        return bundle
Beispiel #32
0
    def obj_create(self, bundle, request=None, **kwargs):
        """Tastypie create hook for fixture data items.

        Validates that the posted ``data_type_id`` is present and refers to an
        existing lookup table, then saves a new FixtureDataItem in the
        request's domain, appended at the end of the table's sort order.

        :raises BadRequest: if ``data_type_id`` is missing or falsy.
        :raises NotFound: if no FixtureDataType with that id exists.
        """
        data_type_id = bundle.data.get('data_type_id', None)

        if not data_type_id:
            raise BadRequest("data_type_id must be specified")

        try:
            FixtureDataType.get(data_type_id)
        except ResourceNotFound:
            raise NotFound('Lookup table not found')

        # NOTE(review): count-then-save is racy under concurrent creates --
        # two requests may compute the same sort_key.
        number_items = len(
            FixtureDataItem.by_data_type(kwargs['domain'], data_type_id))
        bundle.obj = FixtureDataItem(bundle.data)
        bundle.obj.domain = kwargs['domain']
        bundle.obj.sort_key = number_items + 1
        bundle.obj.save()
        return bundle
    def __call__(self, restore_user, version, last_sync=None, app=None):
        """Build the fixture XML elements for an OTA restore.

        Emits one fixture element per global data type and per data type the
        user owns items in (items sorted by ``sort_key``), plus an empty
        fixture element for every remaining type in the domain.

        :param restore_user: the OTARestoreUser being restored.
        :param version: unused here; part of the provider interface.
        :returns: list of fixture elements.
        """
        assert isinstance(restore_user, OTARestoreUser)

        all_types = {t._id: t for t in FixtureDataType.by_domain(restore_user.domain)}
        # don't shadow the ``id`` builtin while filtering the global types
        global_types = {type_id: t for type_id, t in all_types.items() if t.is_global}

        items_by_type = defaultdict(list)

        def _set_cached_type(item, data_type):
            # set the cached version used by the object so that it doesn't
            # have to do another db trip later
            item._data_type = data_type

        for global_fixture in global_types.values():
            items = list(FixtureDataItem.by_data_type(restore_user.domain, global_fixture))
            for item in items:
                _set_cached_type(item, global_fixture)
            items_by_type[global_fixture._id] = items

        other_items = restore_user.get_fixture_data_items()
        data_types = {}

        for item in other_items:
            if item.data_type_id in global_types:
                continue  # was part of the global type so no need to add here
            if item.data_type_id not in data_types:
                try:
                    data_types[item.data_type_id] = all_types[item.data_type_id]
                except (AttributeError, KeyError):
                    # item references a type that no longer exists -- skip it
                    continue
            items_by_type[item.data_type_id].append(item)
            _set_cached_type(item, data_types[item.data_type_id])

        fixtures = []
        # list() both sides: dict.values() returns views on Python 3 and the
        # original ``values() + values()`` expression only worked on Python 2
        all_types_to_sync = list(data_types.values()) + list(global_types.values())
        for data_type in all_types_to_sync:
            fixtures.append(self._get_fixture_element(
                data_type.tag,
                restore_user.user_id,
                sorted(items_by_type[data_type.get_id], key=lambda x: x.sort_key)
            ))
        # every remaining type still gets an (empty) fixture element
        # (.items() works on both Python 2 and 3, unlike .iteritems())
        for data_type_id, data_type in all_types.items():
            if data_type_id not in global_types and data_type_id not in data_types:
                fixtures.append(self._get_fixture_element(data_type.tag, restore_user.user_id, []))
        return fixtures
Beispiel #34
0
def get_unique_combinations(domain, place_types=None, place=None):
    """Build place-type -> place-id combination dicts for GSID reports.

    Iterates the fixture items of the most specific (last) entry in
    ``place_types`` and, for each item, assembles a dict mapping every place
    type to its id (plus a "gps" entry for the base type), optionally
    filtered down to a single ``place``.

    :param place_types: ordered list of place-type tags; empty/None returns [].
    :param place: optional ``(place_type, fixture_item_id)`` pair to filter on.
    :returns: list of combination dicts.
    """
    if not place_types:
        return []
    if place:
        place_type = place[0]
        place = FixtureDataItem.get(place[1])
        place_name = place.fields_without_attributes[place_type + '_id']

    # one FixtureDataType per requested place-type tag
    place_data_types = {}
    for pt in place_types:
        place_data_types[pt] = FixtureDataType.by_domain_tag(domain, pt).one()

    # the base type is the last (most specific) entry of place_types
    relevant_types = [t for t in reversed(place_types)]
    base_type = relevant_types[0] if relevant_types else ""
    fdis = FixtureDataItem.by_data_type(domain, place_data_types[base_type].get_id) if base_type else []

    combos = []
    for fdi in fdis:
        if place:
            if base_type == place_type:
                # filtering on the base type itself: exact id match required
                if fdi.fields_without_attributes[base_type + '_id'] != place_name:
                    continue
            else:
                # filtering on an ancestor type: compare its reference field
                rel_type_name = fdi.fields_without_attributes.get(place_type+"_id", "")
                if not rel_type_name:
                    logging.error("GSID Reports Error: fixture_id: %s -- place_type: %s" % (fdi.get_id, place_type))
                    continue
                if rel_type_name.lower() != place_name:
                    continue
        comb = {}
        for pt in place_types:
            if base_type == pt:
                comb[pt] = str(fdi.fields_without_attributes[pt + '_id'])
                comb["gps"] = str(fdi.fields_without_attributes["gps"])
            else:
                p_id = fdi.fields_without_attributes.get(pt + "_id", None)
                if p_id:
                    # NOTE(review): this ``continue`` skips only the current
                    # place type, not the whole combo -- presumably the intent
                    # was to drop the combo entirely; confirm before changing.
                    if place and pt == place_type and p_id != place_name:
                        continue
                    comb[pt] = str(p_id)
                else:
                    comb[pt] = None
        combos.append(comb)
    return combos
Beispiel #35
0
def delete_unneeded_fixture_data_item(self, domain, data_type_id):
    """Deletes all fixture data items and their ownership models based on their data type.

    Note that this does not bust any caches meaning that the data items could still
    be returned to the user for some time
    """
    deleted_item_ids = []
    try:
        # delete the items themselves in batches of 1000
        for item_batch in chunked(FixtureDataItem.by_data_type(domain, data_type_id), 1000):
            FixtureDataItem.delete_docs(item_batch)
            deleted_item_ids.extend(item.get_id for item in item_batch)
        # then sweep away the ownership docs that pointed at them
        for id_batch in chunked(deleted_item_ids, 1000):
            ownerships = FixtureOwnership.for_all_item_ids(id_batch, domain)
            for ownership_batch in chunked(ownerships, 1000):
                FixtureOwnership.delete_docs(ownership_batch)
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception as exc:
        # there's no base exception in couchdbkit to catch, so must use Exception
        self.retry(exc=exc)
Beispiel #36
0
    def obj_get_list(self, bundle, **kwargs):
        """Tastypie list hook for fixture data items.

        Query modes, checked in order:
          1. parent lookup -- ``parent_id`` + ``parent_ref_name`` +
             ``references`` + ``child_type``: children whose reference field
             matches the parent's value.
          2. type lookup -- ``fixture_type_id`` or ``fixture_type`` tag.
          3. fallback -- every fixture item in the domain.
        """
        domain = kwargs['domain']
        params = bundle.request.GET
        parent_id = params.get("parent_id", None)
        parent_ref_name = params.get("parent_ref_name", None)
        references = params.get("references", None)
        child_type = params.get("child_type", None)
        type_id = params.get("fixture_type_id", None)
        type_tag = params.get("fixture_type", None)

        if parent_id and parent_ref_name and child_type and references:
            parent = FixtureDataItem.get(parent_id)
            ref_value = parent.fields_without_attributes[references]
            fdis = list(FixtureDataItem.by_field_value(
                domain, child_type, parent_ref_name, ref_value))
        elif type_id or type_tag:
            lookup = type_id or FixtureDataType.by_domain_tag(domain, type_tag).one()
            fdis = list(FixtureDataItem.by_data_type(domain, lookup))
        else:
            fdis = list(FixtureDataItem.by_domain(domain))

        return [convert_fdt(fdi) for fdi in fdis] or []
Beispiel #37
0
def data_items(request, domain, data_type_id, data_item_id):
    """REST-style view over fixture data items.

    POST   (no item id)  -- create a new item under ``data_type_id``.
    GET    (no item id)  -- list the type's items, sorted by ``sort_key``.
    GET    (item id)     -- fetch one item; 404 if it does not exist.
    PUT    (item id)     -- replace the item's ``fields``.
    DELETE (item id)     -- delete the item (non-recursive).
    Any other combination returns HTTP 400.
    """

    def prepare_item(item):
        # Serialize an item, optionally embedding its groups/users when
        # requested via ?groups=true / ?users=true.
        ret = strip_json(item, disallow=['data_type_id'])
        if request.GET.get('groups') == 'true':
            ret['groups'] = []
            for group in item.get_groups():
                ret['groups'].append(strip_json(group))
        if request.GET.get('users') == 'true':
            ret['users'] = []
            for user in item.get_users():
                ret['users'].append(prepare_user(user))
        return ret

    if request.method == 'POST' and data_item_id is None:
        o = FixtureDataItem(domain=domain, data_type_id=data_type_id, **_to_kwargs(request))
        o.save()
        return json_response(strip_json(o, disallow=['data_type_id']))
    elif request.method == 'GET' and data_item_id is None:
        # sort by sort_key for a stable listing, consistent with the other
        # data_items implementations in this module
        return json_response([
            prepare_item(x)
            for x in sorted(FixtureDataItem.by_data_type(domain, data_type_id),
                            key=lambda x: x.sort_key)
        ])
    elif request.method == 'GET' and data_item_id:
        # return a 404 for an unknown item id instead of letting
        # ResourceNotFound bubble up as a 500 (matches the sibling views)
        try:
            o = FixtureDataItem.get(data_item_id)
        except ResourceNotFound:
            raise Http404()
        assert(o.domain == domain and o.data_type.get_id == data_type_id)
        return json_response(prepare_item(o))
    elif request.method == 'PUT' and data_item_id:
        original = FixtureDataItem.get(data_item_id)
        new = FixtureDataItem(domain=domain, **_to_kwargs(request))
        # only ``fields`` is copied over; other posted attributes are ignored
        for attr in 'fields',:
            setattr(original, attr, getattr(new, attr))
        original.save()
        return json_response(strip_json(original, disallow=['data_type_id']))
    elif request.method == 'DELETE' and data_item_id:
        o = FixtureDataItem.get(data_item_id)
        assert(o.domain == domain and o.data_type.get_id == data_type_id)
        o.delete()
        return json_response({})
    else:
        return HttpResponseBadRequest()
Beispiel #38
0
    def obj_get_list(self, bundle, **kwargs):
        """Tastypie list hook for fixture data items.

        Supports three query modes, checked in order:
          1. parent lookup: ``parent_id`` + ``parent_ref_name`` +
             ``references`` + ``child_type`` -- children of a parent item
             whose reference field matches the parent's value.
          2. type lookup: ``fixture_type_id`` or ``fixture_type`` (tag).
          3. fallback: every fixture item in the domain.
        """
        domain = kwargs['domain']
        parent_id = bundle.request.GET.get("parent_id", None)
        parent_ref_name = bundle.request.GET.get("parent_ref_name", None)
        references = bundle.request.GET.get("references", None)
        child_type = bundle.request.GET.get("child_type", None)
        type_id = bundle.request.GET.get("fixture_type_id", None)
        type_tag = bundle.request.GET.get("fixture_type", None)

        if parent_id and parent_ref_name and child_type and references:
            parent_fdi = FixtureDataItem.get(parent_id)
            fdis = list(
                FixtureDataItem.by_field_value(
                    domain, child_type, parent_ref_name,
                    parent_fdi.fields_without_attributes[references]))
        elif type_id or type_tag:
            # NOTE(review): when only the tag is given, this passes the
            # FixtureDataType object itself (not an id) to by_data_type --
            # presumably the view accepts either; confirm.
            type_id = type_id or FixtureDataType.by_domain_tag(
                domain, type_tag).one()
            fdis = list(FixtureDataItem.by_data_type(domain, type_id))
        else:
            fdis = list(FixtureDataItem.by_domain(domain))

        return [convert_fdt(fdi) for fdi in fdis] or []
Beispiel #39
0
 def get_dctl_list(cls):
     """Return ``[{'text': name, 'val': id}, ...]`` choices built from the
     domain's 'dctl' fixture table (empty lookup id when the table is
     missing)."""
     dctl_type = FixtureDataType.by_domain_tag(cls.domain, 'dctl').first()
     lookup_id = dctl_type.get_id if dctl_type else None
     choices = []
     for item in FixtureDataItem.by_data_type(cls.domain, lookup_id):
         fields = item.fields_without_attributes
         choices.append({'text': fields.get("name"), 'val': fields.get("id")})
     return choices
Beispiel #40
0
 def test_types(self):
     """Return the ``test_name`` field of every item in the domain's
     "test" fixture table."""
     test_type = FixtureDataType.by_domain_tag(self.domain, "test").one()
     rows = FixtureDataItem.by_data_type(self.domain, test_type)
     return [row.fields["test_name"] for row in rows]
Beispiel #41
0
 def copy_data_items(old_type_id, new_type_id):
     # Copy every data item of ``old_type_id`` into the new domain and
     # re-point each copy at ``new_type_id``.
     # NOTE(review): relies on ``self``, ``new_domain_name`` and ``user``
     # as free variables from the enclosing scope -- this is a closure,
     # not a standalone function.
     for item in FixtureDataItem.by_data_type(self.name, old_type_id):
         comp = self.copy_component(item.doc_type, item._id,
                                    new_domain_name, user=user)
         comp.data_type_id = new_type_id
         comp.save()
Beispiel #42
0
def _prepare_fixture(table_ids, domain, html_response=False, task=None):
    """Collect fixture data types and items for ``domain`` into excel-style
    sheet dicts.

    Returns ``(data_types_book, excel_sheets)`` where ``excel_sheets`` maps a
    sheet name ("types" plus one sheet per table tag) to
    ``{"headers": tuple, "rows": tuple-of-row-tuples}``.

    :param table_ids: optional list of FixtureDataType ids to restrict to;
        falls back to every table in the domain when empty or unresolvable.
    :param html_response: when True, only the first table is prepared and the
        leading UID/delete columns are dropped from each row.
    :param task: optional task used for download progress reporting.
    :raises FixtureDownloadError: when ``html_response`` is set and a
        requested table id does not exist.
    """
    if table_ids and table_ids[0]:
        try:
            data_types_view = [FixtureDataType.get(id) for id in table_ids]
        except ResourceNotFound:
            if html_response:
                raise FixtureDownloadError(
                    _("Sorry, we couldn't find that table. If you think this "
                      "is a mistake please report an issue."))
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        data_types_view = list(data_types_view)[0:1]

    total_tables = len(data_types_view)
    # when total_tables < 4 the final percentage can be >= 100%, but for
    # a small number of tables it renders more accurate progress
    total_events = (total_tables + (0 if total_tables < 4 else 1)) * 10

    now = datetime.utcnow
    last_update = [now()]
    update_period = timedelta(
        seconds=1)  # do not update progress more than once a second

    def _update_progress(event_count, item_count, items_in_table):
        # throttled progress callback; each table contributes 10 "events"
        if task and now() - last_update[0] > update_period:
            last_update[0] = now()
            processed = event_count * 10 + (10. * item_count / items_in_table)
            processed = min(processed, total_events)  # limit at 100%
            DownloadBase.set_progress(task, processed, total_events)

    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
        Contains all excel sheets in following format
        excel_sheets = {
            "types": {
                "headers": [],
                "rows": [(row), (row), (row)]
            }
            "next-sheet": {
                "headers": [],
                "rows": [(row), (row), (row)]
            },
            ...
        }
    """
    excel_sheets = {}

    def empty_padding_list(length):
        # fixed-width rows are padded with empty strings
        return ["" for x in range(0, length)]

    max_fields = 0
    max_item_attributes = 0
    """
        - Helper to generate headers like "field 2: property 1"
        - Captures max_num_of_properties for any field of any type at the list-index.
        Example values:
            [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
            [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
            [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
        captures all possible 'field-property' values for each data-type
        Example value
          {
            u'clinics': {'field 2 : property 1': u'lang'},
            u'growth_chart': {'field 2 : property 2': u'maxWeight'}
          }
    """
    type_field_properties = {}
    indexed_field_numbers = set()

    def get_field_prop_format(field_number, prop_number):
        # header label like "field 2 : property 1"
        return "field " + str(field_number) + " : property " + str(prop_number)

    for event_count, data_type in enumerate(data_types_view):
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        if len(data_type.item_attributes) > max_item_attributes:
            max_item_attributes = len(data_type.item_attributes)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_locations = 0
        max_field_prop_combos = {
            field_name: 0
            for field_name in data_type.fields_without_attributes
        }
        fixture_data = FixtureDataItem.by_data_type(domain, data_type.get_id)
        num_rows = len(fixture_data)
        for n, item_row in enumerate(fixture_data):
            _update_progress(event_count, n, num_rows)
            data_items_book_by_type[data_type.tag].append(item_row)
            max_groups = max(max_groups, len(item_row.groups))
            max_users = max(max_users, len(item_row.users))
            max_locations = max(max_locations, len(item_row.locations))
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos

        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_locations": max_locations,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    for x in range(1, max_fields + 1):
        types_sheet["headers"].append("field %d" % x)
        try:
            # NOTE(review): any() aborts with IndexError at the first data
            # type having fewer than x fields, so an indexed field at this
            # position on a *later* type could be missed -- confirm intent.
            if any(data_type.fields[x - 1].is_indexed
                   for data_type in data_types_view):
                indexed_field_numbers.add(x - 1)
                types_sheet["headers"].append("field %d: is_indexed?" % x)
        except IndexError:
            continue
    types_sheet["headers"].extend(
        ["property %d" % x for x in range(1, max_item_attributes + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = []
        # Count "is_indexed?" columns added, because data types with fewer fields will add fewer columns
        indexed_field_count = 0
        for i, field in enumerate(data_type.fields):
            field_vals.append(field.field_name)
            if i in indexed_field_numbers:
                field_vals.append('yes' if field.is_indexed else 'no')
                indexed_field_count += 1
        field_vals.extend(
            empty_padding_list(max_fields - len(data_type.fields) +
                               len(indexed_field_numbers) -
                               indexed_field_count))
        item_att_vals = (data_type.item_attributes +
                         empty_padding_list(max_item_attributes -
                                            len(data_type.item_attributes)))
        prop_vals = []
        if data_type.tag in type_field_properties:
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend(
                [props.get(key, "") for key in field_prop_headers])
        row = tuple(common_vals[2 if html_response else 0:] + field_vals +
                    item_att_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet

    # Prepare 'items' sheet data for each data-type
    for n, data_type in enumerate(data_types_book):
        _update_progress(total_tables, n, total_tables)
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_locations = item_helpers["max_locations"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        location_headers = [
            "location %d" % x for x in range(1, max_locations + 1)
        ]
        field_headers = []
        item_att_headers = [
            "property: " + attribute for attribute in data_type.item_attributes
        ]
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:] + field_headers +
            item_att_headers + user_headers + group_headers + location_headers)
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = ([user.raw_username for user in item_row.users] +
                         empty_padding_list(max_users - len(item_row.users)))
            group_vals = (
                [group.name for group in item_row.groups] +
                empty_padding_list(max_groups - len(item_row.groups)))
            location_vals = (
                [loc.site_code for loc in item_row.locations] +
                # fixed: pad with max_locations (was max_groups), which
                # misaligned columns whenever the two maxima differed
                empty_padding_list(max_locations - len(item_row.locations)))
            field_vals = []
            item_att_vals = [
                item_row.item_attributes[attribute]
                for attribute in data_type.item_attributes
            ]
            for field in data_type.fields:
                if len(field.properties) == 0:
                    fixture_fields = item_row.fields.get(field.field_name)
                    if fixture_fields and any(fixture_fields.field_list):
                        value = item_row.fields.get(
                            field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(
                        item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(
                            item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(
                                field_prop_combo.properties.get(
                                    property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    padding_list_len = (
                        (max_field_prop_combos[field.field_name] -
                         cur_combo_count) * (cur_prop_count + 1))
                    field_prop_vals.extend(
                        empty_padding_list(padding_list_len))
                    field_vals.extend(field_prop_vals)
            row = tuple(common_vals[2 if html_response else 0:] + field_vals +
                        item_att_vals + user_vals + group_vals + location_vals)
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    return data_types_book, excel_sheets
Beispiel #43
0
def download_item_lists(request, domain, html_response=False):
    """
        Is used to serve excel_download and html_view for view_lookup_tables

        :param request: HTTP request; ``table_id`` GET params select which
            tables to export (all tables in the domain when absent).
        :param domain: domain whose fixture data types and items are exported.
        :param html_response: when True, return the sheets dict (restricted
            to the first table) for HTML rendering instead of an Excel file.
        :returns: dict of sheets when ``html_response``, otherwise a JSON
            response carrying the download id of the generated workbook.
    """
    table_ids = request.GET.getlist("table_id")
    if table_ids and table_ids[0]:
        try:
            data_types_view = [FixtureDataType.get(id) for id in request.GET.getlist("table_id")]
        except ResourceNotFound:
            if html_response:
                messages.info(request, _("Sorry, we couldn't find that table. If you think this is a mistake please report an issue."))
                raise
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        # The HTML view renders a single table at a time.
        data_types_view = list(data_types_view)[0:1]
    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
        Contains all excel sheets in following format
        excel_sheets = {
            "types": {
                "headers": [],
                "rows": [(row), (row), (row)]
            }
            "next-sheet": {
                "headers": [],
                "rows": [(row), (row), (row)]
            },
            ...
        }
    """
    excel_sheets = {}

    def empty_padding_list(length):
        # Blank cells used to pad every row out to a uniform width.
        return ["" for x in range(0, length)]

    max_fields = 0
    """
        - Helper to generate headers like "field 2: property 1"
        - Captures max_num_of_properties for any field of any type at the list-index.
        Example values:
            [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
            [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
            [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
        captures all possible 'field-property' values for each data-type
        Example value
          {u'clinics': {'field 2 : property 1': u'lang'}, u'growth_chart': {'field 2 : property 2': u'maxWeight'}}
    """
    type_field_properties = {}

    def get_field_prop_format(field_num, prop_num):
        # Header key of the form "field 2 : property 1".
        return "field " + str(field_num) + " : property " + str(prop_num)

    for data_type in data_types_view:
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_field_prop_combos = {field_name: 0 for field_name in data_type.fields_without_attributes}
        for item_row in FixtureDataItem.by_data_type(domain, data_type.get_id):
            data_items_book_by_type[data_type.tag].append(item_row)
            group_len = len(item_row.groups)
            max_groups = group_len if group_len > max_groups else max_groups
            user_len = len(item_row.users)
            max_users = user_len if user_len > max_users else max_users
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos

        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    types_sheet["headers"].extend(["field %d" % x for x in range(1, max_fields + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = [field.field_name for field in data_type.fields] + empty_padding_list(max_fields - len(data_type.fields))
        prop_vals = []
        # dict.has_key() was removed in Python 3; the `in` test is equivalent.
        if data_type.tag in type_field_properties:
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend([props.get(key, "") for key in field_prop_headers])
        row = tuple(common_vals[2 if html_response else 0:] + field_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet

    # Prepare 'items' sheet data for each data-type
    for data_type in data_types_book:
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        field_headers = []
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:] + field_headers + user_headers + group_headers
        )
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = [user.raw_username for user in item_row.users] + empty_padding_list(max_users - len(item_row.users))
            group_vals = [group.name for group in item_row.groups] + empty_padding_list(max_groups - len(item_row.groups))
            field_vals = []
            for field in data_type.fields:
                if len(field.properties) == 0:
                    if any(item_row.fields.get(field.field_name).field_list):
                        value = item_row.fields.get(field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(field_prop_combo.properties.get(property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    # Pad missing combos so every row has the same column count.
                    padding_list_len = (max_field_prop_combos[field.field_name] - cur_combo_count) * (cur_prop_count + 1)
                    field_prop_vals.extend(empty_padding_list(padding_list_len))
                    field_vals.extend(field_prop_vals)
            row = tuple(
                common_vals[2 if html_response else 0:] + field_vals + user_vals + group_vals
            )
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    if html_response:
        return excel_sheets

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append((data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append((data_type.tag, excel_sheets[data_type.tag]["rows"]))

    fd, path = tempfile.mkstemp()
    # xlsx content is binary; write and read the temp file in binary mode.
    with os.fdopen(fd, 'wb') as temp:
        export_raw(tuple(header_groups), tuple(value_groups), temp)
    file_format = Format.XLS_2007  # renamed: `format` shadowed the builtin

    # Close the workbook handle promptly (the original open() leaked it).
    with open(path, 'rb') as fl:
        workbook_data = fl.read()
    fileref = expose_download(
        workbook_data,
        60 * 10,
        mimetype=Format.from_format(file_format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
    )
    return json_response({"download_id": fileref.download_id})
Beispiel #44
0
def _prepare_fixture(table_ids, domain, html_response=False, task=None):
    """
    Collect all fixture (lookup table) data for ``domain`` into per-sheet
    headers and rows, ready for Excel export or HTML rendering.

    :param table_ids: explicit table ids to export; falsy selects every
        table in the domain.
    :param domain: domain whose fixture data types and items are exported.
    :param html_response: when True, raise on a missing table and restrict
        the output to the first table only.
    :param task: optional task object used for throttled progress updates.
    :returns: tuple ``(data_types_book, excel_sheets)``.
    :raises FixtureDownloadError: a requested table id was not found and
        ``html_response`` is True.
    """
    if table_ids and table_ids[0]:
        try:
            data_types_view = [FixtureDataType.get(id) for id in table_ids]
        except ResourceNotFound:
            if html_response:
                raise FixtureDownloadError(
                    _("Sorry, we couldn't find that table. If you think this "
                      "is a mistake please report an issue."))
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        data_types_view = list(data_types_view)[0:1]

    total_tables = len(data_types_view)
    # when total_tables < 4 the final percentage can be >= 100%, but for
    # a small number of tables it renders more accurate progress
    total_events = (total_tables + (0 if total_tables < 4 else 1)) * 10

    now = datetime.utcnow
    # single-element list so the nested closure can rebind the timestamp
    last_update = [now()]
    update_period = timedelta(seconds=1)  # do not update progress more than once a second

    def _update_progress(event_count, item_count, items_in_table):
        # Report progress at most once per update_period.
        if task and now() - last_update[0] > update_period:
            last_update[0] = now()
            processed = event_count * 10 + (10. * item_count / items_in_table)
            processed = min(processed, total_events)  # limit at 100%
            DownloadBase.set_progress(task, processed, total_events)

    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
        Contains all excel sheets in following format
        excel_sheets = {
            "types": {
                "headers": [],
                "rows": [(row), (row), (row)]
            }
            "next-sheet": {
                "headers": [],
                "rows": [(row), (row), (row)]
            },
            ...
        }
    """
    excel_sheets = {}

    def empty_padding_list(length):
        # Blank cells used to pad every row out to a uniform width.
        return ["" for x in range(0, length)]

    max_fields = 0
    max_item_attributes = 0
    """
        - Helper to generate headers like "field 2: property 1"
        - Captures max_num_of_properties for any field of any type at the list-index.
        Example values:
            [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
            [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
            [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
        captures all possible 'field-property' values for each data-type
        Example value
          {
            u'clinics': {'field 2 : property 1': u'lang'},
            u'growth_chart': {'field 2 : property 2': u'maxWeight'}
          }
    """
    type_field_properties = {}

    def get_field_prop_format(field_num, prop_num):
        # Header key of the form "field 2 : property 1".
        return "field " + str(field_num) + " : property " + str(prop_num)

    for event_count, data_type in enumerate(data_types_view):
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        if len(data_type.item_attributes) > max_item_attributes:
            max_item_attributes = len(data_type.item_attributes)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_field_prop_combos = {field_name: 0 for field_name in data_type.fields_without_attributes}
        fixture_data = FixtureDataItem.by_data_type(domain, data_type.get_id)
        num_rows = len(fixture_data)
        for n, item_row in enumerate(fixture_data):
            _update_progress(event_count, n, num_rows)
            data_items_book_by_type[data_type.tag].append(item_row)
            group_len = len(item_row.groups)
            max_groups = group_len if group_len > max_groups else max_groups
            user_len = len(item_row.users)
            max_users = user_len if user_len > max_users else max_users
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos

        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    types_sheet["headers"].extend(["field %d" % x for x in range(1, max_fields + 1)])
    types_sheet["headers"].extend(["property %d" % x for x in range(1, max_item_attributes + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = ([field.field_name for field in data_type.fields]
                      + empty_padding_list(max_fields - len(data_type.fields)))
        item_att_vals = (data_type.item_attributes + empty_padding_list(
            max_item_attributes - len(data_type.item_attributes)
        ))
        prop_vals = []
        if data_type.tag in type_field_properties:
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend([props.get(key, "") for key in field_prop_headers])
        row = tuple(common_vals[2 if html_response else 0:] + field_vals + item_att_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet

    # Prepare 'items' sheet data for each data-type
    for n, data_type in enumerate(data_types_book):
        _update_progress(total_tables, n, total_tables)
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        field_headers = []
        item_att_headers = ["property: " + attribute for attribute in data_type.item_attributes]
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:]
            + field_headers
            + item_att_headers
            + user_headers
            + group_headers
        )
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = ([user.raw_username for user in item_row.users]
                         + empty_padding_list(max_users - len(item_row.users)))
            group_vals = ([group.name for group in item_row.groups]
                          + empty_padding_list(max_groups - len(item_row.groups)))
            field_vals = []
            item_att_vals = [item_row.item_attributes[attribute] for attribute in data_type.item_attributes]
            for field in data_type.fields:
                if len(field.properties) == 0:
                    fixture_fields = item_row.fields.get(field.field_name)
                    if fixture_fields and any(fixture_fields.field_list):
                        value = item_row.fields.get(field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(field_prop_combo.properties.get(property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    # Pad missing combos so every row has the same column count.
                    padding_list_len = ((max_field_prop_combos[field.field_name] - cur_combo_count)
                                        * (cur_prop_count + 1))
                    field_prop_vals.extend(empty_padding_list(padding_list_len))
                    field_vals.extend(field_prop_vals)
            row = tuple(
                common_vals[2 if html_response else 0:]
                + field_vals
                + item_att_vals
                + user_vals
                + group_vals
            )
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    return data_types_book, excel_sheets
Beispiel #45
0
def download_item_lists(request, domain, html_response=False):
    """
        Is used to serve excel_download and html_view for view_lookup_tables

        :param request: HTTP request; ``table_id`` GET params select which
            tables to export (all tables in the domain when absent).
        :param domain: domain whose fixture data types and items are exported.
        :param html_response: when True, return the sheets dict (restricted
            to the first table) for HTML rendering instead of an Excel file.
        :returns: dict of sheets when ``html_response``, otherwise a JSON
            response carrying the download id of the generated workbook.
    """
    table_ids = request.GET.getlist("table_id")
    if table_ids and table_ids[0]:
        try:
            data_types_view = [
                FixtureDataType.get(id)
                for id in request.GET.getlist("table_id")
            ]
        except ResourceNotFound:
            if html_response:
                messages.info(
                    request,
                    _("Sorry, we couldn't find that table. If you think this is a mistake please report an issue."
                      ))
                raise
            data_types_view = FixtureDataType.by_domain(domain)
    else:
        data_types_view = FixtureDataType.by_domain(domain)

    if html_response:
        # The HTML view renders a single table at a time.
        data_types_view = list(data_types_view)[0:1]
    # book-keeping data from view_results for repeated use
    data_types_book = []
    data_items_book_by_type = {}
    item_helpers_by_type = {}
    """
        Contains all excel sheets in following format
        excel_sheets = {
            "types": {
                "headers": [],
                "rows": [(row), (row), (row)]
            }
            "next-sheet": {
                "headers": [],
                "rows": [(row), (row), (row)]
            },
            ...
        }
    """
    excel_sheets = {}

    def empty_padding_list(length):
        # Blank cells used to pad every row out to a uniform width.
        return ["" for x in range(0, length)]

    max_fields = 0
    max_item_attributes = 0
    """
        - Helper to generate headers like "field 2: property 1"
        - Captures max_num_of_properties for any field of any type at the list-index.
        Example values:
            [0, 1] -> "field 2: property 1" (first-field has zero-props, second has 1 property)
            [1, 1] -> "field 1: property 1" (first-field has 1 property, second has 1 property)
            [0, 2] -> "field 2: property 1", "field 2: property 2"
    """
    field_prop_count = []
    """
        captures all possible 'field-property' values for each data-type
        Example value
          {u'clinics': {'field 2 : property 1': u'lang'}, u'growth_chart': {'field 2 : property 2': u'maxWeight'}}
    """
    type_field_properties = {}

    def get_field_prop_format(field_num, prop_num):
        # Header key of the form "field 2 : property 1".
        return "field " + str(field_num) + " : property " + str(prop_num)

    for data_type in data_types_view:
        # Helpers to generate 'types' sheet
        type_field_properties[data_type.tag] = {}
        data_types_book.append(data_type)
        if len(data_type.fields) > max_fields:
            max_fields = len(data_type.fields)
        if len(data_type.item_attributes) > max_item_attributes:
            max_item_attributes = len(data_type.item_attributes)
        for index, field in enumerate(data_type.fields):
            if len(field_prop_count) <= index:
                field_prop_count.append(len(field.properties))
            elif field_prop_count[index] <= len(field.properties):
                field_prop_count[index] = len(field.properties)
            if len(field.properties) > 0:
                for prop_index, property in enumerate(field.properties):
                    prop_key = get_field_prop_format(index + 1, prop_index + 1)
                    type_field_properties[data_type.tag][prop_key] = property

        # Helpers to generate item-sheets
        data_items_book_by_type[data_type.tag] = []
        max_users = 0
        max_groups = 0
        max_field_prop_combos = {
            field_name: 0
            for field_name in data_type.fields_without_attributes
        }
        for item_row in FixtureDataItem.by_data_type(domain, data_type.get_id):
            data_items_book_by_type[data_type.tag].append(item_row)
            group_len = len(item_row.groups)
            max_groups = group_len if group_len > max_groups else max_groups
            user_len = len(item_row.users)
            max_users = user_len if user_len > max_users else max_users
            for field_key in item_row.fields:
                if field_key in max_field_prop_combos:
                    max_combos = max_field_prop_combos[field_key]
                    cur_combo_len = len(item_row.fields[field_key].field_list)
                    max_combos = cur_combo_len if cur_combo_len > max_combos else max_combos
                    max_field_prop_combos[field_key] = max_combos

        item_helpers = {
            "max_users": max_users,
            "max_groups": max_groups,
            "max_field_prop_combos": max_field_prop_combos,
        }
        item_helpers_by_type[data_type.tag] = item_helpers

    # Prepare 'types' sheet data
    types_sheet = {"headers": [], "rows": []}
    types_sheet["headers"] = [DELETE_HEADER, "table_id", 'is_global?']
    types_sheet["headers"].extend(
        ["field %d" % x for x in range(1, max_fields + 1)])
    types_sheet["headers"].extend(
        ["property %d" % x for x in range(1, max_item_attributes + 1)])
    field_prop_headers = []
    for field_num, prop_num in enumerate(field_prop_count):
        if prop_num > 0:
            for c in range(0, prop_num):
                prop_key = get_field_prop_format(field_num + 1, c + 1)
                field_prop_headers.append(prop_key)
                types_sheet["headers"].append(prop_key)

    for data_type in data_types_book:
        common_vals = ["N", data_type.tag, yesno(data_type.is_global)]
        field_vals = [
            field.field_name for field in data_type.fields
        ] + empty_padding_list(max_fields - len(data_type.fields))
        item_att_vals = data_type.item_attributes + empty_padding_list(
            max_item_attributes - len(data_type.item_attributes))
        prop_vals = []
        # dict.has_key() was removed in Python 3; the `in` test is equivalent.
        if data_type.tag in type_field_properties:
            props = type_field_properties.get(data_type.tag)
            prop_vals.extend(
                [props.get(key, "") for key in field_prop_headers])
        row = tuple(common_vals[2 if html_response else 0:] + field_vals +
                    item_att_vals + prop_vals)
        types_sheet["rows"].append(row)

    types_sheet["rows"] = tuple(types_sheet["rows"])
    types_sheet["headers"] = tuple(types_sheet["headers"])
    excel_sheets["types"] = types_sheet

    # Prepare 'items' sheet data for each data-type
    for data_type in data_types_book:
        item_sheet = {"headers": [], "rows": []}
        item_helpers = item_helpers_by_type[data_type.tag]
        max_users = item_helpers["max_users"]
        max_groups = item_helpers["max_groups"]
        max_field_prop_combos = item_helpers["max_field_prop_combos"]
        common_headers = ["UID", DELETE_HEADER]
        user_headers = ["user %d" % x for x in range(1, max_users + 1)]
        group_headers = ["group %d" % x for x in range(1, max_groups + 1)]
        field_headers = []
        item_att_headers = [
            "property: " + attribute for attribute in data_type.item_attributes
        ]
        for field in data_type.fields:
            if len(field.properties) == 0:
                field_headers.append("field: " + field.field_name)
            else:
                prop_headers = []
                for x in range(1, max_field_prop_combos[field.field_name] + 1):
                    for property in field.properties:
                        prop_headers.append("%(name)s: %(prop)s %(count)s" % {
                            "name": field.field_name,
                            "prop": property,
                            "count": x
                        })
                    prop_headers.append("field: %(name)s %(count)s" % {
                        "name": field.field_name,
                        "count": x
                    })
                field_headers.extend(prop_headers)
        item_sheet["headers"] = tuple(
            common_headers[2 if html_response else 0:] + field_headers +
            item_att_headers + user_headers + group_headers)
        excel_sheets[data_type.tag] = item_sheet
        for item_row in data_items_book_by_type[data_type.tag]:
            common_vals = [str(_id_from_doc(item_row)), "N"]
            user_vals = [
                user.raw_username for user in item_row.users
            ] + empty_padding_list(max_users - len(item_row.users))
            group_vals = [
                group.name for group in item_row.groups
            ] + empty_padding_list(max_groups - len(item_row.groups))
            field_vals = []
            item_att_vals = [
                item_row.item_attributes[attribute]
                for attribute in data_type.item_attributes
            ]
            for field in data_type.fields:
                if len(field.properties) == 0:
                    if any(item_row.fields.get(field.field_name).field_list):
                        value = item_row.fields.get(
                            field.field_name).field_list[0].field_value
                    else:
                        value = ""
                    field_vals.append(value)
                else:
                    field_prop_vals = []
                    cur_combo_count = len(
                        item_row.fields.get(field.field_name).field_list)
                    cur_prop_count = len(field.properties)
                    for count, field_prop_combo in enumerate(
                            item_row.fields.get(field.field_name).field_list):
                        for property in field.properties:
                            field_prop_vals.append(
                                field_prop_combo.properties.get(
                                    property, None) or "")
                        field_prop_vals.append(field_prop_combo.field_value)
                    # Pad missing combos so every row has the same column count.
                    padding_list_len = (
                        max_field_prop_combos[field.field_name] -
                        cur_combo_count) * (cur_prop_count + 1)
                    field_prop_vals.extend(
                        empty_padding_list(padding_list_len))
                    field_vals.extend(field_prop_vals)
            row = tuple(common_vals[2 if html_response else 0:] + field_vals +
                        item_att_vals + user_vals + group_vals)
            item_sheet["rows"].append(row)
        item_sheet["rows"] = tuple(item_sheet["rows"])
        excel_sheets[data_type.tag] = item_sheet

    if html_response:
        return excel_sheets

    header_groups = [("types", excel_sheets["types"]["headers"])]
    value_groups = [("types", excel_sheets["types"]["rows"])]
    for data_type in data_types_book:
        header_groups.append(
            (data_type.tag, excel_sheets[data_type.tag]["headers"]))
        value_groups.append(
            (data_type.tag, excel_sheets[data_type.tag]["rows"]))

    fd, path = tempfile.mkstemp()
    # xlsx content is binary; write and read the temp file in binary mode.
    with os.fdopen(fd, 'wb') as temp:
        export_raw(tuple(header_groups), tuple(value_groups), temp)
    file_format = Format.XLS_2007  # renamed: `format` shadowed the builtin

    # Close the workbook handle promptly (the original open() leaked it).
    with open(path, 'rb') as fl:
        workbook_data = fl.read()
    fileref = expose_download(
        workbook_data,
        60 * 10,
        mimetype=Format.from_format(file_format).mimetype,
        content_disposition='attachment; filename="%s_fixtures.xlsx"' % domain,
    )
    return json_response({"download_id": fileref.download_id})
Beispiel #46
0
def download_item_lists(request, domain):
    """Build and return an Excel (.xlsx) export of all fixture data types
    and their items for *domain*.

    For each ``FixtureDataType`` a sheet is produced whose rows are the
    data items of that type, padded out to the widest group/user count
    seen for the type.  A leading "types" sheet lists the types themselves.

    :param request: the incoming HTTP request (unused beyond routing).
    :param domain: the domain whose fixtures are exported.
    :returns: an HTTP file-download response with the generated workbook.
    """
    data_types = FixtureDataType.by_domain(domain)
    data_type_schemas = []
    max_fields = 0
    max_groups = 0
    max_users = 0
    # Overall maxima across *all* types, used to size the per-item headers.
    overall_max_groups = 0
    overall_max_users = 0
    data_tables = []

    def _get_empty_list(length):
        # Padding cells so every row of a sheet has the same width.
        return [""] * length

    # Fills sheets' schemas and data
    for data_type in data_types:
        type_schema = [str(_id_from_doc(data_type)), "N", data_type.name,
                       data_type.tag, yesno(data_type.is_global)]
        fields = [field for field in data_type.fields]
        type_id = data_type.get_id
        data_table_of_type = []
        # Fetch the items once; the original code queried the DB twice
        # (once for the maxima pass, once for the row pass).
        items = list(FixtureDataItem.by_data_type(domain, type_id))
        for item_row in items:
            max_groups = max(max_groups, len(item_row.get_groups()))
            max_users = max(max_users, len(item_row.get_users()))
        for item_row in items:
            groups = ([group.name for group in item_row.get_groups()] +
                      _get_empty_list(max_groups - len(item_row.get_groups())))
            users = ([user.raw_username for user in item_row.get_users()] +
                     _get_empty_list(max_users - len(item_row.get_users())))
            data_row = tuple([str(_id_from_doc(item_row)), "N"] +
                             [item_row.fields.get(field, None) or "" for field in fields] +
                             groups + users)
            data_table_of_type.append(data_row)
        type_schema.extend(fields)
        data_type_schemas.append(tuple(type_schema))
        max_fields = max(max_fields, len(type_schema))
        data_tables.append((data_type.tag, tuple(data_table_of_type)))
        overall_max_users = max(overall_max_users, max_users)
        overall_max_groups = max(overall_max_groups, max_groups)
        # Reset the per-type maxima for the next data type.
        max_users = 0
        max_groups = 0

    # A type schema row has 5 fixed columns (UID, delete, name, tag,
    # is_global) followed by field names; hence "max_fields - 4" below
    # yields one "field N" header per field column.
    type_headers = (["UID", DELETE_HEADER, "name", "tag", 'is_global?'] +
                    ["field %d" % x for x in range(1, max_fields - 4)])
    type_headers = ("types", tuple(type_headers))
    table_headers = [type_headers]
    for type_schema in data_type_schemas:
        # type_schema[3] is the tag; type_schema[5:] are the field names.
        item_header = (type_schema[3], tuple(["UID", DELETE_HEADER] +
                                             ["field: " + x for x in type_schema[5:]] +
                                             ["group %d" % x for x in range(1, overall_max_groups + 1)] +
                                             ["user %d" % x for x in range(1, overall_max_users + 1)]))
        table_headers.append(item_header)

    table_headers = tuple(table_headers)
    type_rows = ("types", tuple(data_type_schemas))
    data_tables = tuple([type_rows] + data_tables)

    # Example of sheets preparation:
    #
    # headers:
    #  (("employee", ("id", "name", "gender")),
    #   ("building", ("id", "name", "address")))
    #
    # data:
    #  (("employee", (("1", "cory", "m"),
    #                 ("2", "christian", "m"),
    #                 ("3", "amelia", "f"))),
    #   ("building", (("1", "dimagi", "585 mass ave."),
    #                 ("2", "old dimagi", "529 main st."))))

    fd, path = tempfile.mkstemp()
    # The workbook is binary data: open in binary mode so no newline or
    # encoding translation corrupts it.
    with os.fdopen(fd, 'wb') as temp:
        export_raw(table_headers, data_tables, temp)
    file_format = Format.XLS_2007  # don't shadow the builtin `format`
    return export_response(open(path, 'rb'), file_format, "%s_fixtures" % domain)
Beispiel #47
0
 def getFacilities(cls, domain=None):
     """Return the facilities of *domain* (falling back to ``cls.domain``)
     as ``[{'text': <site_name>, 'val': <site_id>}, ...]``, one entry per
     item of the 'site' fixture data type.
     """
     target_domain = domain or cls.domain
     site_type = FixtureDataType.by_domain_tag(target_domain, 'site').first()
     facilities = []
     for entry in FixtureDataItem.by_data_type(target_domain, site_type.get_id):
         attrs = entry.fields_without_attributes
         facilities.append(dict(text=attrs.get("site_name"),
                                val=attrs.get("site_id")))
     return facilities