def test_clean_keys(self):
    """Saving via cleanKey() should rename reserved column names
    (_id, create_date, edit_date) to their sanitized equivalents."""
    # Start from a clean silo so to_json()[0] is the row we create.
    LabelValueStore.objects(silo_id=self.silo.id).delete()
    lvs = LabelValueStore()
    orig_data = {
        'Header 1': 'r1c1',
        'create_date': 'r1c3',
        'edit_date': 'r1c2',
        '_id': 'r1c4'
    }
    for raw_key, raw_value in orig_data.items():
        clean = smart_str(cleanKey(raw_key))
        value = smart_str(raw_value, strings_only=True).strip()
        setattr(lvs, clean, value)
    lvs.silo_id = self.silo.id
    lvs.save()

    stored = json.loads(
        LabelValueStore.objects(silo_id=self.silo.id).to_json())[0]
    # The Mongo ObjectId is nondeterministic; drop it before comparing.
    stored.pop('_id')
    expected_data = {
        'Header 1': 'r1c1',
        'created_date': 'r1c3',
        'editted_date': 'r1c2',
        'user_assigned_id': 'r1c4',
        'read_id': -1,
        'silo_id': self.silo.id
    }
    self.assertEqual(stored, expected_data)
    LabelValueStore.objects(silo_id=self.silo.id).delete()
def silo_data_api(request, id):
    """Return every record stored for silo *id* as a JSON array.

    Responds 400 when the id is not a positive integer.
    """
    if id <= 0:
        return HttpResponseBadRequest("The silo_id = %s is invalid" % id)
    records = json.loads(LabelValueStore.objects(silo_id=id).to_json())
    # safe=False: top-level payload is a list, not a dict.
    return JsonResponse(records, safe=False)
def getSiloColumnNames(id):
    """Return a dict of column names for silo *id*.

    Each key is a column name and its value is whatever that column held
    in the last record seen; bookkeeping columns are skipped.
    """
    reserved = ("_id", "edit_date", "create_date", "silo_id")
    rows = json.loads(LabelValueStore.objects(silo_id=id).to_json())
    data = {}
    for row in rows:
        for key, value in row.iteritems():
            if key in reserved:
                continue
            data[key] = value
    return data
def combineColumns(silo_id):
    """Backfill missing columns with "" so every record in silo
    *silo_id* has the same set of keys. Always returns True.

    NOTE(review): an identical ``combineColumns`` is defined again later
    in this file; at import time the later definition shadows this one,
    so this copy is dead code — consider removing one of them.
    """
    client = MongoClient(settings.MONGODB_HOST)
    db = client.tola
    # Materialize all records for the silo as plain dicts.
    lvs = json.loads(LabelValueStore.objects(silo_id=silo_id).to_json())
    # Union of every key seen across all records, in first-seen order.
    cols = []
    for l in lvs:
        cols.extend([k for k in l.keys() if k not in cols])
    # Write an empty string into each record that lacks a column.
    for l in lvs:
        for c in cols:
            if c not in l.keys():
                db.label_value_store.update_one({"_id": ObjectId(l["_id"]["$oid"])}, {"$set": {c: ""}}, False)
    return True
def combineColumns(silo_id):
    """Ensure every record in silo *silo_id* carries every column seen
    anywhere in the silo, filling gaps with ''. Always returns True."""
    client = MongoClient(settings.MONGODB_HOST)
    db = client.tola
    records = json.loads(LabelValueStore.objects(silo_id=silo_id).to_json())

    # Build the union of all keys, preserving first-seen order.
    all_columns = []
    for record in records:
        for key in record.keys():
            if key not in all_columns:
                all_columns.append(key)

    # Patch each record in Mongo with '' for any column it is missing.
    for record in records:
        for column in all_columns:
            if column not in record.keys():
                db.label_value_store.update_one(
                    {"_id": ObjectId(record['_id']['$oid'])},
                    {"$set": {
                        column: ''
                    }}, False)
    return True
def table_dashboard(request, id=0):
    """
    Dynamic Dashboard report based on Table Data find lat and long fields
    :return:
    """
    # get all countires
    countries = Country.objects.all()
    get_table = Silo.objects.get(id=id)
    try:
        get_init_fields = UniqueFields.objects.get(silo__id=id)
    except UniqueFields.DoesNotExist:
        get_init_fields = None

    doc = LabelValueStore.objects(silo_id=id).to_json()
    # literal_eval handles repr-style exports; fall back to JSON for
    # documents containing true/false/null, which literal_eval rejects.
    try:
        data = ast.literal_eval(doc)
    except ValueError:
        data = json.loads(doc)

    latitude = []
    longitude = []
    lat_long = {}
    # BUGFIX: was initialized as {} but the country branch below calls
    # country.append(...), which raised AttributeError on a dict.
    country = []

    # each field needs a count of unique answers
    if get_init_fields is None and data:
        get_fields = {}
        # loop over the field names only
        for field in data[0]:
            # to-do move these into models
            exclude_string = [
                'read_id', 'silo_id', '_id', 'formhub/uuid',
                'meta/instanceID', 'user_assigned_id',
                'meta/instanceName', 'create_date'
            ]
            map_lat_string = ['lat', 'latitude', 'x']
            map_long_string = ['long', 'lng', 'longitude', 'y']
            map_country_string = ['countries', 'country']
            map_location = ['location', 'coordinated', 'coord']
            if field not in exclude_string:
                # create a dict with fields as the key
                get_fields[field] = {}
                cnt = Counter()
                answers = []  # a list for the answers
                for idx, col in enumerate(data):
                    try:
                        # append list of answers
                        answers.append(col[field])
                    except KeyError:
                        answers.append(None)  # no answer
                # loop and count each unique answer
                # TODO: move to a recursive function that checks each
                # level for list or dict and keeps parsing until a
                # count can be found
                for a in answers:
                    # if answer has a dict or list count each element
                    if isinstance(a, dict):
                        for x in a.keys():
                            cnt[x] += 1
                    elif isinstance(a, list):
                        # if a list within a list
                        for x in a:
                            if isinstance(x, dict):
                                for y in x.keys():
                                    cnt[y] += 1
                            else:
                                cnt[x] += 1
                    else:
                        cnt[a] += 1
                unique_count = cnt
                # NOTE(review): idx here is the leftover index from the
                # enumerate() loop above (i.e. the last row) — preserved
                # as-is; confirm this keying is intentional.
                get_fields[field][idx] = unique_count.most_common()
                temp = []
                temp2 = []
                for letter, count in get_fields[field][idx]:
                    if isinstance(letter, unicode):
                        # strip non-ASCII so SafeString stays renderable
                        temp.append(
                            SafeString(
                                unicodedata.normalize('NFKD', letter).encode(
                                    'ascii', 'ignore')))
                    else:
                        temp.append(letter)
                    temp2.append(count)
                # render missing answers as the label 'None'
                try:
                    find_none = temp.index(None)
                    temp[find_none] = 'None'
                except ValueError:
                    pass
                get_fields[field][idx] = {
                    "label": SafeString(temp),
                    "count": SafeString(temp2)
                }
            # if a latitude string add it to the map list
            if field in map_lat_string:
                for idx, col in enumerate(data):
                    latitude.append(col[field])
            # if a longitude string add it to the map list
            if field in map_long_string:
                for idx, col in enumerate(data):
                    longitude.append(col[field])
            # if a location field, slice lat/long out of its mapping
            if field in map_location:
                for idx, col in enumerate(data):
                    latitude.append(
                        itertools.islice(col[field].iteritems(), 3, 4))
                    longitude.append(
                        itertools.islice(col[field].iteritems(), 4, 5))
            # if a country name, look up its coordinates
            if field in map_country_string:
                for idx, col in enumerate(data):
                    country_obj = Country.objects.get(country=col[field])
                    longitude.append(country_obj.longitude)
                    latitude.append(country_obj.latitude)
                    country.append(country_obj.country)
            # merge lat and long
            lat_long = dict(zip(latitude, longitude))
    else:
        get_fields = None

    try:
        columns = ast.literal_eval(get_table.columns)
    except ValueError:
        columns = json.loads(get_table.columns)

    return render(
        request, "reports/table_dashboard.html", {
            'data': data,
            'get_table': get_table,
            'countries': countries,
            'get_fields': get_fields,
            'lat_long': lat_long,
            'country': country,
            'columns': columns
        })
def tableDashboard(request, id=0):
    """
    DEMO only survey for Tola survey for use with public talks about TolaData
    Share URL to survey and data will be aggregated in tolatables
    then imported to this dashboard
    :return:
    """
    # get all countires
    countries = Country.objects.all()
    get_table = Silo.objects.get(id=id)
    try:
        get_fields = UniqueFields.objects.get(silo__id=id)
    except UniqueFields.DoesNotExist:
        get_fields = None

    doc = LabelValueStore.objects(silo_id=id).to_json()
    # BUGFIX: literal_eval raises ValueError on JSON literals such as
    # true/false/null; fall back to json.loads like table_dashboard does.
    try:
        data = ast.literal_eval(doc)
    except ValueError:
        data = json.loads(doc)

    from collections import Counter
    latitude = []
    longitude = []
    lat_long = {}
    country = {}

    # each field needs a count of unique answers
    if get_fields is None and data:
        get_fields = {}
        # loop over the field names only
        for field in data[0]:
            # to-do move these into models
            exclude_string = ['read_id', 'silo_id', '_id', 'formhub/uuid',
                              'meta/instanceID', 'user_assigned_id',
                              'meta/instanceName', 'create_date']
            map_lat_string = ['lat', 'latitude', 'x']
            map_long_string = ['long', 'lng', 'longitude', 'y']
            map_country_string = ['countries', 'country']
            if field not in exclude_string:
                # create a dict with fields as the key
                get_fields[field] = {}
                cnt = Counter()
                answers = []  # a list for the answers
                for idx, col in enumerate(data):
                    try:
                        answers.append(col[field])  # append list of answers
                    except KeyError:
                        answers.append(None)  # no answer
                # loop and count each unique answer
                for a in answers:
                    # if answer has a dict in it loop over that
                    if isinstance(a, dict):
                        for x in a:
                            cnt[x] += 1
                    else:
                        cnt[a] += 1
                unique_count = cnt
                # NOTE(review): idx is the leftover index from the
                # enumerate() loop above (last row), preserved as-is.
                get_fields[field][idx] = unique_count.most_common()
                from django.utils.safestring import SafeString
                temp = []
                temp2 = []
                for letter, count in get_fields[field][idx]:
                    temp.append(str(letter))
                    temp2.append(str(count))
                get_fields[field][idx] = {"label": SafeString(temp),
                                          "count": SafeString(temp2)}
            # if a latitude string add it to the map list
            if field in map_lat_string:
                for idx, col in enumerate(data):
                    latitude.append(col[field])
            # if a longitude string add it to the map list
            if field in map_long_string:
                for idx, col in enumerate(data):
                    longitude.append(col[field])
            # merge lat and long
            lat_long = dict(zip(latitude, longitude))
    else:
        get_fields = None

    # Same fallback for the stored column metadata, consistent with
    # table_dashboard.
    try:
        columns = ast.literal_eval(get_table.columns)
    except ValueError:
        columns = json.loads(get_table.columns)

    return render(request, "reports/table_dashboard.html",
                  {'data': data, 'get_table': get_table,
                   'countries': countries, 'get_fields': get_fields,
                   'lat_long': lat_long, 'columns': columns})