def memory(request, memory_id):
    """Return the description of a single memory as JSON.

    Looks up *memory_id* in AllegroGraph via the session's connection and
    responds with ``{"memory": <description>}``.
    """
    payload = {}
    # Reuse (or establish) the per-session AllegroGraph connection
    conn = manage_sessions.check_ag_conn()
    # fetch_memory_description builds the memory URI from the ID and
    # retrieves its description from the repository
    payload['memory'] = view_helpers.fetch_memory_description(conn, memory_id)
    return HttpResponse(json.dumps(payload), content_type='application/json')
def accept(request):
    """Accept a memory report POSTed by the client and store it in AllegroGraph.

    Reads the reporting person's mcpid from the user profile (or 'anonymous'),
    builds an ag_models.Memory instance from the POSTed form fields, submits it,
    and returns the submission status as text/html.
    """
    # Identify the reporter: registered users carry an mcpid on their profile
    if not request.user.is_anonymous():
        mcpid = request.user.userprofile.mcpid
    else:
        mcpid = 'anonymous'

    # Get our graph_db connection
    ag = manage_sessions.check_ag_conn()

    # NOTE: the data model in AllegroGraph differs from the model in the
    # client, so field names are translated here.  Ideally all views would be
    # served from Django templates so naming schemes stay uniform and this
    # translator (or one in ag_models.Memory) would be unnecessary.
    memoryReport = ag_models.Memory()

    # URI identifying the person within the AG 'person' context
    personURI = ag.conn.createURI(ag.contextStrs['person'] + mcpid)

    # Attach stimulus and person metadata to the report
    memoryReport.addMeta(stimulus_id=request.POST.get('songid'))
    memoryReport.addMeta(personURI=personURI)

    # POST fields to digest into the report
    digestVars = ['personType', 'memoryYear', 'eventType', 'placeType',
                  'emotionType', 'memory', 'sharing[sharedGlobally]']

    for var in digestVars:
        vardata = request.POST.get(var)
        if not vardata:
            continue
        if var in ['personType', 'eventType', 'placeType', 'emotionType']:
            # Capitalize the first letter so the name matches the ontology
            # (e.g. 'personType' -> 'hasPersonType')
            propertyVar = "has" + var[0].upper() + var[1:]
            memoryReport.addTypeDetail(**{propertyVar: vardata})
        elif var == 'memory':
            memoryReport.addDetail(generalDescription=vardata)
        elif var == 'memoryYear':
            memoryReport.addTypeDetail(hasYear=vardata)
        elif var == 'sharing[sharedGlobally]':
            memoryReport.addTypeDetail(sharedGlobally=vardata)

    # Submit the assembled report to the repository
    status = memoryReport.submit(ag)
    return HttpResponse(status, content_type='text/html')
def category(request):
    """Return the ontology terms for each memory-report category as JSON.

    For each of the four category classes, reads the ontology keys from the
    AG connection, turns underscores into spaces, and strips any leading
    whitespace so the labels are display-ready.
    """
    outDict = {}
    # Get the AG connection
    ag = manage_sessions.check_ag_conn()
    for category in ['PersonType', 'EventType', 'PlaceType', 'EmotionType']:
        # Ontology keys use underscores (e.g. 'Family_member'); convert to
        # human-readable labels in a single pass.  str.replace + lstrip is
        # equivalent to the former re.sub('_',' ') / re.sub('^\s*','') pair.
        outDict[category] = [k.replace('_', ' ').lstrip()
                             for k in ag.onto[category].keys()]
    return HttpResponse(json.dumps(outDict), content_type='application/json')
def song(request, song_id='511'):
    """Build the song/memory graph for the d3 force-directed layout.

    The basic organization of this function is:
      1. Parse the GET request parameters
      2. Get a graph of song nodes (method selected by 'graphConstructionMethod')
      3. Calculate the starting positions of the song nodes
      4. Fetch memories associated with some set of the song nodes
      5. Emit the {"nodes": [...], "links": [...]} structure d3 expects

    Returns a JSON HttpResponse.  Relies on module-level globals
    usePreviewURL, baseMusicLocation and the Stimulus model — assumed to be
    defined elsewhere in this file.
    """
    max_links = 100
    subgraph = {"keys": [], "values": []}
    outDict = {}
    params = {}
    use_random = False
    createSongSongLinks = True
    # Other candidate methods: 'jp_ego_group_and_centrality', 'original'
    defaultMethod = 'ego_group_w_attribs'
    songmeminfo = {}

    # Get our graph_db connection
    graph_db = manage_sessions.check_ag_conn()

    # --- 1. Parse the GET request (QueryDict values arrive as lists) -------
    inputParams = {}
    if request.method == 'GET':
        inputParams = dict(request.GET)

    # The start node comes from the URL, not the query string.  (To handle a
    # user's first visit, start-node selection should eventually be based on
    # the birth year requested on entering MEAM Central.)
    inputParams['startNode'] = song_id

    if 'graphConstructionMethod' not in inputParams:
        inputParams['graphConstructionMethod'] = defaultMethod
    elif isinstance(inputParams['graphConstructionMethod'], list):
        inputParams['graphConstructionMethod'] = inputParams['graphConstructionMethod'][0]

    if 'userid' not in inputParams:
        inputParams['userid'] = 'anonymous'
    elif isinstance(inputParams['userid'], list):
        inputParams['userid'] = inputParams['userid'][0]

    # BUGFIX: previously this local was only assigned when the parameter was
    # ABSENT, so a request that actually supplied memNodeDistFromSongNode
    # raised a NameError below and the supplied value was ignored.
    memNodeDistFromSongNode = inputParams.get('memNodeDistFromSongNode', 25)
    if isinstance(memNodeDistFromSongNode, list):
        memNodeDistFromSongNode = float(memNodeDistFromSongNode[0])

    if 'year' in inputParams and isinstance(inputParams['year'], list):
        inputParams['year'] = inputParams['year'][0]

    # --- 2. Get a graph structure for songs, per the selected method -------
    method = inputParams['graphConstructionMethod']
    if method in ['userSongs', 'ego_group_w_year_seed']:
        # Only trust the requested userid if it matches the session's user
        useridMatchesSessionID = (inputParams['userid'] == request.user.username)
        if not request.user.is_anonymous() and useridMatchesSessionID:
            inputParams['mcpid'] = request.user.userprofile.mcpid
        else:
            inputParams['mcpid'] = manage_sessions.lookup_mcpid(inputParams['userid'])
        if method == 'userSongs':
            graphData = song_structure.songs_for_userid(graph_db, inputParams)
        elif method == 'ego_group_w_year_seed':
            graphData = song_structure.ego_group_w_year_seed(graph_db, inputParams)
        if not graphData:
            # No graph could be built for this user/seed — return empty JSON
            return HttpResponse(json.dumps({}), content_type='application/json')
    elif method == 'ego_group_w_attribs':
        graphData = song_structure.ego_group_w_attribs(graph_db, inputParams)
    elif method == 'jp_ego_group_and_centrality':
        graph_centrality = song_structure.sna_ego_group_and_centrality(graph_db)
        songIDs = graph_centrality.keys()
    elif method == 'original':
        params['readfromfile'] = True
        params['writetofile'] = True
        params['fname'] = './song_graph.txt'
        graph = song_structure.song_graph_by_common_song_set(graph_db, params)
        # Select a subset of those songs
        if use_random:
            subgraph["keys"] = random.sample(graph, max_links)  # song id pairs
            subgraph["values"] = [graph[a] for a in subgraph["keys"]]
        else:
            # Sort pairs by how often the pair of songs was encountered
            tmp = sorted(graph.iteritems(), key=itemgetter(1), reverse=True)
            for pair in tmp[0:max_links]:
                subgraph["keys"].append(pair[0])
                subgraph["values"].append(pair[1])
        # Unique songIDs appearing in the selected pairs
        songIDs = list(set(itertools.chain.from_iterable(subgraph["keys"])))

    # --- 3./4. Position the song nodes and collect memory info -------------
    # We really shouldn't have to fetch memory information for this view.
    if method in ['jp_ego_group_and_centrality', 'original']:
        songmeminfo = view_helpers.fetch_all_song_memories(graph_db, songIDs)
        if method == 'jp_ego_group_and_centrality':
            songnodeLoc = getLocation.getSongNode_Location(
                songmeminfo, xmethod='year', ymethod='centrality',
                centrality_dict=graph_centrality)
        else:
            # PJ - 15Aug2015 - we should perhaps pass subgraph rather than tmp,
            # as it contains what's needed to build the feature matrix
            songnodeLoc = getLocation.getSongNode_Location(
                songmeminfo, xmethod='svd', ymethod='svd', cooccurence_dict=tmp)
    elif method in ['ego_group_w_attribs', 'ego_group_w_year_seed']:
        # Only worry about unique songs for positioning
        useFields = ['song', 'year', 'centrality']
        songinfoMtx = np.unique(graphData['songinfo'])
        songnodeLoc = getLocation.getSongNode_Location(
            songinfoMtx[useFields], xmethod='year', ymethod='centrality',
            viewParams={'startNode': inputParams['startNode'],
                        'height': 500, 'width': 750})
        songmeminfo = graphData['songmeminfo']
    elif method == 'userSongs':
        songinfoMtx = np.unique(graphData['songinfo'])
        songmeminfo = graphData['songmeminfo']
        useFields = ['song', 'year']
        songnodeLoc = getLocation.getSongNode_Location(
            songinfoMtx[useFields], xmethod='year', ymethod='random',
            viewParams={'height': 500, 'width': 750})

    # --- 5. Build the d3 node/link arrays ----------------------------------
    # Two node types: song nodes and memory nodes.  Memory nodes link only to
    # song nodes; song nodes link to both memory nodes and other song nodes.
    nodes = []
    links = []
    outDict["nodes"] = nodes
    outDict["links"] = links

    if method in ['jp_ego_group_and_centrality', 'original']:
        songID2Idx = {}
        for idx, currEntry in enumerate(sorted(songmeminfo.keys())):
            currSongInfo = songmeminfo[currEntry]["songinfo"]
            # Resolve the audio source for this song
            if not usePreviewURL:
                currSongLocation = Stimulus.objects.filter(
                    stimulus_id=currSongInfo["id"])[0].location
                sep = '/'
                # location already holds the name we want (no rsplit needed)
                sname = currSongLocation
                currSongInfo["location"] = baseMusicLocation + sep + sname
            else:
                currSongInfo["location"] = currSongInfo["songURL"]
            songDict = {"songNodeID": currEntry, "class": "song",
                        "songmeta": currSongInfo}
            # Display coordinates: centrality method returns per-ID dicts,
            # the others return an array indexed by row
            if not method == 'jp_ego_group_and_centrality':
                songDict.update({"x": songnodeLoc[idx, 0],
                                 "y": songnodeLoc[idx, 1]})
            else:
                songDict.update({"x": songnodeLoc[currEntry]['x'],
                                 "y": songnodeLoc[currEntry]['y']})
            nodes.append(songDict)
            songIdx = len(nodes) - 1
            songID2Idx[currEntry] = songIdx
            nodes[songIdx]["children"] = []
            # One memory node + song-memory link per associated memory
            for memory in songmeminfo[currEntry]["memories"]:
                nodeDict = memory
                nodeDict["class"] = "memory"
                nodes.append(nodeDict)
                memIdx = len(nodes) - 1
                nodes[songIdx]["children"].append(memIdx)
                linkDict = {"source": songIdx, "target": memIdx}
                linkDict["class"] = "songmemory"
                links.append(linkDict)
        # Song-song links, weighted by pair frequency.  zip over the parallel
        # keys/values lists replaces the former O(n^2) list.index() lookup.
        if createSongSongLinks:
            for songPair, strength in zip(subgraph["keys"], subgraph["values"]):
                linkDict = {"source": songID2Idx[songPair[0]],
                            "target": songID2Idx[songPair[1]],
                            "strength": strength}
                linkDict["class"] = "songsong"
                links.append(linkDict)

    elif method in ['ego_group_w_attribs', 'userSongs', 'ego_group_w_year_seed']:
        # Convert the structured array of song info into dicts for songmeta
        songinfoVars = ['title', 'album', 'artist', 'year', 'previewURL']
        names = songinfoMtx[songinfoVars].dtype.names
        # map(str, ...) converts the years (and other fields) to strings
        songmetaMtx = [dict(zip(names, map(str, record)))
                       for record in songinfoMtx[songinfoVars]]
        songID2Idx = {}
        sidx = 0
        songmeminfoKeys = songmeminfo.keys()
        for currSongID in songinfoMtx['song']:
            songDict = {"songNodeID": str(currSongID), "class": "song",
                        "songmeta": songmetaMtx[sidx]}
            # Cast to plain float so the json encoder doesn't choke on numpy
            songDict.update({"x": float(songnodeLoc[sidx]['x']),
                             "px": float(songnodeLoc[sidx]['x']),
                             "y": float(songnodeLoc[sidx]['y']),
                             "py": float(songnodeLoc[sidx]['y'])})
            # Resolve the audio source for this song
            if not usePreviewURL:
                currSongLocation = Stimulus.objects.filter(
                    stimulus_id=currSongID)[0].location
                sep = '/'
                sname = currSongLocation
                songDict["songmeta"]["location"] = baseMusicLocation + sep + sname
            else:
                songDict["songmeta"]["location"] = \
                    str(songinfoMtx[sidx]["previewURL"]).encode('utf-8')
            nodes.append(songDict)
            songIdx = len(nodes) - 1
            nodes[songIdx]["children"] = []
            # If we have them, handle the memories associated with this song
            if songmeminfo:
                if str(currSongID) in songmeminfoKeys:
                    currMemList = songmeminfo[str(currSongID)]
                else:
                    currMemList = []
                # For the start node, fetch full memory descriptions
                if currMemList and currSongID == inputParams['startNode']:
                    currMemList = view_helpers.fetch_memories_for_song(
                        graph_db, currSongID)
                numMem = len(currMemList)
                # Radians per memory: spread memory nodes evenly on a circle
                radPerMem = 2 * math.pi / numMem if numMem else 0
                currTheta = 0
                for memory in currMemList:
                    nodeDict = memory
                    nodeDict["class"] = "memory"
                    # Position on a circle around the parent song node; cast
                    # to plain float for json serializability
                    nodeDict["x"] = float(songnodeLoc[sidx]['x'] +
                                          memNodeDistFromSongNode * np.cos(currTheta))
                    nodeDict["px"] = nodeDict["x"]
                    nodeDict["y"] = float(songnodeLoc[sidx]['y'] +
                                          memNodeDistFromSongNode * np.sin(currTheta))
                    nodeDict["py"] = nodeDict["y"]
                    currTheta += radPerMem
                    nodes.append(nodeDict)
                    memIdx = len(nodes) - 1
                    nodes[songIdx]["children"].append(memIdx)
                    linkDict = {"source": songIdx, "target": memIdx}
                    linkDict["class"] = "songmemory"
                    links.append(linkDict)
            sidx += 1  # Increment our song counter

    # Return information to server
    return HttpResponse(json.dumps(outDict), content_type='application/json')