def get_context_data(self, **kwargs):
    """Extension of the get_context_data method of the TemplateView.

    Adds the available cube names (keyed by cube name, valued by the
    cube's human-readable label) to the context dictionary so the
    template can render the list of cubes.
    """
    context = super(CubeView, self).get_context_data(**kwargs)
    context['nav_button'] = 0
    # Expose the view's test flag to the template as a plain boolean
    # (replaces the verbose if/else that assigned True/False).
    context['test'] = bool(self.test)
    # Map each cube name to its label.  (A leftover debug print of each
    # cube name was removed here.)
    cube_dict = OrderedDict()
    for cube in dwmodel.cubes:
        cube_dict[str(cube)] = str(dwmodel.cube(cube).label)
    # Sort the cubes by label (ascending) for display.
    context['cube_dict'] = OrderedDict(sorted(cube_dict.items(),
                                              key=lambda x: x[1],
                                              reverse=False))
    return context
def test_cubes(self): """ Testing that each cube can be instantiated Testing that facts in each cube can be called This will insure that you can aggregate and get raw data from each of the cubes in the model used by the datawarehouse. This should work when you change schema and add when you add cubes. """ filename = 'datawarehouse/fixtures/datawarehouse.json' workspace = cubes.create_workspace('sql',dwmodel,engine=engine,fact_prefix=fact_prefix,dimension_prefix=dimension_prefix) for c_key,c_name in dwmodel.cubes.iteritems(): cube = dwmodel.cube(c_name) self.assertIsInstance(cube,cubes.model.Cube) try: # If mapping is explicitly defined, grab the explicit mapping for key cube.key = cube.mappings['id'] except (TypeError,KeyError): # Otherwise either no mappings are listed or a mapping for 'id' does not exist pass try: browser = workspace.browser(cube) except Exception as error: print "Cube: " + c_key + " Failed" print error assert False self.assertIsInstance(browser,cubes.backends.sql.star.SnowflakeBrowser) cell = cubes.Cell(cube) try: facts = browser.facts(cell) except Exception as error: print "Cube: " + c_key + " Failed" print error assert False assert True print "Cube: " + c_key + " Passed"
def get_context_data(self, **kwargs):
    """Extension of the get_context_data method of the TemplateView.

    Queries the cubes model and fills ``self.return_list`` (inherited
    from the JSONMixin class) so the queries and meta data can be
    returned to the calling browser via an ajax call.  The request's
    'mode' parameter selects what is returned:

    - 'cube': the cube's dimensions, levels, ranges and measures;
    - 'dimension': the choices for a dimension's top level;
    - anything else is treated as a level name: the children of the
      selected value one level further down the hierarchy.
    """
    context = super(SliceBrowser, self).get_context_data(**kwargs)
    self.return_list.clear()
    cube_name = self.request.REQUEST['cube']
    mode = self.request.REQUEST['mode']
    cube = dwmodel.cube(dwmodel.cubes[cube_name])
    if mode == 'cube':
        # Describe the cube itself.
        dim_dict = OrderedDict()
        range_dict = OrderedDict()
        level_dict = OrderedDict()
        for dim in cube.dimensions:
            if dim.name == 'user':  # skip the internal user dimension
                continue
            dim_dict[dim.name] = dim.label
            # A dimension carrying an info block supports range slicing.
            if cube.dimension(dim).info is not None:
                range_dict[dim.name] = "true"
            level_dict[dim.name] = '|'.join(dim.level_names)
        meas_type_dict = OrderedDict()
        meas_dict = OrderedDict()
        for meas in cube.measures:
            meas_dict[meas.name] = meas.name
            meas_type_dict[meas.name] = '|'.join(meas.aggregations)
        # (A leftover debug print of range_dict was removed here.)
        self.return_list["levels"] = level_dict
        self.return_list["measure_types"] = meas_type_dict
        self.return_list["measures"] = meas_dict
        self.return_list["dimensions"] = dim_dict
        self.return_list["ranges"] = range_dict
    elif mode == 'dimension':
        # Return the choices for the first (top) level of a dimension.
        dimension = self.request.REQUEST['dimension']
        level = cube.dimension(dimension).levels[0]
        self.return_list['meta'] = level.attributes[0].name
        self.return_list['label'] = level.label
        self.return_list['max_depth'] = len(cube.dimension(dimension).levels)
        choice_list = self.get_level_choices(level, dimension)
        level_list = OrderedDict()
        for choice in choice_list:
            level_list[choice] = choice
        self.return_list["options"] = level_list
    else:
        # mode names a level: return the children of the selected parent
        # value one level further down the dimension hierarchy.
        dimension = self.request.REQUEST['dimension']
        parent_value = self.request.REQUEST['value']
        level_ndx = cube.dimension(dimension).level_names.index(mode)
        parent_name = cube.dimension(dimension).levels[level_ndx].attributes[0].name
        level = cube.dimension(dimension).levels[level_ndx + 1]
        self.return_list['meta'] = level.attributes[0].name
        self.return_list['label'] = level.label
        self.return_list['depth'] = level_ndx + 1
        self.return_list['max_depth'] = len(cube.dimension(dimension).levels)
        choice_list = self.get_level_choices(level, dimension,
                                             parent_name=parent_name,
                                             parent_value=parent_value)
        choice_list.sort()
        level_list = OrderedDict()
        for choice in choice_list:
            level_list[choice] = choice
        self.return_list["options"] = level_list
    # get_context_data must return the context dictionary; the original
    # fell off the end and implicitly returned None.
    return context
def get_context_data(self, **kwargs):
    """Create and return the context data.

    Reads the requested cube, slice and drilldown parameters from GET,
    validates them, slices the cube for every combination of cuts and
    stores either raw facts (mode == 'raw') or aggregated data in
    ``self.return_list`` (used by JSONMixin).  Validation problems are
    collected in ``self.error`` and returned instead of results.
    """
    context = super(CubeResultsView, self).get_context_data(**kwargs)
    # initialize variables
    mode = None          # mode can be raw or aggregated
    aggregations = None  # a list of the aggregations requested
    agg_paths = []       # the aggregation paths requested
    agg_dict = {}        # holds the aggregations and their respective paths
    num_slices = None    # the number of slices performed
    cube_name = None     # the requested cube name
    hashDict = {}        # slices (cuts) grouped by dimension
    hashList = []        # just the cut lists, without dimension information
    hashProduct = []     # a cartesian product of slices
    self.error.clear()   # make sure self.error is initially empty
    # fetch data from GET
    if 'num_slices' in self.request.GET:
        num_slices = self.request.REQUEST['num_slices']
    if 'cube' in self.request.GET:
        cube_name = self.request.REQUEST['cube']
    if 'drilldim' in self.request.GET:
        aggregations = self.request.REQUEST['drilldim'].split("|")
    if 'drillpath' in self.request.GET:
        agg_paths = self.request.REQUEST['drillpath'].split("|")
    if 'mode' in self.request.GET:
        mode = self.request.REQUEST['mode']
    # check for errors in the GET data
    if aggregations is None or aggregations == "":
        self.error['aggregations_error'] = "You must select an aggregation dimension."
    if num_slices == "" or num_slices is None:
        self.error['slices_error'] = "You must provide the number of slices"
    if cube_name is None or cube_name == "":
        self.error['cube_error'] = "You must provide the cube name"
    # couple the aggregations with their paths.  Guarded: aggregations is
    # None when 'drilldim' was missing, which previously crashed on
    # len(None) instead of reporting the error recorded above.
    if aggregations:
        pathLen = len(agg_paths)
        for i in range(0, len(aggregations)):
            if i < pathLen:
                agg_dict[aggregations[i]] = agg_paths[i]
            else:
                agg_dict[aggregations[i]] = ""
    # For each slice, get the path and dimension and create a cut.
    # Guarded so a missing/empty num_slices reports the recorded error
    # instead of crashing on int(None).
    if 'slices_error' not in self.error:
        for x in range(0, int(num_slices)):
            dimension = None
            path = None
            # get the path and dimension for this slice
            if 'dimension_' + str(x) in self.request.GET:
                dimension = self.request.REQUEST['dimension_' + str(x)]
            if 'path_' + str(x) in self.request.GET:
                path = self.request.REQUEST['path_' + str(x)].split('-')
            # check for errors
            if dimension is None or dimension == "":
                self.error['dimension' + str(x) + '_error'] = "You must provide a dimension for each slice."
            if path is None or path == "":
                self.error['path' + str(x) + '_error'] = "You must provide a path for each slice."
            # if a dimension and corresponding path were found, create the cut
            if ('path' + str(x) + '_error' not in self.error and
                    'dimension' + str(x) + '_error' not in self.error):
                if len(path) > 1:
                    # two path parts ("from-to") mean a range cut
                    from_path = path[0].split("|")
                    to_path = path[1].split("|")
                    cut = cubes.RangeCut(dimension, from_path, to_path)
                else:
                    cut = cubes.PointCut(dimension, path[0].split("|"))
                # group the cuts by dimension
                hashDict.setdefault(dimension, []).append(cut)
    for k, v in hashDict.iteritems():  # store cuts by dimension
        hashList.append(v)
    # compute a cartesian product on the cuts; each entry of hashProduct
    # is one complete cut list
    for item in itertools.product(*hashList):
        hashProduct.append(list(item))
    if self.error == {}:  # if there are no errors, perform the aggregations
        results = {}   # this dictionary gets serialized into the returned JSON object
        counter = 0    # a counter variable
        cube = dwmodel.cube(dwmodel.cubes[cube_name])  # create the cube
        # NOTE: a previously-opened, never-used engine.connect() was
        # removed here -- the workspace talks to the engine directly, and
        # the stray conn.close() raised NameError on the error path.
        workspace = cubes.create_workspace("sql", dwmodel, engine=engine,
                                           dimension_prefix=dimension_prefix,
                                           fact_prefix=fact_prefix)
        browser = workspace.browser(cube)
        cell = cubes.Cell(cube)
        results['slices'] = []  # holder for the slice descriptions
        if mode == 'raw':
            result = {}
            for cutlist in hashProduct:
                # build a string describing the cuts; it is appended to
                # column names so data from different slices cannot collide
                sliceString = ""
                for cut in cutlist:
                    sliceString += "_"
                    if hasattr(cut, 'path'):     # point cut
                        sliceString += "|".join(cut.path)
                    elif hasattr(cut, 'paths'):  # set cut
                        sliceString += "|".join(cut.paths)
                    else:                        # range cut
                        sliceString += "|".join(cut.from_path)
                        sliceString += "-"
                        sliceString += "|".join(cut.to_path)
                results['slices'].append(sliceString)
                new_cell = cell.multi_slice(cutlist)  # apply all of the cuts
                raw_result = browser.facts(new_cell)  # and get the data
                counter = 0
                # parse the cubes data into columns added to a dictionary
                for row in raw_result:
                    if counter == 0:  # first pass: create the column lists
                        for k, v in row.iteritems():
                            result[k + sliceString] = []
                        counter = counter + 1
                    for k, v in row.iteritems():
                        result[k + sliceString].append(v)
            results['raw'] = result
            self.return_list = results  # set the return_list, used by JSONMixin
        else:
            # for each requested aggregation, perform all cuts in each cut
            # list and build an object of aggregated data per cut list
            for a, p in agg_dict.iteritems():
                result = {}
                for cutlist in hashProduct:
                    level = None
                    sliceString = ""
                    for cut in cutlist:
                        sliceString += "_"
                        if hasattr(cut, 'path'):
                            sliceString += "|".join(cut.path)
                        else:
                            sliceString += "|".join(cut.from_path)
                            sliceString += "-"
                            sliceString += "|".join(cut.to_path)
                        if a == cut.dimension and p == "":
                            # the aggregation dimension was sliced on and no
                            # drill path was given: derive the level from the
                            # slice path
                            if hasattr(cut, 'path'):
                                level = getNextLevel(cube, a, cut.path)
                            else:
                                # range cuts have no 'path'; use from_path and
                                # stay on the current level instead of the next
                                level = getCurrentLevel(cube, a, cut.from_path)
                    sliceString += "_" + str(a)
                    if level == "level not found":
                        # the aggregation cannot work; return the error
                        self.error['level'] = level
                        self.return_list = self.error
                        return context
                    results['slices'].append(sliceString)
                    if level is None and p == "":
                        # otherwise use the first level of the aggregation variable
                        level = getTopLevel(cube, a)
                    new_cell = cell.multi_slice(cutlist)  # apply all of the cuts
                    if p == "":
                        agg_result = browser.aggregate(new_cell, drilldown=[a])
                    else:
                        level = p
                        agg_result = browser.aggregate(new_cell, drilldown=[(a, 'default', p)])
                    # add information about the x-axis to the object
                    if str(a) == level:
                        result['level'] = level
                    else:
                        result['level'] = str(a + "." + level)
                    # results are sorted by level up to the requested depth
                    arguments = []
                    for lvl in cube.dimension(a).levels:
                        arguments.append(lvl.attributes[0].ref())
                        if lvl.name == level:
                            break
                    counter = 0
                    newlist = []
                    for row in agg_result:  # list copy of the cubes result object
                        newlist.append(row)
                    newlist = sortDictByMultipleKeys(newlist, arguments)
                    # parse the cubes data into columns added to a dictionary
                    for row in newlist:
                        if counter == 0:  # first pass: create the column lists
                            for k, v in row.iteritems():
                                result[k + sliceString] = []
                            counter = counter + 1
                        for k, v in row.iteritems():
                            result[k + sliceString].append(v)
                results[a] = result
            self.return_list = results  # set the return_list, used by JSONMixin
    else:
        self.return_list = self.error  # if errors were present, return them
    # get_context_data must return the context dictionary; the original
    # fell off the end and implicitly returned None.
    return context