def create_browser():
    # workspace = Workspace(config="slicer.ini")
    print("Creating Workspace and model")
    workspace = Workspace()
    workspace.register_default_store("sql", url="sqlite:///data.sqlite")
    workspace.import_model("movie_ratings_model.json")
    browser = workspace.browser("ratings")
    return browser
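# A minimal usage sketch for create_browser(), assuming data.sqlite and
# movie_ratings_model.json exist alongside the script and that the "ratings"
# cube exposes a record_count aggregate, as the other snippets here do:
browser = create_browser()
result = browser.aggregate()           # aggregate over the whole cube
print(result.summary["record_count"])  # total number of fact rows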
def __init__(self):
    print("Creating Workspace and model")
    workspace = Workspace()
    workspace.register_default_store("sql", url="sqlite:///data.sqlite")
    workspace.import_model("movie_ratings_model.json")
    self.browser = workspace.browser("ratings")
def get_cubes_workspace(self):
    workspace = Workspace()
    workspace.register_default_store(
        "sql",
        url=Connector.get_database_url(),
        schema=settings.NIAMOTO_FACT_TABLES_SCHEMA,
        dimension_schema=settings.NIAMOTO_DIMENSIONS_SCHEMA,
    )
    workspace.import_model(self.generate_cubes_model())
    return workspace
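# A hedged sketch of consuming the workspace, e.g. from another method of the
# same class; the cube name "occurrence" is a placeholder assumption, not
# taken from the generated model:
workspace = self.get_cubes_workspace()
browser = workspace.browser("occurrence")  # hypothetical cube name
result = browser.aggregate()
print(result.summary)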
def analiza_temperatura(request):
    if request.GET.get("czy_analiza", None):
        print("Great, run the analysis!")
        # Create the Workspace and register the default SQL store:
        workspace = Workspace()
        workspace.register_default_store("sql", url="sqlite:///db.sqlite3")
        # Load the model:
        workspace.import_model("model.json")
        # Create the browser object:
        browser = workspace.browser("analiza_temperatura")
        # Aggregate with a drilldown on the first dimension present in the
        # query string, if any:
        dimensions = ["wiek_pacjenta", "data_pomiaru", "kontynent", "kraj", "obszar"]
        po_czym = next((d for d in dimensions if request.GET.get(d, None)), None)
        if po_czym:
            res = browser.aggregate(drilldown=[po_czym])
            # Print the overall summary and collect the per-group rows:
            print(res.summary)
            lista = []
            for r in res:
                print(r)
                lista.append(r)
            # Build the template context:
            cont = {"agre_list": lista, "czy_analiza": True, "po_czym": po_czym}
        else:
            cont = {}
    else:
        cont = {}
        print("Bummer")
    return render(request, "aplikacja/analiza/temperatura.html", cont)
def setUp(self):
    super(SlicerModelTestCase, self).setUp()

    ws = Workspace()
    ws.register_default_store("sql", url=TEST_DB_URL)
    self.ws = ws
    self.slicer.cubes_workspace = ws

    # Satisfy browser with empty tables
    # TODO: replace this once we have data
    store = ws.get_store("default")
    table = Table("sales", store.metadata)
    table.append_column(Column("id", Integer))
    table.create()

    ws.import_model(self.model_path("model.json"))
    ws.import_model(self.model_path("sales_no_date.json"))
def create_workspace(self, store=None, model=None):
    """Create a shared workspace. Add the default store specified in
    `store` as a dictionary and `model`, which can be a filename relative
    to ``tests/models`` or a model dictionary. If no store is provided
    but the class has an engine or `sql_engine` set, then the existing
    engine will be used as the default SQL store."""

    workspace = Workspace()

    if store:
        store = dict(store)
        store_type = store.pop("type", "sql")
        workspace.register_default_store(store_type, **store)
    elif self.engine:
        workspace.register_default_store("sql", engine=self.engine)

    if model:
        if isinstance(model, compat.string_type):
            model = self.model_path(model)
        workspace.import_model(model)

    return workspace
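# For illustration, a typical call from a test method; the test name, cube
# name, and assertion below are assumptions, not taken from the test suite:
def test_aggregate_smoke(self):
    # Uses the class engine as the default SQL store and a bundled model.
    workspace = self.create_workspace(model="model.json")
    browser = workspace.browser("sales")  # hypothetical cube name
    result = browser.aggregate()
    self.assertIn("record_count", result.summary)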
from cubes import Workspace

print("Python Cubes Test!")

# Create the Workspace and register the default SQL store:
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///db.sqlite3")

# Load the model:
workspace.import_model("model.json")

# Create the browser object:
browser = workspace.browser("analiza_temperatura")

# Aggregate, drilling down by continent ("kontynent"):
res = browser.aggregate(drilldown=["kontynent"])

# Print the overall summary and the per-group rows:
print(res.summary)
for r in res:
    print(r)
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")

# 2. Get a browser
browser = workspace.browser("quake_events")

# 3. Play with aggregates
result = browser.aggregate()
print("Total\n"
      "----------------------")
print("Record count : %8d" % result.summary["record_count"])
print("Average mean : %8d" % result.summary["average_mean"])

# 4. Drill down through a dimension
#
# print("\n"
#       "Drill Down by Category (top-level Item hierarchy)\n"
#       "==================================================")
# result = browser.aggregate(drilldown=["location"])
# print(("%-20s%10s%10s%10s\n" + "-" * 50) % ("Category", "Count", "Total", "Double"))
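# Cell and PointCut are imported above but not yet used; a sketch of how they
# would restrict the aggregation, assuming the quake_events cube has a
# "location" dimension (the dimension and member names are assumptions):
cut = PointCut("location", ["Asia"])   # assumed member path
cell = Cell(browser.cube, cuts=[cut])
result = browser.aggregate(cell)
print("Record count in cell : %8d" % result.summary["record_count"])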
import pickle
from collections import defaultdict

import cubes
from cubes import Workspace, Cell, PointCut


class TweetCube:
    def __init__(self, concept):
        self.createCube()
        self.concept = concept

    def createCube(self):
        self.workspace = Workspace()
        self.workspace.register_default_store(
            "sql", url="mysql://root:@localhost/datawarehouse")
        model = cubes.read_model_metadata_bundle("../CubeModelisation/model/")
        self.workspace.import_model(model)
        self.browserTweet = self.workspace.browser("tweet")

    def getPieChartSource(self):
        cube = self.workspace.cube("tweet")
        cube.browser = self.browserTweet
        cut = [PointCut("concept", [self.concept])]
        cell = Cell(cube, cut)
        result = self.browserTweet.aggregate(
            cell, drilldown=["location", "source"],
            aggregates=["numberOfTweets_sum"])

        # Index tweet counts by continent and source.
        output = defaultdict(dict)
        for row in result.table_rows("location"):
            continent = row.record['location.continentName']
            source = row.record['source.sourceName']
            output[continent][source] = row.record['numberOfTweets_sum']

        # One entry per continent; sources missing from the data count as 0.
        continentsList = ['Asia', 'Africa', 'Australia', 'Europe',
                          'North America', 'South America']
        sourcesList = ['iPhone', 'Android', 'Web', 'Unknown']
        data = []
        for continent in continentsList:
            counts = output[continent]
            data.append({
                'continentName': continent,
                'sources': [{'source': s, 'numberOfTweets': counts.get(s, 0)}
                            for s in sourcesList],
            })
        return data

    def getBarChartRaceByLanguageAndDate(self):
        cube = self.workspace.cube("tweet")
        cube.browser = self.browserTweet
        cut = [PointCut("concept", [self.concept])]
        cell = Cell(cube, cut)
        result = self.browserTweet.aggregate(
            cell, drilldown=["time:day", "language"],
            aggregates=["numberOfTweets_sum"])

        # Index tweet counts by date and language.
        data = defaultdict(dict)
        for row in result.table_rows("time"):
            record = row.record
            date = record['time.day'] + "/" + record['time.month'] + "/" + record['time.year']
            data[date][record['language.languageName']] = record['numberOfTweets_sum']

        # Load the full list of languages from file so that every date carries
        # an entry for every language; languages absent on a date count as 0.
        with open('../Docs/languagesStructure.pickle', 'rb') as file:
            languagesList = pickle.load(file)

        dataList = []
        for date in data:
            dataList.append({
                'date': date,
                'languagesList': [{'language': language,
                                   'numberOfTweets': data[date].get(language, 0)}
                                  for language in languagesList],
            })
        return dataList

    def getBarChartRaceBySentimentAndDate(self):
        cube = self.workspace.cube("tweet")
        cube.browser = self.browserTweet
        cut = [PointCut("concept", [self.concept])]
        cell = Cell(cube, cut)
        result = self.browserTweet.aggregate(
            cell, drilldown=["time:day", "sentiment"],
            aggregates=["numberOfTweets_sum"])

        # Index tweet counts by date and sentiment label.
        data = defaultdict(dict)
        for row in result.table_rows("time"):
            record = row.record
            date = record['time.day'] + "/" + record['time.month'] + "/" + record['time.year']
            data[date][record['sentiment.sentimentLabel']] = record['numberOfTweets_sum']

        dataList = []
        for date in data:
            dataList.append({
                'date': date,
                'sentimentsList': [{'sentiment': sentiment,
                                    'numberOfTweets': count}
                                   for sentiment, count in data[date].items()],
            })
        return dataList
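# A short usage sketch; the concept value "covid" is a placeholder, not a
# value known to exist in the warehouse:
tweetCube = TweetCube("covid")
pie = tweetCube.getPieChartSource()
race = tweetCube.getBarChartRaceBySentimentAndDate()
print(pie[0], race[:1])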
import os.path
import sys
import json
from datetime import datetime, timedelta

from django.http import JsonResponse
from cubes import Workspace, Cell, PointCut

BASE = os.path.dirname(os.path.abspath(__file__))

# ------------------------------------------------------------- #
workspace = Workspace()
workspace.register_default_store(
    "sql", url="sqlite:///" + os.path.join(BASE, "myData.sqlite"))
workspace.import_model(os.path.join(BASE, "modal.json"))
browser = workspace.browser("FB_POSTS_DATA")

# ------------------------------------------------------------- #
# Point cut on pub_date at the date seven days ago; computing the whole
# offset with timedelta keeps the day component valid across month boundaries.
d = datetime.now() - timedelta(days=7)
cut = PointCut("pub_date", [d.year, d.month, d.day], None)
cell = Cell(browser.cube, cuts=[cut])

# ------------------------------------------------------------- #
def get_post_by_shares():
    result = browser.aggregate(cell, drilldown=["name"])
    shares = []
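# get_post_by_shares() is truncated above; a hedged sketch of how such a view
# might finish, using the imported JsonResponse. The record keys "name.name"
# and "shares_sum" are assumptions about the model, not taken from it:
def get_post_by_shares_sketch(request):
    result = browser.aggregate(cell, drilldown=["name"])
    shares = [{"name": record.get("name.name"),      # assumed attribute ref
               "shares": record.get("shares_sum")}   # assumed aggregate name
              for record in result]
    return JsonResponse({"posts": shares})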
"label": "Nombre curso", }, { "name": "creditos", "label": "Numero de creditos", }] }, { "name": "clase", "label": "Clase", "attributes": [{ "name": "curso_materia", "label": "Materia" }, { "name": "seccion_id", "label": "Seccion", }, { "name": "salon_senhalizacion", "label": "Salon", }, { "name": "franja_id", "label": "Franja", }] }] } workspace.import_model(dicc) browser = workspace.browser("clase") result = browser.aggregate(drilldown=['franja']) print(result)
# ("category", "string"), # ("category_label", "string"), # ("subcategory", "string"), # ("subcategory_label", "string"), # ("line_item", "string"), # ("year", "integer"), # ("amount", "integer")], # create_id=True # ) from cubes import Workspace, PointCut, Cell workspace = Workspace() workspace.register_default_store( "sql", url="postgresql://*****:*****@localhost/willowood") workspace.import_model("SalesTable.json") browser = workspace.browser("salestable") result = browser.aggregate() print(result.summary["record_count"]) print(result.summary["Qty"]) print(result.summary["Value"]) cube = browser.cube # result = browser.aggregate(drilldown=["billing_date"]) # # for record in result: # print(' record: ', record)
from cubes import Workspace, Cell

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql",
                                 url="sqlite:///vvo_data.sqlite",
                                 dimension_prefix="dm_",
                                 fact_prefix="ft_")
workspace.import_model("procurements.cubesmodel")

# 2. Get a browser
browser = workspace.browser("contracts")
cube = browser.cube

# workspace = cubes.create_workspace("sql", model, url="postgres://localhost/ep2012",
#                                    schema="vvo",
#                                    dimension_prefix="dm_",
#                                    fact_prefix="ft_",
#                                    denormalized_view_schema="views",
#                                    use_denormalization=False,
#                                    denormalized_view_prefix="mft_")


def drilldown(cell, dimension):
    """Drill down and aggregate recursively through all levels of
    `dimension`. This function is like recursively traversing directories
    on a file system and aggregating the file sizes, for example.

    * `cell` - cube cell to drill down into
    * `dimension` - dimension to be traversed through all levels
    """
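# The body of drilldown() is not shown here. A minimal recursive sketch under
# these assumptions: the dimension uses its default hierarchy, the cell is
# restricted with point cuts only, and aggregate rows expose the level key
# under its attribute reference (level.key.ref) plus the built-in
# record_count aggregate:
def drilldown_sketch(browser, cell, dimension):
    hierarchy = dimension.hierarchy()
    cut = cell.cut_for_dimension(dimension)
    path = cut.path if cut else []
    if len(path) >= len(hierarchy.levels):
        return  # deepest level reached
    level = hierarchy.levels[len(path)]
    result = browser.aggregate(cell, drilldown=[dimension.name])
    for record in result:
        print("%s%s: %s" % ("    " * len(path),
                            record[level.key.ref],  # assumed row key
                            record["record_count"]))
        # Recurse one level deeper into the member just printed.
        drilldown_sketch(browser,
                         cell.drilldown(dimension, record[level.key.ref]),
                         dimension)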
"IBRD_Balance_Sheet__FY2010.csv", table_name="ibrd_balance", fields=[("category", "string"), ("category_label", "string"), ("subcategory", "string"), ("subcategory_label", "string"), ("line_item", "string"), ("year", "integer"), ("amount", "integer")], create_id=True) #workspace = Workspace(config="slicer.ini") print("Creating Workspace and model") workspace = Workspace() workspace.register_default_store("sql", url="sqlite:///data.sqlite") workspace.import_model("tutorial_model.json") browser = workspace.browser("ibrd_balance") print() result = browser.aggregate() print("General aggregations") print("Record count: %s" % result.summary["record_count"]) print("Amount sum: %s" % result.summary["amount_sum"]) print() print("Drilldown by year") result = browser.aggregate(drilldown=["year"]) for record in result: print(record) print("Drilldown by item") result = browser.aggregate(drilldown=["item"])