Example #1
    def __init__(self):
        print("Creating Workspace and model")
        workspace = Workspace()
        workspace.register_default_store("sql", url="sqlite:///data.sqlite")

        workspace.import_model("movie_ratings_model.json")
        browser = workspace.browser("ratings")

        self.browser = browser
Example #2
def create_browser():
    #workspace = Workspace(config="slicer.ini")
    print("Creating Workspace and model")
    workspace = Workspace()
    workspace.register_default_store("sql", url="sqlite:///data.sqlite")

    workspace.import_model("movie_ratings_model.json")
    browser = workspace.browser("ratings")
    return browser
Example #3
class SlicerTestCase(unittest.TestCase):
    def setUp(self):
        self.w = Workspace()
        self.w.add_slicer("myslicer",
                          "http://localhost:5010",
                          username=os.environ.get("SLICER_USERNAME"),
                          password=os.environ.get("SLICER_PASSWORD"))

        self.cube_list = self.w.list_cubes()

    def first_date_dim(self, cube):
        for d in cube.dimensions:
            if (d.info.get('is_date')):
                return d
        raise BrowserError("No date dimension in cube %s" % cube.name)

    def test_basic(self):
        for c in self.cube_list:
            if c.get('category') is not None and 'Mix' in c.get(
                    'category', ''):
                continue
            print("Doing %s..." % c.get('name')),
            cube = self.w.cube(c.get('name'))
            date_dim = self.first_date_dim(cube)
            cut = cubes.browser.RangeCut(date_dim, [2013, 9, 25], None)
            cell = cubes.browser.Cell(cube, [cut])
            drill_levels = [
                l for l in date_dim.hierarchy().levels
                if l.name in ('day', 'date')
            ]
            if not drill_levels:
                print("Skipping cube %s with no day/date drilldown available."
                      % c.get('name'))
                continue
            drill = cubes.browser.Drilldown(
                [(date_dim, None, date_dim.level(drill_levels[0]))], cell)
            b = self.w.browser(cube)
            try:
                attr_dim = cube.dimension("attr")
                split = cubes.browser.PointCut(attr_dim, ['paid', 'pnb'])
            except:
                split = None
            try:
                kw = {}
                if cube.aggregates:
                    kw['aggregates'] = [cube.aggregates[0]]
                elif cube.measures:
                    kw['measures'] = [cube.measures[0]]
                else:
                    raise ValueError(
                        "Cube has neither aggregates nor measures")
                result = b.aggregate(cell, drilldown=drill, split=split, **kw)
                print(result.cells)
            except:
                traceback.print_exc()
Example #4
def main():
    settings = ConfigParser()
    settings.read("slicer.ini")
    # Creating workspace
    workspace = Workspace(config=settings)
    # Creating browser so that we can do actual aggregations and other data queries for the cube
    browser = workspace.browser('death_fact')
    cube = browser.cube
    # Pass browser in data_aggregate - this function will do all aggregations and queries
    compare_kill_distances(browser, cube)
    count_match_deaths(browser, cube)
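
# The two helper functions called from main() are not included in this
# excerpt. A minimal sketch of one of them, assuming the death_fact model has
# a "match" dimension (an assumption, not the author's code):
def count_match_deaths(browser, cube):
    # Aggregate the whole cube per match and print the summary and rows.
    # The cube argument matches the call in main() but is not needed here.
    result = browser.aggregate(drilldown=["match"])
    print(result.summary)
    for record in result:
        print(record)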
Example #5
def analiza_temperatura(request):
    if request.GET.get("czy_analiza", None):
        print("Super dokonaj analizy!")
        # Create the Workspace:
        workspace = Workspace()
        workspace.register_default_store("sql", url="sqlite:///db.sqlite3")

        # Load the model:
        workspace.import_model("model.json")

        # Create the browser object:
        browser = workspace.browser("analiza_temperatura")

        # Compute the aggregation results:
        if request.GET.get("wiek_pacjenta", None):
            res = browser.aggregate(drilldown=["wiek_pacjenta"])
            po_czym = "wiek_pacjenta"
        elif request.GET.get("data_pomiaru", None):
            res = browser.aggregate(drilldown=["data_pomiaru"])
            po_czym = "data_pomiaru"
        elif request.GET.get("kontynent", None):
            res = browser.aggregate(drilldown=["kontynent"])
            po_czym = "kontynent"
        elif request.GET.get("kraj", None):
            res = browser.aggregate(drilldown=["kraj"])
            po_czym = "kraj"
        elif request.GET.get("obszar", None):
            res = browser.aggregate(drilldown=["obszar"])
            po_czym = "obszar"

        # Print the overall summary and the per-group rows:
        lista = []
        print(res.summary)
        for r in res:
            print(type(r))
            print(r)
            lista.append(r)

        # Build the template context:
        print(type(res))
        cont = {"agre_list": lista, "czy_analiza": True, "po_czym": po_czym}

    else:
        cont = {}
        print("Lipa")

    return render(request, "aplikacja/analiza/temperatura.html", cont)
Example #6
File: test_slicer.py  Project: 6si/cubes
class SlicerTestCase(unittest.TestCase):
    def setUp(self):
        self.w = Workspace()
        self.w.add_slicer("myslicer", "http://localhost:5010",
                          username=os.environ.get("SLICER_USERNAME"),
                          password=os.environ.get("SLICER_PASSWORD"))

        self.cube_list = self.w.list_cubes()

    def first_date_dim(self, cube):
        for d in cube.dimensions:
            if ( d.info.get('is_date') ):
                return d
        raise BrowserError("No date dimension in cube %s" % cube.name)

    def test_basic(self):
        for c in self.cube_list:
            if c.get('category') is not None and 'Mix' in c.get('category', ''):
                continue

            cube = self.w.cube(c.get('name'))
            date_dim = self.first_date_dim(cube)
            cut = cubes.browser.RangeCut(date_dim, [ 2013, 9, 25 ], None)
            cell = cubes.browser.Cell(cube, [ cut ])
            drill_levels = [ l for l in date_dim.hierarchy().levels if l.name in ('day', 'date') ]
            if not drill_levels:
                continue

            drill = cubes.browser.Drilldown([(date_dim, None, date_dim.level(drill_levels[0]))], cell)
            b = self.w.browser(cube)
            try:
                attr_dim = cube.dimension("attr")
                split = cubes.browser.PointCut(attr_dim, ['paid', 'pnb'])
            except:
                split = None
            try:
                kw = {}
                if cube.aggregates:
                    kw['aggregates'] = [cube.aggregates[0]]
                elif cube.measures:
                    kw['measures'] = [ cube.measures[0] ]
                else:
                    raise ValueError("Cube has neither aggregates nor measures")
                result = b.aggregate(cell, drilldown=drill, split=split, **kw)
            except:
                traceback.print_exc()
Example #7
class SlicerTestCase(unittest.TestCase):
    def setUp(self):
        self.w = Workspace()
        self.w.add_slicer("myslicer", "http://localhost:5010")

        self.cube_list = self.w.list_cubes()

    def first_date_dim(self, cube):
        for d in cube.dimensions:
            if ( d.info.get('is_date') ):
                return d
        raise BrowserError("No date dimension in cube %s" % cube.name)

    def test_basic(self):
        for c in self.cube_list:
            if c.get('category') is not None and 'Mix' in c.get('category', ''):
                continue
            print ("Doing %s..." % c.get('name')),
            cube = self.w.cube(c.get('name'))
            date_dim = self.first_date_dim(cube)
            cut = cubes.browser.RangeCut(date_dim, [ 2013, 9, 25 ], None)
            cell = cubes.browser.Cell(cube, [ cut ])
            drill = cubes.browser.Drilldown([(date_dim, None, date_dim.level('day'))], cell)
            b = self.w.browser(cube)
            try:
                attr_dim = cube.dimension("attr")
                split = cubes.browser.PointCut(attr_dim, ['paid', 'pnb'])
            except:
                split = None
            try:
                kw = {}
                if cube.aggregates:
                    kw['aggregates'] = [cube.aggregates[0]]
                elif cube.measures:
                    kw['measures'] = [ cube.measures[0] ]
                else:
                    raise ValueError("Cube has neither aggregates nor measures")
                result = b.aggregate(cell, drilldown=drill, split=split, **kw)
                print(result.cells)
            except:
                import sys
                print(sys.exc_info())
Example #8
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data_sqlite/f1.sqlite")
workspace.import_model("models/model.json")

browser = workspace.browser("qualifying")

cut1 = PointCut("drivers", [])
cut2 = PointCut("races", [])
cell = Cell(browser.cube, cuts=[cut1, cut2])
result = browser.aggregate(cell, drilldown=["drivers", "races"])

list_res = [row for row in result]


def filter_racer(data, name, year):
    temp = list(
        filter(
            lambda x: x['drivers.surname'] == name and x['races.year'] == year,
            data))

    return sorted(temp, key=lambda x: x['position_min'])


for line in filter_racer(list_res, 'Hamilton', 2009):
    print(line)
Example #9
class TweetCube:

    def __init__(self, concept):
        self.createCube()
        self.concept = concept

    def createCube(self):
        self.workspace = Workspace()
        self.workspace.register_default_store("sql",
                                         url="mysql://root:@localhost/datawarehouse")
        model = cubes.read_model_metadata_bundle("../CubeModelisation/model/")
        self.workspace.import_model(model)
        self.browserTweet = self.workspace.browser("tweet")

    def getPieChartSource(self):
        cube = self.workspace.cube("tweet")
        cube.browser = self.browserTweet

        cut = [PointCut("concept", [self.concept])]
        cell = Cell(cube, cut)

        result = self.browserTweet.aggregate(cell, drilldown=["location","source"],aggregates=["numberOfTweets_sum"])
        output = defaultdict(lambda: defaultdict())

        for row in result.table_rows("location"):
            continent = row.record['location.continentName']
            source = row.record['source.sourceName']
            output[continent][source] = row.record['numberOfTweets_sum']
        print("output ", output)

        # Build one entry per continent; sources missing from the aggregation
        # default to zero tweets.
        data = []
        continentsList = ['Asia', 'Africa', 'Australia', 'Europe', 'North America', 'South America']
        sourcesList = ['iPhone', 'Android', 'Web', 'Unknown']
        for continent in continentsList:
            data.append({
                'continentName': continent,
                'sources': [{'source': source,
                             'numberOfTweets': output[continent].get(source, 0)}
                            for source in sourcesList],
            })
        return data

    def getBarChartRaceByLanguageAndDate(self):
        cube = self.workspace.cube("tweet")
        cube.browser = self.browserTweet

        cut = [PointCut("concept", [self.concept])]
        cell = Cell(cube, cut)

        result = self.browserTweet.aggregate(cell, drilldown=["time:day", "language"],
                                             aggregates=["numberOfTweets_sum"])
        output = []
        for row in result.table_rows("time"):
            output.append(row.record)
        data = defaultdict(lambda: defaultdict(lambda: defaultdict()))
        languagesList = []
        for row in output:
            date = row['time.day'] + "/" + row['time.month'] + "/" + row['time.year']
            language = row['language.languageName']
            languagesList.append(language)
            # creating data structure containing all languages
            data[date][language]['numberOfTweets'] = row['numberOfTweets_sum']

        #GET LIST OF LANGUAGES FROM FILE
        import pickle
        with open('../Docs/languagesStructure.pickle', 'rb') as file:
            languagesList = pickle.load(file)
        print(len(languagesList))
        dataList = []
        for date in data:
            # Build a fresh dict for every date; reusing a single dict would
            # leave every entry in dataList pointing at the last date's data.
            element = {'date': date, 'languagesList': []}
            print(len(languagesList))
            for language in languagesList:
                if language in data[date]:
                    element['languagesList'].append(
                        {'language': language,
                         'numberOfTweets': data[date][language]['numberOfTweets']})
                else:
                    element['languagesList'].append(
                        {'language': language, 'numberOfTweets': 0})
            dataList.append(element)
        return dataList


    def getBarChartRaceBySentimentAndDate(self):
        cube = self.workspace.cube("tweet")
        cube.browser = self.browserTweet

        cut = [PointCut("concept", [self.concept])]
        cell = Cell(cube, cut)

        result = self.browserTweet.aggregate(cell, drilldown=["time:day", "sentiment"],
                                             aggregates=["numberOfTweets_sum"])

        output = []
        for row in result.table_rows("time"):
            output.append(row.record)

        data = defaultdict(lambda: defaultdict(lambda: defaultdict()))
        for row in output:
            date = row['time.day'] + "/" + row['time.month'] + "/" + row['time.year']
            sentiment = row['sentiment.sentimentLabel']
            data[date][sentiment]['numberOfTweets'] = row['numberOfTweets_sum']
        dataList = []
        element = {'date': '', 'sentimentsList': []}
        for date in data:
            element['date'] = date
            sentimentElement = {'sentiment': '', 'numberOfTweets': 0}
            mySentimentsList = []
            for sentiment in data[date]:
                sentimentElement['sentiment'] = sentiment
                sentimentElement['numberOfTweets'] = data[date][sentiment]['numberOfTweets']
                mySentimentsList.append(sentimentElement)
                sentimentElement = {'sentiment': '', 'numberOfTweets': 0}
            element['sentimentsList'] = mySentimentsList
            dataList.append(element)
            element = {'date': '', 'sentimentsList': []}
        return dataList
Example #10
from cubes import Workspace

workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///webshop.sqlite")
workspace.import_model("model.json")
browser = workspace.browser("sales")
result = browser.aggregate()
print("sum = ", result.summary["quantity_sum"])

result = browser.aggregate(drilldown=["date_sale:week", "product"])
for record in result:
    print(record)
Example #11
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")

# 2. Get a browser
browser = workspace.browser("airquality")

# 3. Play with aggregates
result = browser.aggregate()

print("Total\n" "----------------------")

print("Record count : %8d" % result.summary["record_count"])
print("Total amount : %8d" % result.summary["average_mean"])

#
# 4. Drill-down through a dimension
#
"""
print("\n"
      "Drill Down by Category (top-level Item hierarchy)\n"
      "==================================================")
#
result = browser.aggregate(drilldown=["location"])
#
print(("%-20s%10s%10s%10s\n"+"-"*50) % ("Category", "Count", "Total", "Double"))
#
Example #12
class ValidateDjangoOrmBrowser(TransactionTestCase):
    fixtures = ['irbdbalance.json']
    maxDiff = None

    def setUp(self):
        super(ValidateDjangoOrmBrowser, self).setUp()
        self.workspace = Workspace(
            cubes_root=settings.SLICER_MODELS_DIR,
            config=path.join(settings.SLICER_MODELS_DIR, 'slicer-django_backend.ini'),
        )
        self.browser = self.workspace.browser("irbd_balance")

    def test_simple_aggregation(self):
        result = self.browser.aggregate()
        self.assertEqual(result.summary, {
            'record_count': 62,
            'amount_sum': 1116860,
        })

    def test_simple_drilldown(self):
        result = self.browser.aggregate(drilldown=["item"])
        values = [
            (row.label, row.record["record_count"], row.record["amount_sum"])
            for row in result.table_rows("item")
        ]
        six.assertCountEqual(self, values, [
            (u'Assets', 32, 558430), (u'Equity', 8, 77592), (u'Liabilities', 22, 480838)
        ])

    def test_simple_slice(self):
        cut = PointCut("item", ["e"])
        cell = Cell(self.browser.cube, cuts=[cut])
        result = self.browser.aggregate(cell, drilldown=["item"])
        values = [
            (row.label, row.record["record_count"], row.record["amount_sum"])
            for row in result.table_rows("item")
        ]
        six.assertCountEqual(self, values, [
            (u'Retained Earnings', 2, 58663),
            (u'Deferred Amounts', 2, 672),
            (u'Capital Stock', 2, 22983),
            (u'Other', 2, -4726)
        ])

    def test_facts_list(self):
        facts = self.browser.facts(page=1, page_size=10, order=['item.line_item', 'amount'])
        six.assertCountEqual(self, facts, [
            {
                'amount': 2707,
                'item.category': u'l',
                'item.subcategory': u'ol',
                'item.subcategory_label': u'Other Liabilities',
                'item.category_label': u'Liabilities',
                'id': 54,
                'year': 2009,
                'item.line_item': u'Accounts payable and misc liabilities'
            }, {
                'amount': 2793,
                'item.category': u'l',
                'item.subcategory': u'ol',
                'item.subcategory_label': u'Other Liabilities',
                'item.category_label': u'Liabilities',
                'id': 53,
                'year': 2010,
                'item.line_item': u'Accounts payable and misc liabilities'
            }, {
                'amount': 1190,
                'item.category': u'l',
                'item.subcategory': u'ol',
                'item.subcategory_label': u'Other Liabilities',
                'item.category_label': u'Liabilities',
                'id': 49,
                'year': 2010,
                'item.line_item': u'Accrued charges on borrowings'
            }, {
                'amount': 1495,
                'item.category': u'l',
                'item.subcategory': u'ol',
                'item.subcategory_label': u'Other Liabilities',
                'item.category_label': u'Liabilities',
                'id': 50,
                'year': 2009,
                'item.line_item': u'Accrued charges on borrowings'
            }, {
                'amount': 764,
                'item.category': u'a',
                'item.subcategory': u'orcv',
                'item.subcategory_label': u'Other Receivables',
                'item.category_label': u'Assets',
                'id': 23,
                'year': 2010,
                'item.line_item': u'Accrued income on loans'
            }, {
                'amount': 889,
                'item.category': u'a',
                'item.subcategory': u'orcv',
                'item.subcategory_label': u'Other Receivables',
                'item.category_label': u'Assets',
                'id': 24,
                'year': 2009,
                'item.line_item': u'Accrued income on loans'
            }, {
                'amount': -3043,
                'item.category': u'e',
                'item.subcategory': u'oe',
                'item.subcategory_label': u'Other',
                'item.category_label': u'Equity',
                'id': 61,
                'year': 2010,
                'item.line_item': u'Accumulated Other Comorehensive Loss'
            }, {
                'amount': -1683,
                'item.category': u'e',
                'item.subcategory': u'oe',
                'item.subcategory_label': u'Other',
                'item.category_label': u'Equity',
                'id': 62,
                'year': 2009,
                'item.line_item': u'Accumulated Other Comorehensive Loss'
            }, {
                'amount': 110040,
                'item.category': u'l',
                'item.subcategory': u'b',
                'item.subcategory_label': u'Borrowings',
                'item.category_label': u'Liabilities',
                'id': 34,
                'year': 2009,
                'item.line_item': u'All'
            }, {
                'amount': 128577,
                'item.category': u'l',
                'item.subcategory': u'b',
                'item.subcategory_label': u'Borrowings',
                'item.category_label': u'Liabilities',
                'id': 33,
                'year': 2010,
                'item.line_item': u'All'
            },
        ])

    def test_multiple_drilldowns(self):
        # "?drilldown=year&drilldown=item&aggregates=amount_sum"
        result = self.browser.aggregate(drilldown=["year", "item"], aggregates=["amount_sum"])
        values = [
            (row.label, row.record)
            for row in result.table_rows("item")
        ]
        six.assertCountEqual(self, values, [
            ('Assets', {'amount_sum': 275420, 'item.category': 'a', 'item.category_label': 'Assets', 'year': 2009}),
            ('Equity', {'amount_sum': 40037, 'item.category': 'e', 'item.category_label': 'Equity', 'year': 2009}),
            ('Liabilities', {'amount_sum': 235383, 'item.category': 'l', 'item.category_label': 'Liabilities', 'year': 2009}),
            ('Assets', {'amount_sum': 283010, 'item.category': 'a', 'item.category_label': 'Assets', 'year': 2010}),
            ('Equity', {'amount_sum': 37555, 'item.category': 'e', 'item.category_label': 'Equity', 'year': 2010}),
            ('Liabilities', {'amount_sum': 245455, 'item.category': 'l', 'item.category_label': 'Liabilities', 'year': 2010})
        ])

    @skip('SetCuts are not yet supported by this backend')
    def test_multiple_drilldowns_with_set_cuts(self):
        # "?drilldown=year&drilldown=item&aggregates=amount_sum&cut=item.category:a;e;l|year:2009;2010"
        cuts = [
            SetCut("item", [['a'], ['e'], ['l']]),
            SetCut("year", [[2009], [2010]]),
        ]
        cell = Cell(self.browser.cube, cuts=cuts)
        result = self.browser.aggregate(cell, drilldown=["year", "item"], aggregates=["amount_sum"])
        values = [
            (row.label, row.record)
            for row in result.table_rows("item")
        ]
        six.assertCountEqual(self, values, [
            ('Assets', {'amount_sum': 275420, 'item.category': 'a', 'item.category_label': 'Assets', 'year': 2009}),
            ('Equity', {'amount_sum': 40037, 'item.category': 'e', 'item.category_label': 'Equity', 'year': 2009}),
            ('Liabilities', {'amount_sum': 235383, 'item.category': 'l', 'item.category_label': 'Liabilities', 'year': 2009}),
            ('Assets', {'amount_sum': 283010, 'item.category': 'a', 'item.category_label': 'Assets', 'year': 2010}),
            ('Equity', {'amount_sum': 37555, 'item.category': 'e', 'item.category_label': 'Equity', 'year': 2010}),
            ('Liabilities', {'amount_sum': 245455, 'item.category': 'l', 'item.category_label': 'Liabilities', 'year': 2010})
        ])
Example #13
from cubes import Workspace, PointCut, Cell, Cube, cuts_from_string

CUBE_NAME = "ft_billing"
#analytical workspace
workspace = Workspace()
workspace.register_default_store(
    "sql",
    url="postgresql://venusbailey@localhost:5432/notify_reports",
    schema="public")
#Model
workspace.import_model("./model.json")

# Aggregations

print('\n\n...test aggregation...')
browser = workspace.browser(CUBE_NAME)
cube = browser.cube
result = browser.aggregate()
print('Sum of record counts = ' + str(result.summary["record_count"]))
print('Amount = ' + str(result.summary["amount_sum"]))

# Drill down on year
print('\n\n....drilling down on year...')
result = browser.aggregate(drilldown=["dm_datetime:year"])
for record in result:
    print(record)

# Drill down on service
print('\n\n...drilling down on services...')
result = browser.aggregate(drilldown=["dm_service:service_name"])
for record in result:
    print(record)
Example #14
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.add_model("model.json")

# 2. Get a browser
browser = workspace.browser("irbd_balance")

# 3. Play with aggregates
result = browser.aggregate()

print "Total\n" \
      "----------------------"

print "Record count: %8d" % result.summary["record_count"]
print "Total amount: %8d" % result.summary["amount_sum"]

#
# 4. Drill-down through a dimension
#

print "\n" \
      "Drill Down by Category (top-level Item hierarchy)\n" \
      "================================================="
#
result = browser.aggregate(drilldown=["item"])
#
print ("%-20s%10s%10s\n"+"-"*40) % ("Category", "Count", "Total")
#
Example #15
from cubes import Workspace, Cell

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql",
                                 url="sqlite:///vvo_data.sqlite",
                                 dimension_prefix="dm_",
                                 fact_prefix="ft_")
workspace.import_model("procurements.cubesmodel")

# 2. Get a browser
browser = workspace.browser("contracts")
cube = browser.cube

# workspace = cubes.create_workspace("sql", model, url="postgres://localhost/ep2012",
#                                     schema="vvo",
#                                     dimension_prefix="dm_",
#                                     fact_prefix="ft_",
#                                     denormalized_view_schema="views",
#                                     use_denormalization=False,
#                                     denormalized_view_prefix="mft_")

def drilldown(cell, dimension):
    """Drill-down and aggregate recursively through als levels of `dimension`.
    
    This function is like recursively traversing directories on a file system
    and aggregating the file sizes, for example.
    
    * `cell` - cube cell to drill-down
    * `dimension` - dimension to be traversed through all levels
    """
Example #16
            "label": "Nombre curso",
        }, {
            "name": "creditos",
            "label": "Numero de creditos",
        }]
    }, {
        "name":
        "clase",
        "label":
        "Clase",
        "attributes": [{
            "name": "curso_materia",
            "label": "Materia"
        }, {
            "name": "seccion_id",
            "label": "Seccion",
        }, {
            "name": "salon_senhalizacion",
            "label": "Salon",
        }, {
            "name": "franja_id",
            "label": "Franja",
        }]
    }]
}

workspace.import_model(dicc)
browser = workspace.browser("clase")
result = browser.aggregate(drilldown=['franja'])
print(result)
Example #17
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")

# 2. Get a browser
browser = workspace.browser("quake_events")

# 3. Play with aggregates
result = browser.aggregate()

print("Total\n"
      "----------------------")

print("Record count : %8d" % result.summary["record_count"])
print("Total amount : %8d" % result.summary["average_mean"])

#
# 4. Drill-down through a dimension
#
"""
print("\n"
      "Drill Down by Category (top-level Item hierarchy)\n"
      "==================================================")
#
result = browser.aggregate(drilldown=["location"])
#
print(("%-20s%10s%10s%10s\n"+"-"*50) % ("Category", "Count", "Total", "Double"))
Example #18
                              ("hours_fri_open", "string"),
                              ("hours_fri_close", "string"),
                              ("hours_sat_open", "string"),
                              ("hours_sat_close", "string")],
                      create_id=False)

from cubes import Workspace
from cubes import PointCut
from cubes import Cell

from prettytable import PrettyTable

workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")
browser = workspace.browser("restaurants")
cube = workspace.cube("restaurants")
'''
# aggregation by zipcode
print("ZipCode, # Restaurants")
result = browser.aggregate(drilldown=["location:zipcode"])
for record in result:
    print(record['location.zipcode'], ",", record['num_restaurants'])
print()
'''

# aggregation by restaurant price
result = browser.aggregate(drilldown=["price"])
t = PrettyTable([
    'Price', '# Restaurants', 'Avg. Rating', '# Reviews/Restaurant',
    'Weekend Open Hours', 'Weekend Close Hours', 'Weekday Open Hours',
Example #19
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")

# 2. Get a browser
browser = workspace.browser("quake_events")

# 3. Play with aggregates
result = browser.aggregate()

print("Total\n" "----------------------")

print("Record count : %8d" % result.summary["record_count"])
print("Total amount : %8d" % result.summary["average_mean"])

#
# 4. Drill-down through a dimension
#
"""
print("\n"
      "Drill Down by Category (top-level Item hierarchy)\n"
      "==================================================")
#
result = browser.aggregate(drilldown=["location"])
#
print(("%-20s%10s%10s%10s\n"+"-"*50) % ("Category", "Count", "Total", "Double"))
#
Example #20
#                             ("subcategory", "string"),
#                             ("subcategory_label", "string"),
#                             ("line_item", "string"),
#                             ("year", "integer"),
#                             ("amount", "integer")],
#                       create_id=True
#                   )

from cubes import Workspace, PointCut, Cell

workspace = Workspace()
workspace.register_default_store(
    "sql", url="postgresql://*****:*****@localhost/willowood")
workspace.import_model("SalesTable.json")

browser = workspace.browser("salestable")

result = browser.aggregate()

print(result.summary["record_count"])

print(result.summary["Qty"])
print(result.summary["Value"])
cube = browser.cube
# result = browser.aggregate(drilldown=["billing_date"])
#
# for record in result:
#     print(' record: ', record)

# TryingOut Yesterday, MTD , YTD, LYTD
cuts = [
Example #21
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")

# 2. Get a browser
browser = workspace.browser("irbd_balance")

# 3. Play with aggregates
result = browser.aggregate()

print("Total\n"
      "----------------------")

print("Record count : %8d" % result.summary["record_count"])
print("Total amount : %8d" % result.summary["amount_sum"])
print("Double amount: %8d" % result.summary["double_amount_sum"])

#
# 4. Drill-down through a dimension
#

print("\n"
      "Drill Down by Category (top-level Item hierarchy)\n"
      "==================================================")
#
result = browser.aggregate(drilldown=["item"])
#
Example #22
        count += 1
        if (count >= level):
            count = 1
            break
        new_cell = cell.drilldown(dimension, row.key)
        drilldown(new_cell, dimension, level)


# 1. Creating a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///restaurant.sqlite")
workspace.import_model("model.json")

# 2. Getting a browser
cube = workspace.cube("restaurant_details")
browser = workspace.browser(cube)
dimension = cube.dimension("location")


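# The rollup helper used below is not shown in this excerpt. A minimal sketch,
# assuming it simply aggregates the cell at the top level of the given
# dimension (an assumption, not the author's code):
def rollup(cell, dimension):
    # A single-level drilldown yields one aggregated row per top-level member,
    # e.g. one row per state for the "location" dimension.
    result = browser.aggregate(cell, drilldown=[dimension])
    for row in result.table_rows(dimension):
        print(row.label, row.record)
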
# Rolling up to State
print("\n"
      "Roll up to state\n"
      "================")

cell = Cell(browser.cube)
rollup(cell, "location")


# Drilling down into the cities of each state
print("\n"
      "Drill down by state\n"
Example #23
from flask import Flask, request
from cubes import Workspace

app = Flask(__name__)
app.debug = True

workspace = Workspace("slicer.ini")
rbrowser = workspace.browser("rating")
tbrowser = workspace.browser("thesis")

def determine_title(url):
    if (url == "/rating"):
        return "Рейтинг. Сводная таблица"
    if (url == "/rating/group"):
        return "Рейтинг по группам"
    if (url == "/rating/group/student"):
        return "Рейтинг по студентам"
    if (url == "/rating/course"):
        return "Рейтинг по дисциплинам"
    if (url == "/rating/course/module"):
        return "Рейтинг по модулям"
    if (url == "/thesis"):
        return "Дипломы. Сводная таблица"
    if (url == "/thesis/group"):
        return "Дипломы по группам"
    if (url == "/thesis/group/student"):
        return "Дипломы по студентам"
    if (url == "/thesis/protected_at"):
        return "Дипломы по дате защиты"
    if (url == "/thesis/teacher"):
        return "Дипломы по преподавателям"
Example #24
from cubes import Workspace

print("Python Cubes - Test3")

# Create the Workspace:
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///baza.sqlite")

# Load the model:
workspace.import_model("model.json")

# Create the browser object:
browser = workspace.browser("WIDOK")

# Compute aggregation results, drilling down by GRUPA:
res = browser.aggregate(drilldown=["GRUPA"])
res2 = browser.aggregate(drilldown=["ROK"])
res3 = browser.aggregate(drilldown=["PLEC"])

# Print the overall summary and the per-group rows:
print(res.summary)
for r in res:
    print(r)

print("Ala ma kota!")

print(res2.summary)
for r in res2:
    print(r)

print("Ala ma kota!")
Example #25
from cubes import Workspace

print("Python Cubes - Test2")

# Create the Workspace:
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///baza.sqlite")

# Load the model:
workspace.import_model("model.json")

# Create the browser object:
browser = workspace.browser("POMIAR")

# Compute aggregation results, drilling down by GRUPA:
res = browser.aggregate(drilldown=["GRUPA"])
res2 = browser.aggregate(drilldown=["ROK"])

# Print the overall summary and the per-group rows:
print(res.summary)
for r in res:
    print(r)

print("Ala ma kota!")

print(res2.summary)
for r in res2:
    print(r)
Example #26
import os.path
BASE = os.path.dirname(os.path.abspath(__file__))

from cubes import Workspace, Cell, PointCut
from datetime import datetime, timedelta
import sys
import json
from django.http import JsonResponse

#-------------------------------------------------------------#
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///"+os.path.join(BASE,"myData.sqlite"))
workspace.import_model(os.path.join(BASE,"modal.json"))

browser = workspace.browser("FB_POSTS_DATA")

#-------------------------------------------------------------#

# Start of a seven-day window ending yesterday (using timedelta avoids
# negative day numbers early in the month).
d = datetime.now() - timedelta(days=7)

cut = PointCut("pub_date", [d.year, d.month, d.day], None)

cell = Cell(browser.cube, cuts = [cut])

#-------------------------------------------------------------#

def get_post_by_shares():
	result = browser.aggregate(cell, drilldown=["name"])

	shares = []
Example #27
from cubes import Workspace

print("Python Cubes Test!")

# Create the Workspace:
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///db.sqlite3")

# Load the model:
workspace.import_model("model.json")

# Create the browser object:
browser = workspace.browser("analiza_temperatura")

# Compute aggregation results, drilling down by kontynent:
res = browser.aggregate(drilldown=["kontynent"])

# Print the overall summary and the per-group rows:
print(res.summary)
for r in res:
    print(r)
Example #28
                      table_name="ibrd_balance",
                      fields=[("category", "string"),
                              ("category_label", "string"),
                              ("subcategory", "string"),
                              ("subcategory_label", "string"),
                              ("line_item", "string"), ("year", "integer"),
                              ("amount", "integer")],
                      create_id=True)

#workspace = Workspace(config="slicer.ini")
print("Creating Workspace and model")
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")

workspace.import_model("tutorial_model.json")
browser = workspace.browser("ibrd_balance")

print()
result = browser.aggregate()
print("General aggregations")
print("Record count: %s" % result.summary["record_count"])
print("Amount sum: %s" % result.summary["amount_sum"])
print()

print("Drilldown by year")
result = browser.aggregate(drilldown=["year"])
for record in result:
    print(record)
print("Drilldown by item")
result = browser.aggregate(drilldown=["item"])
for record in result:
    print(record)