Exemple #1
0
def create_app(**config):
    """Build and configure the SpenDB Flask application.

    Configuration precedence: package defaults, then the file named by
    the SPENDB_SETTINGS environment variable (if set), then the keyword
    overrides supplied by the caller.
    """
    app = Flask(__name__)

    app.config.from_object(default_settings)
    app.config.from_envvar('SPENDB_SETTINGS', silent=True)
    app.config.update(config)

    # Wire up the extensions that only need the app object.
    for extension in (db, babel, cache, mail, login_manager,
                      data_manager, pages):
        extension.init_app(app)

    migrate.init_app(app, db, directory=app.config.get('ALEMBIC_DIR'))
    cors.init_app(app,
                  resources=r'/api/*',
                  supports_credentials=True,
                  methods=['GET', 'HEAD', 'OPTIONS'])

    # Attach an OLAP workspace backed by the custom "spending" store.
    workspace = Workspace()
    ext.model_provider("spending", metadata={})
    ext.store("spending")
    workspace.register_default_store('spending', model_provider='spending')
    app.cubes_workspace = workspace
    return app
Exemple #2
0
 def createCube(self):
     """Create the Cubes workspace over the MySQL warehouse, load the
     model bundle and open a browser for the ``tweet`` cube."""
     workspace = Workspace()
     workspace.register_default_store(
         "sql", url="mysql://root:@localhost/datawarehouse")
     metadata = cubes.read_model_metadata_bundle("../CubeModelisation/model/")
     workspace.import_model(metadata)
     self.workspace = workspace
     self.browserTweet = workspace.browser("tweet")
Exemple #3
0
def create_app(**config):
    """Build the SpenDB application with UI assets and Jinja i18n.

    Configuration precedence: defaults, then SPENDB_SETTINGS env file
    (if any), then the caller's keyword overrides.
    """
    app = Flask(__name__, static_folder='../spendb.ui')
    app.config.from_object(default_settings)
    app.config.from_envvar('SPENDB_SETTINGS', silent=True)
    app.config.update(config)

    # Enable the formfill and i18n Jinja extensions.
    app.jinja_options['extensions'].extend([
        formencode_jinja2.formfill,
        'jinja2.ext.i18n'
    ])

    # Wire up all extensions that only need the app object.
    for extension in (db, babel, cache, mail, assets, login_manager,
                      data_manager, pages):
        extension.init_app(app)
    migrate.init_app(app, db, directory=app.config.get('ALEMBIC_DIR'))
    cors.init_app(app, resources=r'/api/*', supports_credentials=True,
                  methods=['GET', 'HEAD', 'OPTIONS'])

    # OLAP workspace backed by the custom "spending" store.
    workspace = Workspace()
    ext.model_provider("spending", metadata={})
    ext.store("spending")
    workspace.register_default_store('spending', model_provider='spending')
    app.cubes_workspace = workspace
    return app
Exemple #4
0
    def setUp(self):
        """Connect to a locally running slicer server and cache its cube
        list; credentials come from the environment."""
        workspace = Workspace()
        workspace.add_slicer(
            "myslicer", "http://localhost:5010",
            username=os.environ.get("SLICER_USERNAME"),
            password=os.environ.get("SLICER_PASSWORD"))
        self.w = workspace
        self.cube_list = workspace.list_cubes()
Exemple #5
0
class SlicerTestCase(unittest.TestCase):
    """Smoke-tests aggregation against a locally running slicer server."""

    def setUp(self):
        self.w = Workspace()
        self.w.add_slicer("myslicer",
                          "http://localhost:5010",
                          username=os.environ.get("SLICER_USERNAME"),
                          password=os.environ.get("SLICER_PASSWORD"))

        self.cube_list = self.w.list_cubes()

    def first_date_dim(self, cube):
        """Return the first dimension whose info dict flags ``is_date``.

        Raises:
            BrowserError: when the cube has no date dimension.
        """
        for d in cube.dimensions:
            if d.info.get('is_date'):
                return d
        raise BrowserError("No date dimension in cube %s" % cube.name)

    def test_basic(self):
        """Aggregate every non-"Mix" cube drilled down to day/date level."""
        for c in self.cube_list:
            if c.get('category') is not None and 'Mix' in c.get(
                    'category', ''):
                continue
            # BUG FIX: dropped the Python 2 trailing-comma print idiom,
            # which builds and discards a tuple under Python 3.
            print("Doing %s..." % c.get('name'))
            cube = self.w.cube(c.get('name'))
            date_dim = self.first_date_dim(cube)
            cut = cubes.browser.RangeCut(date_dim, [2013, 9, 25], None)
            cell = cubes.browser.Cell(cube, [cut])
            drill_levels = [
                l for l in date_dim.hierarchy().levels
                if l.name in ('day', 'date')
            ]
            if not drill_levels:
                # BUG FIX: this was a Python 2 print statement — a syntax
                # error under Python 3, which the rest of this class
                # already targets via print().
                print("Skipping cube %s with no day/date drilldown "
                      "available." % c.get('name'))
                continue
            drill = cubes.browser.Drilldown(
                [(date_dim, None, date_dim.level(drill_levels[0]))], cell)
            b = self.w.browser(cube)
            try:
                attr_dim = cube.dimension("attr")
                split = cubes.browser.PointCut(attr_dim, ['paid', 'pnb'])
            except Exception:
                # BUG FIX: bare except also swallowed SystemExit and
                # KeyboardInterrupt; a missing "attr" dimension simply
                # means we aggregate without a split.
                split = None
            try:
                kw = {}
                if cube.aggregates:
                    kw['aggregates'] = [cube.aggregates[0]]
                elif cube.measures:
                    kw['measures'] = [cube.measures[0]]
                else:
                    raise ValueError(
                        "Cube has neither aggregates nor measures")
                result = b.aggregate(cell, drilldown=drill, split=split, **kw)
                # BUG FIX: Python 2 print statement converted to print().
                print(result.cells)
            except Exception:
                # Best-effort loop: report the failure and continue with
                # the next cube.
                traceback.print_exc()
def main():
    """Aggregate the ``death_fact`` cube and run the two report queries."""
    config = ConfigParser()
    config.read("slicer.ini")
    workspace = Workspace(config=config)
    # The browser performs the actual aggregations and data queries.
    browser = workspace.browser('death_fact')
    cube = browser.cube
    compare_kill_distances(browser, cube)
    count_match_deaths(browser, cube)
 def setUp(self):
     """Build a workspace from the Django-backend slicer configuration."""
     super(ValidateDjangoOrmBrowser, self).setUp()
     config_path = path.join(settings.SLICER_MODELS_DIR,
                             'slicer-django_backend.ini')
     self.workspace = Workspace(cubes_root=settings.SLICER_MODELS_DIR,
                                config=config_path)
     self.browser = self.workspace.browser("irbd_balance")
Exemple #8
0
    def setUp(self):
        """Register the remote slicer and fetch the available cubes."""
        credentials = {
            "username": os.environ.get("SLICER_USERNAME"),
            "password": os.environ.get("SLICER_PASSWORD"),
        }
        self.w = Workspace()
        self.w.add_slicer("myslicer", "http://localhost:5010", **credentials)
        self.cube_list = self.w.list_cubes()
Exemple #9
0
class SlicerTestCase(unittest.TestCase):
    """Exercises aggregation over every cube of a local slicer server."""

    def setUp(self):
        self.w = Workspace()
        self.w.add_slicer("myslicer", "http://localhost:5010",
                          username=os.environ.get("SLICER_USERNAME"),
                          password=os.environ.get("SLICER_PASSWORD"))

        self.cube_list = self.w.list_cubes()

    def first_date_dim(self, cube):
        """Return the first dimension whose info dict flags ``is_date``.

        Raises:
            BrowserError: if the cube has no date dimension.
        """
        for d in cube.dimensions:
            if d.info.get('is_date'):
                return d
        raise BrowserError("No date dimension in cube %s" % cube.name)

    def test_basic(self):
        """Aggregate each non-"Mix" cube drilled down to a day/date level."""
        for c in self.cube_list:
            if c.get('category') is not None and 'Mix' in c.get('category', ''):
                continue

            cube = self.w.cube(c.get('name'))
            date_dim = self.first_date_dim(cube)
            cut = cubes.browser.RangeCut(date_dim, [2013, 9, 25], None)
            cell = cubes.browser.Cell(cube, [cut])
            drill_levels = [l for l in date_dim.hierarchy().levels
                            if l.name in ('day', 'date')]
            if not drill_levels:
                continue

            drill = cubes.browser.Drilldown(
                [(date_dim, None, date_dim.level(drill_levels[0]))], cell)
            b = self.w.browser(cube)
            try:
                attr_dim = cube.dimension("attr")
                split = cubes.browser.PointCut(attr_dim, ['paid', 'pnb'])
            except Exception:
                # BUG FIX: bare except also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception.  A missing
                # "attr" dimension simply means no split.
                split = None
            try:
                kw = {}
                if cube.aggregates:
                    kw['aggregates'] = [cube.aggregates[0]]
                elif cube.measures:
                    kw['measures'] = [cube.measures[0]]
                else:
                    raise ValueError("Cube has neither aggregates nor measures")
                result = b.aggregate(cell, drilldown=drill, split=split, **kw)
            except Exception:
                # Best-effort loop: report the failure and keep going.
                traceback.print_exc()
Exemple #10
0
def create_browser():
    """Build a workspace over the local SQLite data and return a browser
    for the ``ratings`` cube."""
    print("Creating Workspace and model")
    ws = Workspace()
    ws.register_default_store("sql", url="sqlite:///data.sqlite")
    ws.import_model("movie_ratings_model.json")
    return ws.browser("ratings")
Exemple #11
0
    def __init__(self):
        """Open a browser on the ``ratings`` cube backed by data.sqlite."""
        print("Creating Workspace and model")
        ws = Workspace()
        ws.register_default_store("sql", url="sqlite:///data.sqlite")
        ws.import_model("movie_ratings_model.json")
        self.browser = ws.browser("ratings")
Exemple #12
0
class SlicerTestCase(unittest.TestCase):
    """Smoke-tests aggregation over each cube of the local slicer server."""

    def setUp(self):
        self.w = Workspace()
        self.w.add_slicer("myslicer", "http://localhost:5010")

        self.cube_list = self.w.list_cubes()

    def first_date_dim(self, cube):
        """Return the first dimension whose info dict flags ``is_date``.

        Raises:
            BrowserError: if the cube has no date dimension.
        """
        for d in cube.dimensions:
            if d.info.get('is_date'):
                return d
        raise BrowserError("No date dimension in cube %s" % cube.name)

    def test_basic(self):
        """Aggregate each non-"Mix" cube drilled down to its day level."""
        for c in self.cube_list:
            if c.get('category') is not None and 'Mix' in c.get('category', ''):
                continue
            # BUG FIX: dropped the Python 2 trailing-comma print idiom.
            print("Doing %s..." % c.get('name'))
            cube = self.w.cube(c.get('name'))
            date_dim = self.first_date_dim(cube)
            cut = cubes.browser.RangeCut(date_dim, [2013, 9, 25], None)
            cell = cubes.browser.Cell(cube, [cut])
            drill = cubes.browser.Drilldown(
                [(date_dim, None, date_dim.level('day'))], cell)
            b = self.w.browser(cube)
            try:
                attr_dim = cube.dimension("attr")
                split = cubes.browser.PointCut(attr_dim, ['paid', 'pnb'])
            except Exception:
                # BUG FIX: bare except narrowed to Exception.  A missing
                # "attr" dimension simply means no split.
                split = None
            try:
                kw = {}
                if cube.aggregates:
                    kw['aggregates'] = [cube.aggregates[0]]
                elif cube.measures:
                    kw['measures'] = [cube.measures[0]]
                else:
                    raise ValueError("Cube has neither aggregates nor measures")
                result = b.aggregate(cell, drilldown=drill, split=split, **kw)
                # BUG FIX: Python 2 print statements (syntax errors under
                # Python 3) converted to print() calls.
                print(result.cells)
            except Exception:
                import sys
                print(sys.exc_info())
Exemple #13
0
def create_workspace(config_file):
    """Initialise the module-global WORKSPACE and ENGINE.

    Parameters:
        config_file: path to the slicer .ini configuration file.
    """
    global WORKSPACE
    global ENGINE

    logger = get_logger()
    logger.setLevel("INFO")
    # BUG FIX: log message read "cretating workspace".
    logger.info("creating workspace from %s" % config_file)

    WORKSPACE = Workspace(config=config_file)
    ENGINE = engine(WORKSPACE)
Exemple #14
0
    def setUp(self):
        """Create a SQL-backed workspace with an empty ``sales`` table so
        the browser has something to bind to, then load both models."""
        super(SlicerModelTestCase, self).setUp()

        ws = Workspace()
        ws.register_default_store("sql", url=TEST_DB_URL)
        self.ws = ws
        self.slicer.cubes_workspace = ws

        # Satisfy the browser with an empty table until real data exists.
        # TODO: replace this once we have data.
        metadata = ws.get_store("default").metadata
        table = Table("sales", metadata)
        table.append_column(Column("id", Integer))
        table.create()

        for model_name in ("model.json", "sales_no_date.json"):
            ws.import_model(self.model_path(model_name))
Exemple #15
0
    def setUp(self):
        """Prepare a workspace over an empty test database and load models."""
        super(SlicerModelTestCase, self).setUp()

        workspace = Workspace()
        workspace.register_default_store("sql", url=TEST_DB_URL)
        self.ws = workspace
        self.slicer.cubes_workspace = workspace

        # The browser needs at least an (empty) fact table to exist.
        # TODO: replace this once we have data.
        store = workspace.get_store("default")
        sales = Table("sales", store.metadata)
        sales.append_column(Column("id", Integer))
        sales.create()

        workspace.import_model(self.model_path("model.json"))
        workspace.import_model(self.model_path("sales_no_date.json"))
Exemple #16
0
 def get_cubes_workspace(self):
     """Return a workspace bound to the Niamoto fact and dimension
     schemas, with the generated model already imported."""
     ws = Workspace()
     ws.register_default_store(
         "sql",
         url=Connector.get_database_url(),
         schema=settings.NIAMOTO_FACT_TABLES_SCHEMA,
         dimension_schema=settings.NIAMOTO_DIMENSIONS_SCHEMA,
     )
     ws.import_model(self.generate_cubes_model())
     return ws
def analiza_temperatura(request):
    """Render the temperature-analysis page.

    When the ``czy_analiza`` GET flag is present, aggregate the
    ``analiza_temperatura`` cube drilled down by the first recognised
    dimension found among the GET parameters; otherwise render an empty
    context.
    """
    # Dimensions that may be requested via GET, in priority order.
    dimensions = ("wiek_pacjenta", "data_pomiaru", "kontynent", "kraj",
                  "obszar")

    if request.GET.get("czy_analiza", None):
        print("Super dokonaj analizy!")
        # Build the workspace over the local SQLite database.
        workspace = Workspace()
        workspace.register_default_store("sql", url="sqlite:///db.sqlite3")

        # Load the cube model.
        workspace.import_model("model.json")

        # Create the browser object.
        browser = workspace.browser("analiza_temperatura")

        # Pick the first requested drilldown dimension.
        po_czym = None
        for dim in dimensions:
            if request.GET.get(dim, None):
                po_czym = dim
                break

        # BUG FIX: the original if/elif chain left ``res`` and
        # ``po_czym`` undefined (NameError/UnboundLocalError) when no
        # known dimension parameter was supplied.
        if po_czym is None:
            return render(request, "aplikacja/analiza/temperatura.html", {})

        res = browser.aggregate(drilldown=[po_czym])

        # Print the overall summary and collect the per-group rows.
        lista = []
        print(res.summary)
        for r in res:
            print(type(r))
            print(r)
            lista.append(r)

        # Build the template context.
        print(type(res))
        cont = {"agre_list": lista, "czy_analiza": True, "po_czym": po_czym}

    else:
        cont = {}
        print("Lipa")

    return render(request, "aplikacja/analiza/temperatura.html", cont)
Exemple #18
0
def create_app(**config):
    """Application factory for OpenSpending.

    Configuration precedence: default_settings, then the file named by
    OPENSPENDING_SETTINGS (if set), then the keyword overrides.
    """
    app = Flask(__name__)
    # Custom routing rule class and URL converters used by the API routes.
    app.url_rule_class = NamespaceRouteRule
    app.url_map.converters['fmt'] = FormatConverter
    app.url_map.converters['nodot'] = NoDotConverter

    app.config.from_object(default_settings)
    app.config.from_envvar('OPENSPENDING_SETTINGS', silent=True)
    app.config.update(config)

    app.jinja_options['extensions'].extend(
        [formencode_jinja2.formfill, 'jinja2.ext.i18n'])

    db.init_app(app)
    cache.init_app(app)
    mail.init_app(app)
    assets.init_app(app)
    login_manager.init_app(app)
    configure_uploads(app, (sourcefiles, ))

    @app.before_request
    def require_basic_auth(*args, **kwargs):
        # When LOCKDOWN_FORCE is on, redirect anonymous users to
        # /lockdown; the lockdown page and ping endpoint stay reachable.
        LOCKDOWN_FORCE = app.config['LOCKDOWN_FORCE']
        if not current_user.is_authenticated() and request.path not in [
                "/lockdown", "/__ping__"
        ] and LOCKDOWN_FORCE:
            return redirect("/lockdown", code=302)
        from openspending.model.search import SearchForm
        g.search_form = SearchForm()
        # CSRF check for every POST except the lockdown form; the token
        # may arrive in the form body or in a JSON payload.
        if request.method == "POST" and request.path not in ["/lockdown"]:
            token = session.get('csrf_token', None)
            # NOTE(review): "resquesttoken" looks like a typo for
            # "requesttoken" — local name only, behaviour unaffected.
            resquesttoken = request.form.get('csrf_token', None)
            if request.json and not resquesttoken:
                resquesttoken = request.json.get('csrf_token')
            if not token or resquesttoken != token:
                abort(403)

    # The OLAP workspace needs an application context to configure its
    # default store.
    with app.app_context():
        app.cubes_workspace = Workspace()

        app.cubes_workspace.register_default_store('OpenSpendingStore')

    return app
Exemple #19
0
    def create_workspace(self, store=None, model=None):
        """Create a shared test workspace.

        `store` is a dictionary describing the default store; its "type"
        key (defaulting to "sql") selects the backend.  `model` is either
        a filename relative to ``tests/models`` or a model dictionary.
        When no store is given but the test class has an engine set, that
        engine becomes the default SQL store.
        """
        workspace = Workspace()

        if store:
            options = dict(store)
            backend = options.pop("type", "sql")
            workspace.register_default_store(backend, **options)
        elif self.engine:
            workspace.register_default_store("sql", engine=self.engine)

        if model:
            model_ref = model
            if isinstance(model_ref, compat.string_type):
                model_ref = self.model_path(model_ref)
            workspace.import_model(model_ref)

        return workspace
Exemple #20
0
    def create_workspace(self, store=None, model=None):
        """Build and return a shared test workspace.

        The default store comes from the `store` dict (its "type" key
        picks the backend, defaulting to "sql"); otherwise, if the class
        has an engine, that engine is registered as the default SQL
        store.  `model` may be a path relative to ``tests/models`` or a
        model dictionary.
        """
        ws = Workspace()

        if store:
            store_spec = dict(store)
            ws.register_default_store(store_spec.pop("type", "sql"),
                                      **store_spec)
        elif self.engine:
            ws.register_default_store("sql", engine=self.engine)

        if model:
            if isinstance(model, compat.string_type):
                model = self.model_path(model)
            ws.import_model(model)

        return ws
Exemple #21
0
 def test_base_existence(self):
     """The base model must provide the ``base_time`` dimension."""
     workspace = Workspace()
     self.assertEqual(workspace.dimension("base_time").name, "base_time")
Exemple #22
0
 def test_select_hierarchies(self):
     """``base_date`` must expose fewer hierarchies than ``base_time``."""
     workspace = Workspace()
     time_dim = workspace.dimension("base_time")
     date_dim = workspace.dimension("base_date")
     self.assertLess(len(date_dim.hierarchies), len(time_dim.hierarchies))
Exemple #23
0
#       Go to the ../hello_world directory and do: python prepare_data.py
#
# Instructions:
#
#       Just run this file:
#
#            python table.py
# Output:
#   * standard output – text table
#   * table.html
#   * cross_table.html
#

from cubes import Workspace, create_formatter

# Workspace configured by slicer.ini (data prepared by prepare_data.py).
workspace = Workspace("slicer.ini")

# Create formatters
text_formatter = create_formatter("text_table")
html_formatter = create_formatter("simple_html_table")
html_cross_formatter = create_formatter("html_cross_table")

# Get the browser and data

browser = workspace.browser("irbd_balance")

result = browser.aggregate(drilldown=["item"])
# Cache so the result can be iterated by several formatters below.
result = result.cached()

#
# 1. Create text output
Exemple #24
0
    def setUp(self):
        """Point the workspace at the local slicer server and cache the
        list of available cubes."""
        self.w = Workspace()
        self.w.add_slicer("myslicer", "http://localhost:5010")
        self.cube_list = self.w.list_cubes()
Exemple #25
0
import os

from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from app.celery.celery import NotifyCelery
from cubes import Workspace

# Shared extension singletons; each is bound to the application later
# in create_app().
migrate = Migrate()
db = SQLAlchemy()
notify_celery = NotifyCelery()
notify_workspace = Workspace()


def create_app(application):
    """Configure *application* for the current NOTIFY_ENVIRONMENT and
    wire up the Celery, database and migration extensions.

    Returns the configured application.
    """
    from app.config import configs

    environment = os.environ['NOTIFY_ENVIRONMENT']
    application.config.from_object(configs[environment])

    notify_celery.init_app(application)
    db.init_app(application)
    migrate.init_app(application, db=db)
    register_blueprint(application)

    return application

Exemple #26
0
from cubes import Workspace
from cubes.compat import ConfigParser

print("Python Cubes - Test1")

# Create the Workspace from the configuration file:
conf = ConfigParser()
conf.read("slicer.ini")
workspace = Workspace(config=conf)

# Load the model:
workspace.import_model("model.json")

# Create the browser object:
browser = workspace.browser("POMIAR")

# Aggregate, drilled down by GRUPA:
res = browser.aggregate(drilldown=["GRUPA"])

# Print the overall summary and every per-group row:
print(res.summary)
for r in res:
    print(r)
Exemple #27
0
#       Go to the ../hello_world directory and do: python prepare_data.py
#
# Instructions:
#
#       Just run this file:
#
#            python table.py
# Output:
#   * standard output – text table
#   * table.html
#   * cross_table.html
#

from cubes import Workspace, create_formatter

# Workspace configured by slicer.ini (data prepared by prepare_data.py).
workspace = Workspace("slicer.ini")

# Create formatters
text_formatter = create_formatter("text_table")
html_formatter = create_formatter("simple_html_table")
html_cross_formatter = create_formatter("html_cross_table")

# Get the browser and data

browser = workspace.browser("irbd_balance")

result = browser.aggregate(drilldown=["item"])
# Cache so the result can be iterated by several formatters below.
result = result.cached()

#
# 1. Create text output
Exemple #28
0
        print(indent, "-" * 3)
        print(indent, "Total Number of reviews: ", row.record["total_number_of_reviews"])
        print(indent, "Price average: $", row.record["price_average"])
        print(indent, "Rating average: ", row.record["rating_average"])
        print("\n")

        count += 1
        if (count >= level):
            count = 1
            break
        new_cell = cell.drilldown(dimension, row.key)
        drilldown(new_cell, dimension, level)


# 1. Creating a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///restaurant.sqlite")
workspace.import_model("model.json")

# 2. Getting a browser
cube = workspace.cube("restaurant_details")
browser = workspace.browser(cube)
dimension = cube.dimension("location")


# Rolling up to State
print("\n"
      "Roll up to state\n"
      "================")

# Start from the whole-cube cell (no cuts applied yet).
cell = Cell(browser.cube)
from cubes import Workspace, PointCut, Cell, Cube, cuts_from_string

CUBE_NAME = "ft_billing"
# Analytical workspace over the local notify_reports database.
workspace = Workspace()
workspace.register_default_store(
    "sql",
    url="postgresql://venusbailey@localhost:5432/notify_reports",
    schema="public")
# Model
workspace.import_model("./model.json")

# Aggregations

print('\n\n...test aggregation...')
browser = workspace.browser(CUBE_NAME)
cube = browser.cube
# Whole-cube aggregation: no cell, no drilldown.
result = browser.aggregate()
print('Sum of record counts = ' + str(result.summary["record_count"]))
print('Amount = ' + str(result.summary["amount_sum"]))

# Drill down on year
print('\n\n....drilling down on year...')
result = browser.aggregate(drilldown=["dm_datetime:year"])
for record in result:
    print(record)

# Drill down on service
print('\n\n...drilling down on services...')
result = browser.aggregate(drilldown=["dm_service:service_name"])
for record in result:
Exemple #30
0
 def test_select_hierarchies(self):
     """The date dimension must have strictly fewer hierarchies than the
     time dimension."""
     ws = Workspace()
     date_hierarchies = ws.dimension("base_date").hierarchies
     time_hierarchies = ws.dimension("base_time").hierarchies
     self.assertLess(len(date_hierarchies), len(time_hierarchies))
Exemple #31
0
import os.path
BASE = os.path.dirname(os.path.abspath(__file__))

from cubes import Workspace, Cell, PointCut
from datetime import datetime, timedelta
import sys
import json
from django.http import JsonResponse

#-------------------------------------------------------------#
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///"+os.path.join(BASE,"myData.sqlite"))
workspace.import_model(os.path.join(BASE,"modal.json"))

browser = workspace.browser("FB_POSTS_DATA")

#-------------------------------------------------------------#

# Yesterday's date, used as the anchor of the reporting window.
d =  datetime.now() - timedelta(days=1)

# NOTE(review): ``d.day - 6`` goes negative or invalid early in a month;
# presumably a timedelta should be subtracted before splitting into
# year/month/day components — confirm intended behaviour.
cut = PointCut("pub_date", [d.year, d.month, d.day-6], None)

cell = Cell(browser.cube, cuts = [cut])

#-------------------------------------------------------------#

def get_post_by_shares():
	result = browser.aggregate(cell, drilldown=["name"])

	shares = []
from cubes import Workspace, Cell

# 1. Create a workspace over the procurement data; table names use the
#    dm_/ft_ dimension and fact prefixes.
workspace = Workspace()
workspace.register_default_store("sql",
                                 url="sqlite:///vvo_data.sqlite",
                                 dimension_prefix="dm_",
                                 fact_prefix="ft_")
workspace.import_model("procurements.cubesmodel")

# 2. Get a browser
browser = workspace.browser("contracts")
cube = browser.cube

# workspace = cubes.create_workspace("sql", model, url="postgres://localhost/ep2012",
#                                     schema="vvo",
#                                     dimension_prefix="dm_",
#                                     fact_prefix="ft_",
#                                     denormalized_view_schema="views",
#                                     use_denormalization=False,
#                                     denormalized_view_prefix="mft_")

def drilldown(cell, dimension):
    """Drill-down and aggregate recursively through als levels of `dimension`.
    
    This function is like recursively traversing directories on a file system
    and aggregating the file sizes, for example.
    
    * `cell` - cube cell to drill-down
    * `dimension` - dimension to be traversed through all levels
    """
Exemple #33
0
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.add_model("model.json")

# 2. Get a browser
browser = workspace.browser("irbd_balance")

# 3. Play with aggregates
result = browser.aggregate()

# BUG FIX: the prints below were Python 2 print statements — syntax
# errors under Python 3, which other snippets in this file already
# target.  Converted to print() calls with identical output.
print("Total\n"
      "----------------------")

print("Record count: %8d" % result.summary["record_count"])
print("Total amount: %8d" % result.summary["amount_sum"])

#
# 4. Drill-down through a dimension
#

print("\n"
      "Drill Down by Category (top-level Item hierarchy)\n"
      "=================================================")
#
result = browser.aggregate(drilldown=["item"])
#
print(("%-20s%10s%10s\n" + "-"*40) % ("Category", "Count", "Total"))
#
Exemple #34
0
from cubes import StaticModelProvider
from cubes import Workspace

workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///BI.db")
dicc = {
    "cubes": [{
        "name": "clase",
        "label": "Clases",
        "dimensions": ["franja", "seccion", "salon", "curso"]
    }],
    "dimensions": [{
        "name":
        "franja",
        "label":
        "Franja",
        "attributes": [{
            "name": "dia",
            "label": "Dia"
        }, {
            "name": "hora_inicio",
            "label": "Hora de inicio",
        }, {
            "name": "minuto_inicio",
            "label": "Minuto de inicio",
        }, {
            "name": "hora_fin",
            "label": "Hora de finalizacion",
        }, {
            "name": "minuto_fin",
            "label": "Minuto de finalizacion",
Exemple #35
0
 def test_base_ignorance(self):
     """With load_base_model=False the base dimensions must be absent."""
     workspace = Workspace(load_base_model=False)
     with self.assertRaises(NoSuchDimensionError):
         workspace.dimension("base_time")
Exemple #36
0
from cubes import Workspace

# Workspace over the webshop SQLite database.
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///webshop.sqlite")
workspace.import_model("model.json")
browser = workspace.browser("sales")
# Whole-cube aggregation.
result = browser.aggregate()
print("sum = ", result.summary["quantity_sum"])

# Drill down by sale week and by product.
result = browser.aggregate(drilldown=["date_sale:week", "product"])
for record in result:
    print(record)
Exemple #37
0
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")

# 2. Get a browser
browser = workspace.browser("irbd_balance")

# 3. Play with aggregates
# Whole-cube aggregation: no cell, no drilldown applied.
result = browser.aggregate()

print("Total\n"
      "----------------------")

print("Record count : %8d" % result.summary["record_count"])
print("Total amount : %8d" % result.summary["amount_sum"])
print("Double amount: %8d" % result.summary["double_amount_sum"])

#
# 4. Drill-down through a dimension
#

print("\n"
      "Drill Down by Category (top-level Item hierarchy)\n"
      "==================================================")
#
result = browser.aggregate(drilldown=["item"])
#
Exemple #38
0
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

# 1. Create a workspace
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")

# 2. Get a browser
browser = workspace.browser("quake_events")

# 3. Play with aggregates
# Whole-cube aggregation: no cell, no drilldown applied.
result = browser.aggregate()

print("Total\n" "----------------------")

print("Record count : %8d" % result.summary["record_count"])
# NOTE(review): label says "Total amount" but the value printed is the
# ``average_mean`` aggregate — confirm which is intended.
print("Total amount : %8d" % result.summary["average_mean"])

#
# 4. Drill-down through a dimension
#
"""
print("\n"
      "Drill Down by Category (top-level Item hierarchy)\n"
      "==================================================")
#
result = browser.aggregate(drilldown=["location"])
#
print(("%-20s%10s%10s%10s\n"+"-"*50) % ("Category", "Count", "Total", "Double"))
#
class ValidateDjangoOrmBrowser(TransactionTestCase):
    fixtures = ['irbdbalance.json']
    maxDiff = None

    def setUp(self):
        """Build a workspace from the Django-backend slicer configuration
        and open a browser on the ``irbd_balance`` cube."""
        super(ValidateDjangoOrmBrowser, self).setUp()
        config_file = path.join(settings.SLICER_MODELS_DIR,
                                'slicer-django_backend.ini')
        self.workspace = Workspace(cubes_root=settings.SLICER_MODELS_DIR,
                                   config=config_file)
        self.browser = self.workspace.browser("irbd_balance")

    def test_simple_aggregation(self):
        """A plain aggregate must return the known fixture totals."""
        result = self.browser.aggregate()
        # BUG FIX: assertEquals is a deprecated alias (removed in
        # Python 3.12); use assertEqual.
        self.assertEqual(result.summary, {
            'record_count': 62,
            'amount_sum': 1116860,
        })

    def test_simple_drilldown(self):
        """Drilling down by item yields the three top-level categories."""
        result = self.browser.aggregate(drilldown=["item"])
        rows = result.table_rows("item")
        values = [(r.label, r.record["record_count"], r.record["amount_sum"])
                  for r in rows]
        six.assertCountEqual(self, values, [
            (u'Assets', 32, 558430), (u'Equity', 8, 77592), (u'Liabilities', 22, 480838)
        ])

    def test_simple_slice(self):
        """Slicing to the equity category and drilling down by item."""
        cell = Cell(self.browser.cube, cuts=[PointCut("item", ["e"])])
        result = self.browser.aggregate(cell, drilldown=["item"])
        values = [(row.label,
                   row.record["record_count"],
                   row.record["amount_sum"])
                  for row in result.table_rows("item")]
        six.assertCountEqual(self, values, [
            (u'Retained Earnings', 2, 58663),
            (u'Deferred Amounts', 2, 672),
            (u'Capital Stock', 2, 22983),
            (u'Other', 2, -4726)
        ])

    def test_facts_list(self):
        """Paged facts listing: the first page of 10 facts, ordered by
        line item then amount, must match the fixture rows exactly."""
        facts = self.browser.facts(page=1, page_size=10, order=['item.line_item', 'amount'])
        six.assertCountEqual(self, facts, [
            {
                'amount': 2707,
                'item.category': u'l',
                'item.subcategory': u'ol',
                'item.subcategory_label': u'Other Liabilities',
                'item.category_label': u'Liabilities',
                'id': 54,
                'year': 2009,
                'item.line_item': u'Accounts payable and misc liabilities'
            }, {
                'amount': 2793,
                'item.category': u'l',
                'item.subcategory': u'ol',
                'item.subcategory_label': u'Other Liabilities',
                'item.category_label': u'Liabilities',
                'id': 53,
                'year': 2010,
                'item.line_item': u'Accounts payable and misc liabilities'
            }, {
                'amount': 1190,
                'item.category': u'l',
                'item.subcategory': u'ol',
                'item.subcategory_label': u'Other Liabilities',
                'item.category_label': u'Liabilities',
                'id': 49,
                'year': 2010,
                'item.line_item': u'Accrued charges on borrowings'
            }, {
                'amount': 1495,
                'item.category': u'l',
                'item.subcategory': u'ol',
                'item.subcategory_label': u'Other Liabilities',
                'item.category_label': u'Liabilities',
                'id': 50,
                'year': 2009,
                'item.line_item': u'Accrued charges on borrowings'
            }, {
                'amount': 764,
                'item.category': u'a',
                'item.subcategory': u'orcv',
                'item.subcategory_label': u'Other Receivables',
                'item.category_label': u'Assets',
                'id': 23,
                'year': 2010,
                'item.line_item': u'Accrued income on loans'
            }, {
                'amount': 889,
                'item.category': u'a',
                'item.subcategory': u'orcv',
                'item.subcategory_label': u'Other Receivables',
                'item.category_label': u'Assets',
                'id': 24,
                'year': 2009,
                'item.line_item': u'Accrued income on loans'
            }, {
                'amount': -3043,
                'item.category': u'e',
                'item.subcategory': u'oe',
                'item.subcategory_label': u'Other',
                'item.category_label': u'Equity',
                'id': 61,
                'year': 2010,
                'item.line_item': u'Accumulated Other Comorehensive Loss'
            }, {
                'amount': -1683,
                'item.category': u'e',
                'item.subcategory': u'oe',
                'item.subcategory_label': u'Other',
                'item.category_label': u'Equity',
                'id': 62,
                'year': 2009,
                'item.line_item': u'Accumulated Other Comorehensive Loss'
            }, {
                'amount': 110040,
                'item.category': u'l',
                'item.subcategory': u'b',
                'item.subcategory_label': u'Borrowings',
                'item.category_label': u'Liabilities',
                'id': 34,
                'year': 2009,
                'item.line_item': u'All'
            }, {
                'amount': 128577,
                'item.category': u'l',
                'item.subcategory': u'b',
                'item.subcategory_label': u'Borrowings',
                'item.category_label': u'Liabilities',
                'id': 33,
                'year': 2010,
                'item.line_item': u'All'
            },
        ])

    def test_multiple_drilldowns(self):
        """Drilling down by both year and item yields one row per
        (year, top-level category) pair with the summed amount.

        Equivalent HTTP request:
        "?drilldown=year&drilldown=item&aggregates=amount_sum"
        """
        result = self.browser.aggregate(drilldown=["year", "item"],
                                        aggregates=["amount_sum"])
        observed = [(row.label, row.record)
                    for row in result.table_rows("item")]
        expected = [
            ('Assets', {'amount_sum': 275420, 'item.category': 'a',
                        'item.category_label': 'Assets', 'year': 2009}),
            ('Equity', {'amount_sum': 40037, 'item.category': 'e',
                        'item.category_label': 'Equity', 'year': 2009}),
            ('Liabilities', {'amount_sum': 235383, 'item.category': 'l',
                             'item.category_label': 'Liabilities',
                             'year': 2009}),
            ('Assets', {'amount_sum': 283010, 'item.category': 'a',
                        'item.category_label': 'Assets', 'year': 2010}),
            ('Equity', {'amount_sum': 37555, 'item.category': 'e',
                        'item.category_label': 'Equity', 'year': 2010}),
            ('Liabilities', {'amount_sum': 245455, 'item.category': 'l',
                             'item.category_label': 'Liabilities',
                             'year': 2010}),
        ]
        six.assertCountEqual(self, observed, expected)

    @skip('SetCuts are not yet supported by this backend')
    def test_multiple_drilldowns_with_set_cuts(self):
        """Same drill-down as test_multiple_drilldowns, but restricted by
        set cuts that happen to cover every member, so the expected rows
        are identical.

        Equivalent HTTP request:
        "?drilldown=year&drilldown=item&aggregates=amount_sum"
        "&cut=item.category:a;e;l|year:2009;2010"
        """
        cell = Cell(self.browser.cube, cuts=[
            SetCut("item", [['a'], ['e'], ['l']]),
            SetCut("year", [[2009], [2010]]),
        ])
        result = self.browser.aggregate(cell, drilldown=["year", "item"],
                                        aggregates=["amount_sum"])
        observed = [(row.label, row.record)
                    for row in result.table_rows("item")]
        expected = [
            ('Assets', {'amount_sum': 275420, 'item.category': 'a',
                        'item.category_label': 'Assets', 'year': 2009}),
            ('Equity', {'amount_sum': 40037, 'item.category': 'e',
                        'item.category_label': 'Equity', 'year': 2009}),
            ('Liabilities', {'amount_sum': 235383, 'item.category': 'l',
                             'item.category_label': 'Liabilities',
                             'year': 2009}),
            ('Assets', {'amount_sum': 283010, 'item.category': 'a',
                        'item.category_label': 'Assets', 'year': 2010}),
            ('Equity', {'amount_sum': 37555, 'item.category': 'e',
                        'item.category_label': 'Equity', 'year': 2010}),
            ('Liabilities', {'amount_sum': 245455, 'item.category': 'l',
                             'item.category_label': 'Liabilities',
                             'year': 2010}),
        ]
        six.assertCountEqual(self, observed, expected)
Exemple #40
0
class TweetCube:
    """Aggregation helpers over the "tweet" OLAP cube for one concept.

    Connects to the MySQL data warehouse on construction and returns
    pre-shaped result structures for the dashboard charts: tweets per
    source/continent (pie chart) and per-day bar-chart races by language
    and by sentiment.
    """

    def __init__(self, concept):
        # Build the workspace/browser first so every chart method can run.
        self.createCube()
        self.concept = concept

    def createCube(self):
        """Create the workspace, load the model bundle and open a browser
        on the "tweet" cube."""
        self.workspace = Workspace()
        self.workspace.register_default_store(
            "sql", url="mysql://root:@localhost/datawarehouse")
        model = cubes.read_model_metadata_bundle("../CubeModelisation/model/")
        self.workspace.import_model(model)
        self.browserTweet = self.workspace.browser("tweet")

    def _cellForConcept(self):
        # Shared helper: a cell with one PointCut restricting every
        # aggregation to self.concept.
        cube = self.workspace.cube("tweet")
        cube.browser = self.browserTweet
        return Cell(cube, [PointCut("concept", [self.concept])])

    def getPieChartSource(self):
        """Return, for each known continent, the tweet count per source.

        Shape: [{'continentName': ..., 'sources': [{'source': ...,
        'numberOfTweets': ...}, ...]}, ...], with zero counts for
        continents or sources absent from the warehouse.
        """
        result = self.browserTweet.aggregate(
            self._cellForConcept(),
            drilldown=["location", "source"],
            aggregates=["numberOfTweets_sum"])

        # continent name -> {source name -> tweet count}
        output = defaultdict(dict)
        for row in result.table_rows("location"):
            record = row.record
            continent = record['location.continentName']
            output[continent][record['source.sourceName']] = \
                record['numberOfTweets_sum']
        print("output ", output)

        sources = ["iPhone", "Android", "Web", "Unknown"]
        continentsList = ['Asia', 'Africa', 'Australia', 'Europe',
                          'North America', 'South America']
        data = []
        for continent in continentsList:
            counts = output.get(continent, {})
            # Build a fresh dict per continent; appending a shared mutated
            # dict would make every list entry alias the last one.
            data.append({
                'continentName': continent,
                'sources': [{'source': source,
                             'numberOfTweets': counts.get(source, 0)}
                            for source in sources],
            })
        return data

    def getBarChartRaceByLanguageAndDate(self):
        """Return per-day tweet counts for every known language.

        Shape: [{'date': 'd/m/y', 'languagesList': [{'language': ...,
        'numberOfTweets': ...}, ...]}, ...]; languages with no tweets on
        a date are reported with a zero count.
        """
        result = self.browserTweet.aggregate(
            self._cellForConcept(),
            drilldown=["time:day", "language"],
            aggregates=["numberOfTweets_sum"])

        # date string -> {language name -> tweet count}
        data = defaultdict(dict)
        for row in result.table_rows("time"):
            record = row.record
            date = (record['time.day'] + "/" + record['time.month'] +
                    "/" + record['time.year'])
            data[date][record['language.languageName']] = \
                record['numberOfTweets_sum']

        # The full language list comes from a pickled reference structure
        # so every date reports the same, complete set of languages.
        import pickle
        with open('../Docs/languagesStructure.pickle', 'rb') as file:
            languagesList = pickle.load(file)
        print(len(languagesList))

        dataList = []
        for date in data:
            # BUG FIX: the previous version created one `element` dict
            # before the loop and mutated+appended it every iteration, so
            # dataList held N references to the same (last) entry. A fresh
            # dict is now built per date.
            dataList.append({
                'date': date,
                'languagesList': [
                    {'language': language,
                     'numberOfTweets': data[date].get(language, 0)}
                    for language in languagesList],
            })
        return dataList

    def getBarChartRaceBySentimentAndDate(self):
        """Return per-day tweet counts grouped by sentiment label.

        Shape: [{'date': 'd/m/y', 'sentimentsList': [{'sentiment': ...,
        'numberOfTweets': ...}, ...]}, ...]; only sentiments present on a
        given date are listed.
        """
        result = self.browserTweet.aggregate(
            self._cellForConcept(),
            drilldown=["time:day", "sentiment"],
            aggregates=["numberOfTweets_sum"])

        # date string -> {sentiment label -> tweet count}
        data = defaultdict(dict)
        for row in result.table_rows("time"):
            record = row.record
            date = (record['time.day'] + "/" + record['time.month'] +
                    "/" + record['time.year'])
            data[date][record['sentiment.sentimentLabel']] = \
                record['numberOfTweets_sum']

        dataList = []
        for date in data:
            dataList.append({
                'date': date,
                'sentimentsList': [
                    {'sentiment': sentiment, 'numberOfTweets': count}
                    for sentiment, count in data[date].items()],
            })
        return dataList
Exemple #41
0
def get_browser():
    """Return a browser for the selected cube, defaulting to the first
    cube the workspace knows about when none was requested."""
    global cube_name
    if not cube_name:
        # No cube given on the command line: pick the first listed one.
        cube_name = workspace.list_cubes()[0]["name"]
    return workspace.browser(cube_name)


if __name__ == "__main__":

    # Command line: a mandatory server-configuration .ini path plus an
    # optional cube name to browse.
    parser = argparse.ArgumentParser(description='Cubes model browser.')
    parser.add_argument('config', help='server configuration .ini file')
    parser.add_argument('cube', nargs='?', default=None, help='cube name')
    args = parser.parse_args()

    # NOTE(review): SafeConfigParser is the legacy (Python 2 era) name,
    # removed in Python 3.12 -- confirm the target interpreter before
    # upgrading this script.
    config = ConfigParser.SafeConfigParser()
    try:
        config.read(args.config)
    except Exception as e:
        # Re-raise with the underlying parse/IO error attached.
        raise Exception("Unable to load config: %s" % e)

    # `cube_name` and `workspace` are module globals read by get_browser().
    cube_name = args.cube

    workspace = Workspace(config)

    # Development server with debugging enabled.
    app.debug = True
    app.run()
Exemple #42
0
#                       "IBRD_Balance_Sheet__FY2010.csv",
#                       table_name="ibrd_balance",
#                       fields=[
#                             ("category", "string"),
#                             ("category_label", "string"),
#                             ("subcategory", "string"),
#                             ("subcategory_label", "string"),
#                             ("line_item", "string"),
#                             ("year", "integer"),
#                             ("amount", "integer")],
#                       create_id=True
#                   )

from cubes import Workspace, PointCut, Cell

# Workspace backed by the Postgres warehouse (credentials masked upstream).
workspace = Workspace()
workspace.register_default_store(
    "sql", url="postgresql://*****:*****@localhost/willowood")
workspace.import_model("SalesTable.json")

# Browse the sales cube and run the default (grand-total) aggregation.
browser = workspace.browser("salestable")
result = browser.aggregate()

# Grand totals: record count plus the Qty and Value measures.
for measure in ("record_count", "Qty", "Value"):
    print(result.summary[measure])

cube = browser.cube
# Example drill-down, kept for reference:
# result = browser.aggregate(drilldown=["billing_date"])
#
from __future__ import print_function
from cubes import Workspace, Cell, PointCut

# 1. Workspace over the local SQLite database.
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///data.sqlite")
workspace.import_model("model.json")

# 2. Browser for the quake-events cube.
browser = workspace.browser("quake_events")

# 3. Grand-total aggregation (no cell, no drill-down).
result = browser.aggregate()

print("Total\n"
      "----------------------")

# Summary holds one value per aggregate defined in the model.
print("Record count : %8d" % result.summary["record_count"])
print("Total amount : %8d" % result.summary["average_mean"])
"""
print("\n"
      "Drill Down by Category (top-level Item hierarchy)\n"
      "==================================================")
#
result = browser.aggregate(drilldown=["location"])
#
print(("%-20s%10s%10s%10s\n"+"-"*50) % ("Category", "Count", "Total", "Double"))
Exemple #44
0
from flask import Flask, request
from cubes import Workspace

# Flask application serving the rating/thesis OLAP views; debug mode is
# enabled for development use.
app = Flask(__name__)
app.debug = True

# One shared workspace configured from slicer.ini, with a browser per cube
# ("rating" and "thesis") reused across requests.
workspace = Workspace("slicer.ini")
rbrowser = workspace.browser("rating")
tbrowser = workspace.browser("thesis")

def determine_title(url):
    """Map a request path to its (Russian) page title.

    Returns None for any path that has no registered title, matching the
    fall-through behaviour of the original if-chain.
    """
    titles = {
        "/rating": "Рейтинг. Сводная таблица",
        "/rating/group": "Рейтинг по группам",
        "/rating/group/student": "Рейтинг по студентам",
        "/rating/course": "Рейтинг по дисциплинам",
        "/rating/course/module": "Рейтинг по модулям",
        "/thesis": "Дипломы. Сводная таблица",
        "/thesis/group": "Дипломы по группам",
        "/thesis/group/student": "Дипломы по студентам",
        "/thesis/protected_at": "Дипломы по дате защиты",
        "/thesis/teacher": "Дипломы по преподавателям",
    }
    return titles.get(url)
Exemple #45
0
from cubes import Workspace

print("Python Cubes - Test2")

# Workspace backed by the local SQLite database file.
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///baza.sqlite")

# Load the cube model describing the POMIAR fact table.
workspace.import_model("model.json")

# A browser executes aggregations against one cube.
browser = workspace.browser("POMIAR")

# Aggregate twice: once per GRUPA, once per ROK.
res = browser.aggregate(drilldown=["GRUPA"])
res2 = browser.aggregate(drilldown=["ROK"])

# Print the grand total followed by one row per GRUPA member.
print(res.summary)
for r in res:
    print(r)

print("Ala ma kota!")

# Same for the per-ROK aggregation.
print(res2.summary)
for r in res2:
    print(r)
Exemple #46
0
 def test_base_ignorance(self):
     """A workspace built without the base model must not know the
     built-in base_time dimension."""
     workspace = Workspace(load_base_model=False)
     with self.assertRaises(NoSuchDimensionError):
         workspace.dimension("base_time")
Exemple #47
0
from cubes import Workspace

print("Python Cubes Test!")

# Workspace backed by the local SQLite database file.
workspace = Workspace()
workspace.register_default_store("sql", url="sqlite:///db.sqlite3")

# Load the logical model.
workspace.import_model("model.json")

# Browser for the temperature-analysis cube.
browser = workspace.browser("analiza_temperatura")

# Aggregate, drilling down by continent.
res = browser.aggregate(drilldown=["kontynent"])

# Show the overall summary and each drill-down row.
print(res.summary)
for r in res:
    print(r)
Exemple #48
0
 def test_base_existence(self):
     """The default workspace ships with the built-in base_time
     dimension."""
     workspace = Workspace()
     base_time = workspace.dimension("base_time")
     self.assertEqual(base_time.name, "base_time")