Example 1
    def runsql(self):
        """Runs arbitrary sql and returns and html table"""
        session = db.session()
        limit = 1000
        data = json.loads(request.form.get('data'))
        sql = data.get('sql')
        database_id = data.get('database_id')
        mydb = session.query(models.Database).filter_by(id=database_id).first()

        if (
                not self.appbuilder.sm.has_access(
                    'all_datasource_access', 'all_datasource_access')):
            raise Exception(_(
                "This view requires the `all_datasource_access` permission"))
        content = ""
        if mydb:
            eng = mydb.get_sqla_engine()
            if limit:
                sql = sql.strip().strip(';')
                qry = (
                    select('*')
                    .select_from(TextAsFrom(text(sql), ['*']).alias('inner_qry'))
                    .limit(limit)
                )
                sql = str(qry.compile(eng, compile_kwargs={"literal_binds": True}))
            try:
                df = pd.read_sql_query(sql=sql, con=eng)
                content = df.to_html(
                    index=False,
                    na_rep='',
                    classes=(
                        "dataframe table table-striped table-bordered "
                        "table-condensed sql_results").split(' '))
            except Exception as e:
                content = (
                    '<div class="alert alert-danger">'
                    "{}</div>"
                ).format(str(e))  # str(e), not e.message, works on Python 2 and 3
        session.commit()
        return content
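A minimal, self-contained sketch of the LIMIT-wrapping pattern above, runnable against
an in-memory SQLite engine on SQLAlchemy 1.x (the table and query are made up for
illustration):

from sqlalchemy import create_engine, select, text
from sqlalchemy.sql.selectable import TextAsFrom

engine = create_engine('sqlite://')
engine.execute('CREATE TABLE t (x INTEGER)')

user_sql = 'SELECT x FROM t;'
qry = (
    select('*')
    .select_from(
        TextAsFrom(text(user_sql.strip().strip(';')), ['*']).alias('inner_qry'))
    .limit(1000)
)
# Compiles to roughly: SELECT * FROM (SELECT x FROM t) AS inner_qry LIMIT 1000
print(qry.compile(engine, compile_kwargs={"literal_binds": True}))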
Example 2
            'Personal Info',
            {'fields': ['address', 'birthday', 'personal_phone', 'personal_celphone'], 'expanded': False}),
    ]


class GroupMasterView(MasterDetailView):
    datamodel = SQLAInterface(ContactGroup)
    related_views = [ContactGeneralView]


class GroupGeneralView(ModelView):
    datamodel = SQLAInterface(ContactGroup)
    related_views = [ContactGeneralView]


fixed_translations_import = [
    _("List Groups"),
    _("Manage Groups"),
    _("List Contacts"),
    _("Contacts Chart"),
    _("Contacts Birth Chart")]

db.create_all()
fill_gender()
appbuilder.add_view(GroupMasterView, "List Groups", icon="fa-folder-open-o", category="Contacts")
appbuilder.add_separator("Contacts")
appbuilder.add_view(GroupGeneralView, "Manage Groups", icon="fa-folder-open-o", category="Contacts")
appbuilder.add_view(ContactGeneralView, "List Contacts", icon="fa-envelope", category="Contacts")

Example 3
    label_columns = ContactGeneralView.label_columns
    group_by_columns = ["group", "gender"]
    datamodel = SQLAModel(Contact, db.session)


class ContactTimeChartView(TimeChartView):
    chart_title = "Grouped Birth contacts"
    chart_type = "AreaChart"
    label_columns = ContactGeneralView.label_columns
    group_by_columns = ["birthday"]
    datamodel = SQLAModel(Contact, db.session)


class GroupGeneralView(GeneralView):
    datamodel = SQLAModel(Group, db.session)
    related_views = [ContactGeneralView]


fixed_translations_import = [_("List Groups"), _("List Contacts"), _("Contacts Chart"), _("Contacts Birth Chart")]


fill_gender()
genapp = BaseApp(app, db)
genapp.add_view(
    GroupGeneralView(), "List Groups", icon="fa-folder-open-o", category="Contacts", category_icon="fa-envelope"
)
genapp.add_view(ContactGeneralView(), "List Contacts", icon="fa-envelope", category="Contacts")
genapp.add_separator("Contacts")
genapp.add_view(ContactChartView(), "Contacts Chart", icon="fa-dashboard", category="Contacts")
genapp.add_view(ContactTimeChartView(), "Contacts Birth Chart", icon="fa-dashboard", category="Contacts")
Example 4
log = logging.getLogger(__name__)

def aggregate(label=''):
    """
        Use this decorator to set a label for your aggregation functions on charts.

        :param label:
            The label to complement with the column
    """
    def wrap(f):
        f._label = label
        return f
    return wrap


@aggregate(_('Count of'))
def aggregate_count(items, col):
    """
        Function to use on Group by Charts.
        accepts a list and returns the count of the list's items
    """
    return len(list(items))


@aggregate(_('Sum of'))
def aggregate_sum(items, col):
    """
        Function to use on Group by Charts.
        accepts a list and returns the sum of the list's items
    """
    return sum(getattr(item, col) for item in items)
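A hedged usage sketch building on the decorator above: a custom average aggregation
for Group by Charts (aggregate_avg is hypothetical, not shipped with Flask-AppBuilder;
the label passed to @aggregate is what the chart view shows next to the column name):

@aggregate(_('Average of'))
def aggregate_avg(items, col):
    """
        Function to use on Group by Charts.
        Accepts an iterable and returns the average of each item's `col` attribute.
    """
    items = list(items)
    if not items:
        return 0
    return sum(getattr(item, col) for item in items) / float(len(items))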
Example 5
    def explore(self, datasource_type, datasource_id):
        error_redirect = '/slicemodelview/list/'
        datasource_class = (
            models.SqlaTable if datasource_type == "table"
            else models.DruidDatasource)
        datasources = (
            db.session
            .query(datasource_class)
            .all()
        )
        datasources = sorted(datasources, key=lambda ds: ds.full_name)
        datasource = [ds for ds in datasources if int(datasource_id) == ds.id]
        datasource = datasource[0] if datasource else None
        slice_id = request.args.get("slice_id")
        slc = None
        slice_add_perm = self.appbuilder.sm.has_access(
            'can_add', 'SliceModelView')
        slice_edit_perm = self.appbuilder.sm.has_access(
            'can_edit', 'SliceModelView')
        slice_download_perm = self.appbuilder.sm.has_access(
            'can_download', 'SliceModelView')

        if slice_id:
            slc = (
                db.session.query(models.Slice)
                .filter_by(id=slice_id)
                .first()
            )
        if not datasource:
            flash(_("The datasource seems to have been deleted"), "alert")
            return redirect(error_redirect)

        all_datasource_access = self.appbuilder.sm.has_access(
            'all_datasource_access', 'all_datasource_access')
        datasource_access = self.appbuilder.sm.has_access(
            'datasource_access', datasource.perm)
        if not (all_datasource_access or datasource_access):
            flash(_("You don't seem to have access to this datasource"), "danger")
            return redirect(error_redirect)

        action = request.args.get('action')
        if action in ('save', 'overwrite'):
            return self.save_or_overwrite_slice(
                request.args, slc, slice_add_perm, slice_edit_perm)

        viz_type = request.args.get("viz_type")
        if not viz_type and datasource.default_endpoint:
            return redirect(datasource.default_endpoint)
        if not viz_type:
            viz_type = "table"
        try:
            obj = viz.viz_types[viz_type](
                datasource,
                form_data=request.args,
                slice_=slc)
        except Exception as e:
            flash(str(e), "danger")
            return redirect(error_redirect)
        if request.args.get("json") == "true":
            status = 200
            if config.get("DEBUG"):
                # Allows for nice debugger stack traces in debug mode
                payload = obj.get_json()
            else:
                try:
                    payload = obj.get_json()
                except Exception as e:
                    logging.exception(e)
                    payload = str(e)
                    status = 500
            resp = Response(
                payload,
                status=status,
                headers=generate_download_headers("json"),
                mimetype="application/json")
            return resp
        elif request.args.get("csv") == "true":
            status = 200
            payload = obj.get_csv()
            return Response(
                payload,
                status=status,
                headers=generate_download_headers("csv"),
                mimetype="application/csv")
        else:
            if request.args.get("standalone") == "true":
                template = "caravel/standalone.html"
            else:
                template = "caravel/explore.html"

            try:
                resp = self.render_template(
                    template, viz=obj, slice=slc, datasources=datasources,
                    can_add=slice_add_perm, can_edit=slice_edit_perm,
                    can_download=slice_download_perm)
            except Exception as e:
                if config.get("DEBUG"):
                    raise  # a bare raise preserves the original traceback
                return Response(
                    str(e),
                    status=500,
                    mimetype="application/json")
            return resp
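The endpoint above serves several renderings driven by query-string flags. A hedged
usage sketch (the host, port, and route prefix are assumptions; adjust to wherever
the view is actually registered):

import requests

base = 'http://localhost:8088/explore/table/1/'
requests.get(base, params={'viz_type': 'table'})                  # full explore page
requests.get(base, params={'viz_type': 'table', 'json': 'true'})  # JSON payload
requests.get(base, params={'viz_type': 'table', 'csv': 'true'})   # CSV download
requests.get(base, params={'standalone': 'true'})                 # bare, embeddable page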
Example 6
class ContactTimeChartView(TimeChartView):
    chart_title = 'Grouped Birth contacts'
    chart_type = 'AreaChart'
    label_columns = ContactGeneralView.label_columns
    group_by_columns = ['birthday']
    datamodel = SQLAModel(Contact)


class GroupGeneralView(GeneralView):
    datamodel = SQLAModel(Group)
    related_views = [ContactGeneralView]


fixed_translations_import = [
    _("List Groups"),
    _("List Contacts"),
    _("Contacts Chart"),
    _("Contacts Birth Chart")
]

fill_gender()
appbuilder.add_view(GroupGeneralView,
                    "List Groups",
                    icon="fa-folder-open-o",
                    category="Contacts",
                    category_icon='fa-envelope')
appbuilder.add_view(ContactGeneralView,
                    "List Contacts",
                    icon="fa-envelope",
                    category="Contacts")
Example 7
    }

    def pre_add(self, db):
        conn = sqla.engine.url.make_url(db.sqlalchemy_uri)
        db.password = conn.password
        conn.password = "******" * 10 if conn.password else None
        db.sqlalchemy_uri = str(conn)  # hides the password

    def pre_update(self, db):
        self.pre_add(db)

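What pre_add does to a URI, illustrated with SQLAlchemy's make_url directly (the
connection string is made up; URL objects are mutable like this on SQLAlchemy 1.x only):

import sqlalchemy as sqla

conn = sqla.engine.url.make_url('mysql://scott:tiger@localhost/test')
print(conn.password)   # 'tiger' -- kept on the model as db.password before masking
conn.password = '******' * 10
print(str(conn))       # password masked, safe to echo back in the UI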

appbuilder.add_view(
    DatabaseView,
    "Databases",
    label=_("Databases"),
    icon="fa-database",
    category=_("Sources"),
    category_icon='fa-database',)


class TableModelView(CaravelModelView, DeleteMixin):  # noqa
    datamodel = SQLAInterface(models.SqlaTable)
    list_columns = [
        'table_link', 'database', 'sql_link', 'is_featured',
        'changed_by_', 'changed_on_']
    add_columns = [
        'table_name', 'database', 'schema',
        'default_endpoint', 'offset', 'cache_timeout']
    edit_columns = [
        'table_name', 'is_featured', 'database', 'schema', 'description', 'owner',
Example 8
from flask.ext.babelpkg import lazy_gettext as _

"""
This Module is not used.
Just use it to automate Babel extraction
"""

auto_translations_import = [
_("Search"),
_("Back"),
_("Save"),
_("This field is required."),
_("Not a valid date value"),
_("No records found")
]
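Strings listed this way are picked up by Babel's extractor even though the module is
never imported; a typical extraction run looks like this (the babel.cfg path is
project-specific):

    pybabel extract -F babel/babel.cfg -k lazy_gettext -o messages.pot .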
Example 9
from flask.ext.babelpkg import lazy_gettext as _

"""
This Module is not used.
Just use it to automate Babel extraction
"""

auto_translations_import = [
_("Security"),
_("List Users"),
_("Base Permissions"),
_("Views/Menus"),
_("Permission on Views/Menus"),
_("Search"),
_("Back"),
_("Save"),
_("This field is required."),
_("Not a valid date value"),
_("No records found")
]
Example 10
    }

    def pre_add(self, db):
        conn = sqla.engine.url.make_url(db.sqlalchemy_uri)
        db.password = conn.password
        conn.password = "******" * 10 if conn.password else None
        db.sqlalchemy_uri = str(conn)  # hides the password

    def pre_update(self, db):
        self.pre_add(db)


appbuilder.add_view(
    DatabaseView,
    "Databases",
    label=_("Databases"),
    icon="fa-database",
    category=_("Sources"),
    category_icon='fa-database',
)


class TableModelView(CaravelModelView, DeleteMixin):  # noqa
    datamodel = SQLAInterface(models.SqlaTable)
    list_columns = [
        'table_link', 'database', 'sql_link', 'is_featured', 'changed_by_',
        'changed_on_'
    ]
    add_columns = [
        'table_name', 'database', 'schema', 'default_endpoint', 'offset',
        'cache_timeout'
Example 11
class TableColumnInlineView(CompactCRUDMixin, CaravelModelView):  # noqa
    datamodel = SQLAInterface(models.TableColumn)
    can_delete = False
    edit_columns = [
        'column_name',
        'verbose_name',
        'description',
        'groupby',
        'filterable',
        'table',
        'count_distinct',
        'sum',
        'min',
        'max',
        'expression',
        'is_dttm',
    ]
    add_columns = edit_columns
    list_columns = [
        'column_name', 'type', 'groupby', 'filterable', 'count_distinct',
        'sum', 'min', 'max', 'is_dttm'
    ]
    page_size = 500
    description_columns = {
        'is_dttm': _(
            "Whether to make this column available as a "
            "[Time Granularity] option; the column has to be DATETIME or "
            "DATETIME-like"),
        'expression': utils.markdown(
            "A valid SQL expression as supported by the underlying backend. "
            "Example: `substr(name, 1, 1)`", True),
    }
    label_columns = {
        'column_name': _("Column"),
        'verbose_name': _("Verbose Name"),
        'description': _("Description"),
        'groupby': _("Groupable"),
        'filterable': _("Filterable"),
        'table': _("Table"),
        'count_distinct': _("Count Distinct"),
        'sum': _("Sum"),
        'min': _("Min"),
        'max': _("Max"),
        'expression': _("Expression"),
        'is_dttm': _("Is temporal"),
    }
Example 12
    def query(  # sqla
            self,
            groupby,
            metrics,
            granularity,
            from_dttm,
            to_dttm,
            filter=None,  # noqa
            is_timeseries=True,
            timeseries_limit=15,
            row_limit=None,
            inner_from_dttm=None,
            inner_to_dttm=None,
            extras=None,
            columns=None):
        """Querying any sqla table from this common interface"""
        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        cols = {col.column_name: col for col in self.columns}
        qry_start_dttm = datetime.now()

        if not granularity and is_timeseries:
            raise Exception(
                _("Datetime column not provided as part table configuration "
                  "and is required by this type of chart"))

        metrics_exprs = [
            m.sqla_col for m in self.metrics if m.metric_name in metrics
        ]

        if metrics:
            main_metric_expr = [
                m.sqla_col for m in self.metrics if m.metric_name == metrics[0]
            ][0]
        else:
            main_metric_expr = literal_column("COUNT(*)").label("ccount")

        select_exprs = []
        groupby_exprs = []

        if groupby:
            select_exprs = []
            inner_select_exprs = []
            inner_groupby_exprs = []
            for s in groupby:
                col = cols[s]
                outer = col.sqla_col
                inner = col.sqla_col.label(col.column_name + '__')

                groupby_exprs.append(outer)
                select_exprs.append(outer)
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)
        elif columns:
            for s in columns:
                select_exprs.append(cols[s].sqla_col)
            metrics_exprs = []

        if granularity:
            dttm_expr = cols[granularity].sqla_col.label('timestamp')
            timestamp = dttm_expr

            # Transforming time grain into an expression based on configuration
            time_grain_sqla = extras.get('time_grain_sqla')
            grain = self.database.grains_dict().get(time_grain_sqla)
            if grain:
                timestamp_grain = literal_column(
                    grain.function.format(col=dttm_expr)).label('timestamp')
            else:
                # No grain requested or configured: use the raw timestamp column
                timestamp_grain = timestamp

            if is_timeseries:
                select_exprs += [timestamp_grain]
                groupby_exprs += [timestamp_grain]

            time_filter = [
                timestamp >= text(self.database.dttm_converter(from_dttm)),
                timestamp <= text(self.database.dttm_converter(to_dttm)),
            ]
            inner_time_filter = copy(time_filter)
            if inner_from_dttm:
                inner_time_filter[0] = timestamp >= text(
                    self.database.dttm_converter(inner_from_dttm))
            if inner_to_dttm:
                inner_time_filter[1] = timestamp <= text(
                    self.database.dttm_converter(inner_to_dttm))
        else:
            inner_time_filter = []

        select_exprs += metrics_exprs
        qry = select(select_exprs)

        tbl = table(self.table_name)
        if self.schema:
            tbl.schema = self.schema

        if not columns:
            qry = qry.group_by(*groupby_exprs)

        where_clause_and = []
        having_clause_and = []
        for col, op, eq in (filter or []):  # filter may be None (its default)
            col_obj = cols[col]
            if op in ('in', 'not in'):
                values = eq.split(",")
                cond = col_obj.sqla_col.in_(values)
                if op == 'not in':
                    cond = ~cond
                where_clause_and.append(cond)
        if extras and 'where' in extras:
            where_clause_and += [text(extras['where'])]
        if extras and 'having' in extras:
            having_clause_and += [text(extras['having'])]
        if granularity:
            qry = qry.where(and_(*(time_filter + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))
        if groupby:
            qry = qry.order_by(desc(main_metric_expr))
        qry = qry.limit(row_limit)

        if timeseries_limit and groupby:
            subq = select(inner_select_exprs)
            subq = subq.select_from(tbl)
            subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
            subq = subq.group_by(*inner_groupby_exprs)
            subq = subq.order_by(desc(main_metric_expr))
            subq = subq.limit(timeseries_limit)
            on_clause = []
            for i, gb in enumerate(groupby):
                on_clause.append(groupby_exprs[i] == column(gb + '__'))

            tbl = tbl.join(subq.alias(), and_(*on_clause))

        qry = qry.select_from(tbl)

        engine = self.database.get_sqla_engine()
        sql = "{}".format(
            qry.compile(
                engine,
                compile_kwargs={"literal_binds": True},
            ), )
        print(sql)
        df = pd.read_sql_query(sql=sql, con=engine)
        sql = sqlparse.format(sql, reindent=True)
        return QueryResult(df=df,
                           duration=datetime.now() - qry_start_dttm,
                           query=sql)
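For orientation, with groupby=['name'], one metric, and timeseries_limit set, the
method above compiles to SQL shaped roughly like this (illustrative and
backend-dependent):

    SELECT name, timestamp, <metric_expr> AS metric
    FROM tbl
    JOIN (
        SELECT name AS name__
        FROM tbl
        WHERE <where clauses AND inner time filter>
        GROUP BY name__
        ORDER BY <metric_expr> DESC
        LIMIT 15                      -- timeseries_limit: keep only the top series
    ) AS anon ON name = name__
    WHERE <time filter AND where clauses>
    GROUP BY name, timestamp
    ORDER BY <metric_expr> DESC
    LIMIT <row_limit>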
Example 13
    def query(  # druid
        self,
        groupby,
        metrics,
        granularity,
        from_dttm,
        to_dttm,
        filter=None,  # noqa
        is_timeseries=True,
        timeseries_limit=None,
        row_limit=None,
        inner_from_dttm=None,
        inner_to_dttm=None,
        extras=None,  # noqa
        select=None,
    ):  # noqa
        """Runs a query against Druid and returns a dataframe.

        This query interface is common to SqlAlchemy and Druid
        """
        # TODO refactor into using a TBD Query object
        qry_start_dttm = datetime.now()

        inner_from_dttm = inner_from_dttm or from_dttm
        inner_to_dttm = inner_to_dttm or to_dttm

        # add tzinfo to native datetime with config
        from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
        to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))

        query_str = ""
        metrics_dict = {m.metric_name: m for m in self.metrics}
        all_metrics = []
        post_aggs = {}

        def recursive_get_fields(_conf):
            _fields = _conf.get('fields', [])
            field_names = []
            for _f in _fields:
                _type = _f.get('type')
                if _type in ['fieldAccess', 'hyperUniqueCardinality']:
                    field_names.append(_f.get('fieldName'))
                elif _type == 'arithmetic':
                    field_names += recursive_get_fields(_f)

            return list(set(field_names))

        for metric_name in metrics:
            metric = metrics_dict[metric_name]
            if metric.metric_type != 'postagg':
                all_metrics.append(metric_name)
            else:
                conf = metric.json_obj
                all_metrics += recursive_get_fields(conf)
                all_metrics += conf.get('fieldNames', [])
                if conf.get('type') == 'javascript':
                    post_aggs[metric_name] = JavascriptPostAggregator(
                        name=conf.get('name'),
                        field_names=conf.get('fieldNames'),
                        function=conf.get('function'))
                else:
                    post_aggs[metric_name] = Postaggregator(
                        conf.get('fn', "/"), conf.get('fields', []),
                        conf.get('name', ''))
        aggregations = {
            m.metric_name: m.json_obj
            for m in self.metrics if m.metric_name in all_metrics
        }
        granularity = granularity or "all"
        if granularity != "all":
            granularity = utils.parse_human_timedelta(
                granularity).total_seconds() * 1000
        if not isinstance(granularity, string_types):
            granularity = {"type": "duration", "duration": granularity}
            origin = extras.get('druid_time_origin')
            if origin:
                dttm = utils.parse_human_datetime(origin)
                granularity['origin'] = dttm.isoformat()

        qry = dict(
            datasource=self.datasource_name,
            dimensions=groupby,
            aggregations=aggregations,
            granularity=granularity,
            post_aggregations=post_aggs,
            intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
        )
        filters = None
        for col, op, eq in (filter or []):  # filter may be None (its default)
            cond = None
            if op == '==':
                cond = Dimension(col) == eq
            elif op == '!=':
                cond = ~(Dimension(col) == eq)
            elif op in ('in', 'not in'):
                fields = []
                values = eq.split(',')
                if len(values) > 1:
                    for s in values:
                        s = s.strip()
                        fields.append(Filter.build_filter(Dimension(col) == s))
                    cond = Filter(type="or", fields=fields)
                else:
                    cond = Dimension(col) == eq
                if op == 'not in':
                    cond = ~cond
            elif op == 'regex':
                cond = Filter(type="regex", pattern=eq, dimension=col)
            if filters:
                filters = Filter(type="and",
                                 fields=[
                                     Filter.build_filter(cond),
                                     Filter.build_filter(filters)
                                 ])
            else:
                filters = cond

        if filters:
            qry['filter'] = filters

        client = self.cluster.get_pydruid_client()
        orig_filters = filters
        if timeseries_limit and is_timeseries:
            # Limit on the number of timeseries, doing a two-phases query
            pre_qry = deepcopy(qry)
            pre_qry['granularity'] = "all"
            pre_qry['limit_spec'] = {
                "type": "default",
                "limit": timeseries_limit,
                'intervals': (inner_from_dttm.isoformat() + '/' +
                              inner_to_dttm.isoformat()),
                "columns": [{
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }],
            }
            client.groupby(**pre_qry)
            query_str += "// Two phase query\n// Phase 1\n"
            query_str += json.dumps(client.query_builder.last_query.query_dict,
                                    indent=2) + "\n"
            query_str += "//\nPhase 2 (built based on phase one's results)\n"
            df = client.export_pandas()
            if df is not None and not df.empty:
                dims = qry['dimensions']
                filters = []
                for unused, row in df.iterrows():
                    fields = []
                    for dim in dims:
                        f = Filter.build_filter(Dimension(dim) == row[dim])
                        fields.append(f)
                    if len(fields) > 1:
                        filt = Filter(type="and", fields=fields)
                        filters.append(Filter.build_filter(filt))
                    elif fields:
                        filters.append(fields[0])

                if filters:
                    ff = Filter(type="or", fields=filters)
                    if not orig_filters:
                        qry['filter'] = ff
                    else:
                        qry['filter'] = Filter(
                            type="and",
                            fields=[
                                Filter.build_filter(ff),
                                Filter.build_filter(orig_filters)
                            ])
                qry['limit_spec'] = None
        if row_limit:
            qry['limit_spec'] = {
                "type": "default",
                "limit": row_limit,
                "columns": [{
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }],
            }
        client.groupby(**qry)
        query_str += json.dumps(client.query_builder.last_query.query_dict,
                                indent=2)
        df = client.export_pandas()
        if df is None or df.size == 0:
            raise Exception(_("No data was returned."))

        if (not is_timeseries and granularity == "all"
                and 'timestamp' in df.columns):
            del df['timestamp']

        # Reordering columns
        cols = []
        if 'timestamp' in df.columns:
            cols += ['timestamp']
        cols += [col for col in groupby if col in df.columns]
        cols += [col for col in metrics if col in df.columns]
        df = df[cols]
        return QueryResult(df=df,
                           query=query_str,
                           duration=datetime.now() - qry_start_dttm)
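The two-phase pattern above deserves a note: when a timeseries limit applies, phase
one reruns the groupby with granularity "all" to find the top-N dimension
combinations, and phase two executes the real query filtered down to just those
combinations, so only the strongest series end up on the chart.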
Example 14
    def query(  # druid
            self, groupby, metrics,
            granularity,
            from_dttm, to_dttm,
            filter=None,  # noqa
            is_timeseries=True,
            timeseries_limit=None,
            row_limit=None,
            inner_from_dttm=None, inner_to_dttm=None,
            extras=None,  # noqa
            select=None,):  # noqa
        """Runs a query against Druid and returns a dataframe.

        This query interface is common to SqlAlchemy and Druid
        """
        # TODO refactor into using a TBD Query object
        qry_start_dttm = datetime.now()

        inner_from_dttm = inner_from_dttm or from_dttm
        inner_to_dttm = inner_to_dttm or to_dttm

        # add tzinfo to native datetime with config
        from_dttm = from_dttm.replace(tzinfo=config.get("DRUID_TZ"))
        to_dttm = to_dttm.replace(tzinfo=config.get("DRUID_TZ"))

        query_str = ""
        metrics_dict = {m.metric_name: m for m in self.metrics}
        all_metrics = []
        post_aggs = {}
        for metric_name in metrics:
            metric = metrics_dict[metric_name]
            if metric.metric_type != 'postagg':
                all_metrics.append(metric_name)
            else:
                conf = metric.json_obj
                fields = conf.get('fields', [])
                all_metrics += [
                    f.get('fieldName') for f in fields
                    if f.get('type') == 'fieldAccess']
                all_metrics += conf.get('fieldNames', [])
                if conf.get('type') == 'javascript':
                    post_aggs[metric_name] = JavascriptPostAggregator(
                        name=conf.get('name'),
                        field_names=conf.get('fieldNames'),
                        function=conf.get('function'))
                else:
                    post_aggs[metric_name] = Postaggregator(
                        conf.get('fn', "/"),
                        conf.get('fields', []),
                        conf.get('name', ''))
        aggregations = {
            m.metric_name: m.json_obj
            for m in self.metrics
            if m.metric_name in all_metrics
        }
        granularity = granularity or "all"
        if granularity != "all":
            granularity = utils.parse_human_timedelta(
                granularity).total_seconds() * 1000
        if not isinstance(granularity, string_types):
            granularity = {"type": "duration", "duration": granularity}
            origin = extras.get('druid_time_origin')
            if origin:
                dttm = utils.parse_human_datetime(origin)
                granularity['origin'] = dttm.isoformat()

        qry = dict(
            datasource=self.datasource_name,
            dimensions=groupby,
            aggregations=aggregations,
            granularity=granularity,
            post_aggregations=post_aggs,
            intervals=from_dttm.isoformat() + '/' + to_dttm.isoformat(),
        )
        filters = None
        for col, op, eq in (filter or []):  # filter may be None (its default)
            cond = None
            if op == '==':
                cond = Dimension(col) == eq
            elif op == '!=':
                cond = ~(Dimension(col) == eq)
            elif op in ('in', 'not in'):
                fields = []
                values = eq.split(',')
                if len(values) > 1:
                    for s in values:
                        s = s.strip()
                        fields.append(Filter.build_filter(Dimension(col) == s))
                    cond = Filter(type="or", fields=fields)
                else:
                    cond = Dimension(col) == eq
                if op == 'not in':
                    cond = ~cond
            if filters:
                filters = Filter(type="and", fields=[
                    Filter.build_filter(cond),
                    Filter.build_filter(filters)
                ])
            else:
                filters = cond

        if filters:
            qry['filter'] = filters

        client = self.cluster.get_pydruid_client()
        orig_filters = filters
        if timeseries_limit and is_timeseries:
            # Limit on the number of timeseries, doing a two-phases query
            pre_qry = deepcopy(qry)
            pre_qry['granularity'] = "all"
            pre_qry['limit_spec'] = {
                "type": "default",
                "limit": timeseries_limit,
                'intervals': (
                    inner_from_dttm.isoformat() + '/' +
                    inner_to_dttm.isoformat()),
                "columns": [{
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }],
            }
            client.groupby(**pre_qry)
            query_str += "// Two phase query\n// Phase 1\n"
            query_str += json.dumps(client.query_dict, indent=2) + "\n"
            query_str += "//\nPhase 2 (built based on phase one's results)\n"
            df = client.export_pandas()
            if df is not None and not df.empty:
                dims = qry['dimensions']
                filters = []
                for unused, row in df.iterrows():
                    fields = []
                    for dim in dims:
                        f = Filter.build_filter(Dimension(dim) == row[dim])
                        fields.append(f)
                    if len(fields) > 1:
                        filt = Filter(type="and", fields=fields)
                        filters.append(Filter.build_filter(filt))
                    elif fields:
                        filters.append(fields[0])

                if filters:
                    ff = Filter(type="or", fields=filters)
                    if not orig_filters:
                        qry['filter'] = ff
                    else:
                        qry['filter'] = Filter(type="and", fields=[
                            Filter.build_filter(ff),
                            Filter.build_filter(orig_filters)])
                qry['limit_spec'] = None
        if row_limit:
            qry['limit_spec'] = {
                "type": "default",
                "limit": row_limit,
                "columns": [{
                    "dimension": metrics[0] if metrics else self.metrics[0],
                    "direction": "descending",
                }],
            }
        client.groupby(**qry)
        query_str += json.dumps(client.query_dict, indent=2)
        df = client.export_pandas()
        if df is None or df.size == 0:
            raise Exception(_("No data was returned."))

        if (
                not is_timeseries and
                granularity == "all" and
                'timestamp' in df.columns):
            del df['timestamp']

        # Reordering columns
        cols = []
        if 'timestamp' in df.columns:
            cols += ['timestamp']
        cols += [col for col in groupby if col in df.columns]
        cols += [col for col in metrics if col in df.columns]
        df = df[cols]
        return QueryResult(
            df=df,
            query=query_str,
            duration=datetime.now() - qry_start_dttm)
Example 15
    field1 = StringField(
        'First Name',
        description='Your field number one!',
        validators=[DataRequired()],
        widget=BS3TextFieldWidget())
    field2 = StringField(
        'Last Name',
        description='Your field number two!',
        widget=BS3TextFieldWidget())
    field3 = DateField(
        'Date',
        format='%m/%d/%Y',
        widget=DatePickerWidget())

class MyFormView(SimpleFormView):
    form = MyForm
    form_title = 'This is my first form view'
    message = 'My form submitted'

    def form_get(self, form):
        # Prefill via the field's .data attribute; assigning to form.field1
        # directly would replace the field object itself
        form.field1.data = 'This was prefilled'

    def form_post(self, form):
        # post process form
        flash(self.message, 'info')

appbuilder.add_view(MyFormView, "My form View", icon="fa-group", label=_('My form View'),
                     category="My Forms", category_icon="fa-cogs")

db.create_all()


#appbuilder.add_view(MyView(), "Method2", href='/myview/method2/jonh', category='My View')
# Use add_link instead; there is no need to instantiate MyView twice.
appbuilder.add_link("View Terminals", href='/Terminals/View Terminals/jonh', category='Terminals')
#appbuilder.add_link("Method3", href='/Terminals/method3/jonh', category='Terminals')
appbuilder.add_view(DepartmentView, "Create Terminals", icon="fa-folder-open-o", category="Terminals")
Example 16
    def query(  # sqla
            self, groupby, metrics,
            granularity,
            from_dttm, to_dttm,
            filter=None,  # noqa
            is_timeseries=True,
            timeseries_limit=15, row_limit=None,
            inner_from_dttm=None, inner_to_dttm=None,
            extras=None,
            columns=None):
        """Querying any sqla table from this common interface"""
        # For backward compatibility
        if granularity not in self.dttm_cols:
            granularity = self.main_dttm_col

        cols = {col.column_name: col for col in self.columns}
        qry_start_dttm = datetime.now()

        if not granularity and is_timeseries:
            raise Exception(_(
                "Datetime column not provided as part table configuration "
                "and is required by this type of chart"))

        metrics_exprs = [
            m.sqla_col
            for m in self.metrics if m.metric_name in metrics]

        if metrics:
            main_metric_expr = [
                m.sqla_col for m in self.metrics
                if m.metric_name == metrics[0]][0]
        else:
            main_metric_expr = literal_column("COUNT(*)").label("ccount")

        select_exprs = []
        groupby_exprs = []

        if groupby:
            select_exprs = []
            inner_select_exprs = []
            inner_groupby_exprs = []
            for s in groupby:
                col = cols[s]
                outer = col.sqla_col
                inner = col.sqla_col.label('__' + col.column_name)

                groupby_exprs.append(outer)
                select_exprs.append(outer)
                inner_groupby_exprs.append(inner)
                inner_select_exprs.append(inner)
        elif columns:
            for s in columns:
                select_exprs.append(cols[s].sqla_col)
            metrics_exprs = []

        if granularity:
            dttm_expr = cols[granularity].sqla_col.label('timestamp')
            timestamp = dttm_expr

            # Transforming time grain into an expression based on configuration
            time_grain_sqla = extras.get('time_grain_sqla')
            grain = self.database.grains_dict().get(time_grain_sqla)
            if grain:
                timestamp_grain = literal_column(
                    grain.function.format(col=dttm_expr)).label('timestamp')
            else:
                # No grain requested or configured: use the raw timestamp column
                timestamp_grain = timestamp

            if is_timeseries:
                select_exprs += [timestamp_grain]
                groupby_exprs += [timestamp_grain]

            tf = '%Y-%m-%d %H:%M:%S.%f'
            time_filter = [
                timestamp >= from_dttm.strftime(tf),
                timestamp <= to_dttm.strftime(tf),
            ]
            inner_time_filter = copy(time_filter)
            if inner_from_dttm:
                inner_time_filter[0] = timestamp >= inner_from_dttm.strftime(tf)
            if inner_to_dttm:
                inner_time_filter[1] = timestamp <= inner_to_dttm.strftime(tf)
        else:
            inner_time_filter = []

        select_exprs += metrics_exprs
        qry = select(select_exprs)

        tbl = table(self.table_name)
        if self.schema:
            tbl.schema = self.schema

        if not columns:
            qry = qry.group_by(*groupby_exprs)

        where_clause_and = []
        having_clause_and = []
        for col, op, eq in (filter or []):  # filter may be None (its default)
            col_obj = cols[col]
            if op in ('in', 'not in'):
                values = eq.split(",")
                cond = col_obj.sqla_col.in_(values)
                if op == 'not in':
                    cond = ~cond
                where_clause_and.append(cond)
        if extras and 'where' in extras:
            where_clause_and += [text(extras['where'])]
        if extras and 'having' in extras:
            having_clause_and += [text(extras['having'])]
        if granularity:
            qry = qry.where(and_(*(time_filter + where_clause_and)))
        else:
            qry = qry.where(and_(*where_clause_and))
        qry = qry.having(and_(*having_clause_and))
        if groupby:
            qry = qry.order_by(desc(main_metric_expr))
        qry = qry.limit(row_limit)

        if timeseries_limit and groupby:
            subq = select(inner_select_exprs)
            subq = subq.select_from(tbl)
            subq = subq.where(and_(*(where_clause_and + inner_time_filter)))
            subq = subq.group_by(*inner_groupby_exprs)
            subq = subq.order_by(desc(main_metric_expr))
            subq = subq.limit(timeseries_limit)
            on_clause = []
            for i, gb in enumerate(groupby):
                on_clause.append(
                    groupby_exprs[i] == column("__" + gb))

            tbl = tbl.join(subq.alias(), and_(*on_clause))

        qry = qry.select_from(tbl)

        engine = self.database.get_sqla_engine()
        sql = "{}".format(
            qry.compile(
                engine, compile_kwargs={"literal_binds": True},),
            )
        print(sql)
        df = pd.read_sql_query(
            sql=sql,
            con=engine
        )
        sql = sqlparse.format(sql, reindent=True)
        return QueryResult(
            df=df, duration=datetime.now() - qry_start_dttm, query=sql)
Example 17
from flask.ext.appbuilder.baseapp import BaseApp
from flask.ext.appbuilder.models.datamodel import SQLAModel
from flask.ext.appbuilder.views import GeneralView
from flask_appbuilder.charts.views import DirectChartView
from flask.ext.babelpkg import lazy_gettext as _

from app import app, db
from models import CountryStats


class CountryStatsGeneralView(GeneralView):
    datamodel = SQLAModel(CountryStats, db.session)
    list_columns = ['stat_date', 'population', 'unenployed', 'college']

class CountryStatsDirectChart(DirectChartView):
    chart_title = 'Grouped contacts'
    chart_type = 'LineChart'
    direct_columns = {'General Stats': ('stat_date', 'population', 'unenployed', 'college')}
    datamodel = SQLAModel(CountryStats, db.session)
    base_order = ('stat_date', 'asc')

fixed_translations_import = [
    _("List Country Stats"),
    _("Show Country Chart")]


genapp = BaseApp(app, db)
genapp.add_view(CountryStatsGeneralView(), "List Country Stats", icon="fa-folder-open-o", category="Statistics")
genapp.add_separator("Statistics")
genapp.add_view(CountryStatsDirectChart(), "Show Country Chart", icon="fa-dashboard", category="Statistics")