Example #1
 def find_members(self, store, subscriber=None, list_id=None, role=None):
     """See `ISubscriptionService`."""
     # If `subscriber` is a user id, then we'll search for all addresses
     # which are controlled by the user, otherwise we'll just search for
     # the given address.
     user_manager = getUtility(IUserManager)
     if subscriber is None and list_id is None and role is None:
         return []
     # Querying for the subscriber is the most complicated part, because
     # the parameter can either be an email address or a user id.
     query = []
     if subscriber is not None:
         if isinstance(subscriber, str):
             # subscriber is an email address.
             address = user_manager.get_address(subscriber)
             user = user_manager.get_user(subscriber)
             # This probably could be made more efficient.  An address can
             # exist without a linked user (and vice versa), so only build
             # clauses for the records that were actually found.
             clauses = []
             if address is not None:
                 clauses.append(Member.address_id == address.id)
             if user is not None:
                 clauses.append(Member.user_id == user.id)
             if not clauses:
                 return []
             query.append(or_(*clauses))
         else:
             # subscriber is a user id.
             user = user_manager.get_user_by_id(subscriber)
             # Check the user before touching .addresses; it may be None.
             if user is None:
                 return []
             address_ids = [address.id for address in user.addresses
                            if address.id is not None]
             if len(address_ids) == 0:
                 return []
             query.append(or_(Member.user_id == user.id,
                              Member.address_id.in_(address_ids)))
     # Calculate the rest of the query expression, which will get And'd
     # with the Or clause above (if there is one).
     if list_id is not None:
         query.append(Member.list_id == list_id)
     if role is not None:
         query.append(Member.role == role)
     results = store.query(Member).filter(and_(*query))
     return sorted(results, key=_membership_sort_key)
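
A minimal sketch of the collect-then-combine pattern used above, with a stand-in Member model (SQLAlchemy 1.4+ assumed; the column names are illustrative, not Mailman's real schema). Criteria are gathered in a plain list and handed to and_() in one go:

from sqlalchemy import Column, Integer, String, and_, or_
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Member(Base):
    __tablename__ = 'member'
    id = Column(Integer, primary_key=True)
    address_id = Column(Integer)
    user_id = Column(Integer)
    list_id = Column(String)
    role = Column(String)

criteria = [
    or_(Member.address_id == 1, Member.user_id == 2),
    Member.list_id == 'test.example.com',
]
# Renders: (address_id = :p1 OR user_id = :p2) AND list_id = :p3
print(and_(*criteria))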
Example #2
def sa_op(op):
    # Boolean combinators recurse over their sub-operations.
    if isinstance(op, operators.or_):
        return sa.or_(*[sa_op(o) for o in op.ops])
    elif isinstance(op, operators.and_):
        return sa.and_(*[sa_op(o) for o in op.ops])

    # Otherwise op is a comparison; pick the matching Python operator.
    if isinstance(op, operators.eq):
        fn = lambda x, y: x == y
    elif isinstance(op, operators.ne):
        fn = lambda x, y: x != y
    elif isinstance(op, operators.gt):
        fn = lambda x, y: x > y
    elif isinstance(op, operators.lt):
        fn = lambda x, y: x < y
    elif isinstance(op, operators.gte):
        fn = lambda x, y: x >= y
    elif isinstance(op, operators.lte):
        fn = lambda x, y: x <= y
    else:
        raise TypeError('unsupported operator: %r' % op)

    rval = tup(op.rval)

    if not rval:
        # Nothing to compare against: return a literal SQL fragment that
        # always evaluates false, so the clause matches no rows.
        return '2+2=5'
    else:
        return sa.or_(*[fn(op.lval, v) for v in rval])
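
sa.or_ and sa.and_ accept other boolean expressions as arguments, which is what lets sa_op recurse over nested boolean ops. A standalone rendering check with a stand-in model (SQLAlchemy 1.4+ assumed):

import sqlalchemy as sa
from sqlalchemy import Column, Integer
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class T(Base):
    __tablename__ = 't'
    id = Column(Integer, primary_key=True)
    a = Column(Integer)
    b = Column(Integer)

# Nested combinators mirror the recursive walk in sa_op above.
expr = sa.or_(sa.and_(T.a == 1, T.b == 2), T.a > 3)
print(expr)  # (t.a = :a_1 AND t.b = :b_1) OR t.a > :a_2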
Example #3
def constrain_obj_ids(obj, obj_constraint):
  desc = aliased( ObjectDescription )
  no_ids = False
  no_class = False
  no_super = False
  query_constraint = None
  if obj_constraint.ids and obj_constraint.ids != (0,):
    query_constraint = obj.id.in_( obj_constraint.ids )
  else:
    no_ids = True
  if obj_constraint.class_types and obj_constraint.class_types != ['']:
    class_constraint = or_(*[desc.type == t for t in obj_constraint.class_types])
    # or_() against a None constraint would inject a literal NULL, so only
    # widen an existing constraint.
    query_constraint = (class_constraint if query_constraint is None
                        else or_(query_constraint, class_constraint))
  else:
    no_class = True

  if obj_constraint.superclass_types and obj_constraint.superclass_types != ['']:
    super_constraint = or_(*[desc.type.contains(t) for t in obj_constraint.superclass_types])
    query_constraint = (super_constraint if query_constraint is None
                        else or_(query_constraint, super_constraint))
  else:
    no_super = True

  if no_ids and no_class and no_super:
    return db().query( obj.id )
  else:
    return db().query( obj.id ).\
      filter( obj.relative_description_id == desc.id,
              query_constraint )
Example #4
def fix_escaped_quotes(dummy_data, admin_override=False):
	if admin_override is False and (not current_user.is_mod()):
		return getResponse(error=True, message="You have to have moderator privileges to do that!")

	# SELECT * FROM series WHERE title LIKE E'%\\\'%';
	bad_title = 0
	bad_desc = 0
	conflicts = 0


	q = Story.query.filter(or_(
		Story.title.like(r"%'%"),
		Story.title.like(r"%’%"),
		Story.title.like(r"%‘%"),
		Story.title.like(r"%“%"),
		Story.title.like(r"%”%")))
	items = q.all()
	print("Name fixing processing query resulted in %s items" % len(items))
	for item in items:
		old = item.title
		new = old
		while any([r"\"" in new, r"\'" in new, "’" in new, "‘" in new, "“" in new, "”" in new]):
			new = new.replace(r"\'", "'")
			new = new.replace(r'\"', '"')
			new = new.replace(r"’", "'")
			new = new.replace(r"‘", "'")
			new = new.replace(r"“", '"')
			new = new.replace(r"”", '"')

		have = Story.query.filter(Story.title == new).scalar()
		if old != new:
			if have:
				print("Duplicate item!", (old, new), old==new)
				merge_series_ids(have.id, item.id)
			else:
				print("Fixing title.")
				item.title = new
				db.session.commit()
			bad_title += 1


	# F**K ALL SMART QUOTE BULLSHITS EVER
	q = Story.query.filter(or_(
		Story.description.like(r"%'%"),
		Story.description.like(r"%’%"),
		Story.description.like(r"%‘%"),
		Story.description.like(r"%“%"),
		Story.description.like(r"%”%")))

	items = q.all()
	print("Series description processing query resulted in %s items" % len(items))
	for item in items:
		old = item.description
		new = old

		while any([r"\"" in new, r"\'" in new, "’" in new, "‘" in new, "“" in new, "”" in new]):
			new = new.replace(r"\'", "'")
			new = new.replace(r'\"', '"')
			new = new.replace(r"’", "'")
			new = new.replace(r"‘", "'")
			new = new.replace(r"“", '"')
			new = new.replace(r"”", '"')
		if old != new:
			print("Fixing description smart-quotes and over-escapes for series: %s" % item.id)
			item.description = new
			db.session.commit()
			bad_desc += 1

	print("Update complete.")

	return getResponse("%s main titles, %s descriptions required fixing; %s duplicate titles merged." % (bad_title, bad_desc, conflicts), error=False)
Example #5
  def _get_relate_filter(cls, predicate, related_type):
    """Used for filtering by related_assignee.

    Returns:
        An EXISTS clause that is true when such an assignee exists.
    """
    # pylint: disable=invalid-name
    # The upper case variables are allowed here to shorthand the class names.
    Rel = relationship.Relationship
    RelAttr = relationship.RelationshipAttr
    Person = person.Person
    return db.session.query(Rel).join(RelAttr).join(
        Person,
        or_(and_(
            Rel.source_id == Person.id,
            Rel.source_type == Person.__name__
        ), and_(
            Rel.destination_id == Person.id,
            Rel.destination_type == Person.__name__
        ))
    ).filter(and_(
        RelAttr.attr_value.contains(related_type),
        RelAttr.attr_name == "AssigneeType",
        or_(and_(
            Rel.source_type == Person.__name__,
            Rel.destination_type == cls.__name__,
            Rel.destination_id == cls.id
        ), and_(
            Rel.destination_type == Person.__name__,
            Rel.source_type == cls.__name__,
            Rel.source_id == cls.id
        )),
        or_(predicate(Person.name), predicate(Person.email))
    )).exists()
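
The or_(and_(...), and_(...)) shape above encodes "the Person sits on either end of the relationship"; the same pattern recurs in Examples #16, #21 and #28 below. Reduced to a self-contained rendering check (stand-in table, SQLAlchemy 1.4+ assumed):

from sqlalchemy import Column, Integer, String, and_, or_
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Rel(Base):
    __tablename__ = 'relationships'
    id = Column(Integer, primary_key=True)
    source_id = Column(Integer)
    source_type = Column(String)
    destination_id = Column(Integer)
    destination_type = Column(String)

person_id = 1
either_end = or_(
    and_(Rel.source_type == 'Person', Rel.source_id == person_id),
    and_(Rel.destination_type == 'Person', Rel.destination_id == person_id),
)
print(either_end)  # two AND groups joined by OR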
Example #6
def get_graphs_by_edges_and_nodes_and_names(db_session, group_ids=None, names=None, nodes=None, edges=None, tags=None,
                                            order=desc(Graph.updated_at), page=0, page_size=10, partial_matching=False,
                                            owner_email=None, is_public=None):
	query = db_session.query(Graph)

	edges = [] if edges is None else edges
	nodes = [] if nodes is None else nodes
	names = [] if names is None else names
	tags = [] if tags is None else tags

	edges = [('%%%s%%' % u, '%%%s%%' % v) for u, v in edges] if partial_matching else edges
	nodes = ['%%%s%%' % node for node in nodes] if partial_matching else nodes
	names = ['%%%s%%' % name for name in names] if partial_matching else names
	tags = ['%%%s%%' % tag for tag in tags]

	graph_filter_group = []
	if is_public is not None:
		graph_filter_group.append(Graph.is_public == is_public)
	if owner_email is not None:
		graph_filter_group.append(Graph.owner_email == owner_email)
	if group_ids is not None:
		query = query.filter(Graph.shared_with_groups.any(Group.id.in_(group_ids)))
	if len(graph_filter_group) > 0:
		query = query.filter(*graph_filter_group)

	names_filter_group = [Graph.name.ilike(name) for name in names]
	tags_filter_group = [GraphTag.name.ilike(tag) for tag in tags]
	nodes_filter_group = [Node.label.ilike(node) for node in nodes]
	nodes_filter_group.extend([Node.name.ilike(node) for node in nodes])
	edges_filter_group = [and_(Edge.head_node.has(Node.name.ilike(u)), Edge.tail_node.has(Node.name.ilike(v))) for u, v
	                      in edges]
	edges_filter_group.extend(
		[and_(Edge.tail_node.has(Node.name.ilike(u)), Edge.head_node.has(Node.name.ilike(v))) for u, v in edges])
	edges_filter_group.extend(
		[and_(Edge.head_node.has(Node.label.ilike(u)), Edge.tail_node.has(Node.label.ilike(v))) for u, v in edges])
	edges_filter_group.extend(
		[and_(Edge.tail_node.has(Node.label.ilike(u)), Edge.head_node.has(Node.label.ilike(v))) for u, v in edges])

	options_group = []
	if len(nodes_filter_group) > 0:
		options_group.append(joinedload('nodes'))
	if len(edges_filter_group) > 0:
		options_group.append(joinedload('edges'))
	if len(options_group) > 0:
		query = query.options(*options_group)

	combined_filter_group = []
	if len(nodes_filter_group) > 0:
		combined_filter_group.append(Graph.nodes.any(or_(*nodes_filter_group)))
	if len(edges_filter_group) > 0:
		combined_filter_group.append(Graph.edges.any(or_(*edges_filter_group)))
	if len(names_filter_group) > 0:
		combined_filter_group.extend(names_filter_group)
	if len(tags_filter_group) > 0:
		combined_filter_group.extend(tags_filter_group)

	if len(combined_filter_group) > 0:
		query = query.filter(or_(*combined_filter_group))

	return query.order_by(order).limit(page_size).offset(page * page_size).all()
Example #7
def chg_crime():
    # Step Seven: Find updates
    dat_crime_table = Table('dat_chicago_crimes_all', Base.metadata, 
        autoload=True, autoload_with=engine, extend_existing=True)
    src_crime_table = Table('src_chicago_crimes_all', Base.metadata, 
        autoload=True, autoload_with=engine, extend_existing=True)
    chg_crime_table = Table('chg_chicago_crimes_all', Base.metadata, 
        Column('id', Integer, primary_key=True),
        extend_existing=True)
    chg_crime_table.drop(bind=engine, checkfirst=True)
    chg_crime_table.create(bind=engine)
    src_cols = [c for c in src_crime_table.columns if c.name not in ['id', 'start_date', 'end_date']]
    dat_cols = [c for c in dat_crime_table.columns if c.name not in ['id', 'start_date', 'end_date']]
    and_args = []
    for s, d in zip(src_cols, dat_cols):
        ors = or_(s != None, d != None)
        ands = and_(ors, s != d)
        and_args.append(ands)
    ins = chg_crime_table.insert()\
          .from_select(
              ['id'],
              select([src_crime_table.c.id])\
                  .select_from(src_crime_table.join(dat_crime_table,
                      src_crime_table.c.id == dat_crime_table.c.id))\
                  .where(or_(
                          and_(dat_crime_table.c.current_flag == True, 
                                and_(or_(src_crime_table.c.id != None, dat_crime_table.c.id != None), 
                                src_crime_table.c.id != dat_crime_table.c.id)),
                          *and_args))
          )
    conn = engine.contextual_connect()
    conn.execute(ins)
    return 'Changes found'
Example #8
    def get_results_empresas(self, term, page, limit):

        offset = int((page-1) * limit)

        filtro = or_(
            models.Empresa.razon_social.ilike('%' + term + '%'),
            models.Empresa.ruc.ilike('%' + term + '%')
        )

        results = db.query(models.Empresa).filter(filtro).limit(limit).offset(offset)

        count = db.query(models.Empresa).filter(filtro).count()

        pagination = self.get_pager(offset, count, page, limit)

        return [results, pagination]
Example #9
def web_view_user_posts(request, environment, session, username, page=1,
                        posts_per_page=15):
    """
    returns page <page> of the posts created by <username> (at most
    <posts_per_page> per page) as 'posts', and <username>'s user object as
    'user'; 'posts' is an empty list if there aren't any.

    Possible errortype values are:
    * 'NoSuchUser' if <username> is unknown to the system.

    May raise the following Exceptions:
    * Exception('NoSuchUser')
    """

    u = get_user_obj(username, session)

    own = session.query(post.id).filter(post.owner == u.identity).subquery()
    reposts = session.query(post.id).filter(
        post.reposters.contains(u.identity)).subquery()
    visible = or_(post.id.in_(reposts), post.id.in_(own))
    total_num = session.query(model.post).filter(visible).count()
    allposts = session.query(model.post).filter(visible)\
        .order_by(desc(post.timestamp))\
        .offset((page-1)*posts_per_page).limit(posts_per_page).all()

    posts = [p.downcast() for p in allposts]

    return render_template("web_view_user_posts.htmljinja", environment,
                           posts=posts, page_num=page, total_num=total_num,
                           posts_per_page=posts_per_page, user=u)
Example #10
File: base.py Project: katrid/orun
 def search_name(
         self, name=None, count=None, page=None, label_from_instance=None, name_fields=None, *args, exact=False,
         **kwargs
 ):
     params = kwargs.get('params')
     join = []
     if name:
         if name_fields is None:
             name_fields = chain(*(_resolve_fk_search(f, join) for f in self._meta.get_name_fields()))
         if exact:
             q = [sa.or_(*[fld.column == name for fld in name_fields])]
         else:
             q = [sa.or_(*[fld.column.ilike('%' + name + '%') for fld in name_fields])]
         if params:
             q.append(params)
         kwargs = {'params': q}
     kwargs['join'] = join
     qs = self._search(*args, **kwargs)
     if count:
         count = qs.count()
     if page:
         page = int(page)
         qs = qs[(page - 1) * CHOICES_PAGE_LIMIT:page * CHOICES_PAGE_LIMIT]
     else:
         qs = qs[:CHOICES_PAGE_LIMIT]
     if isinstance(label_from_instance, list):
         fields = [f for f in label_from_instance if f in self._meta.fields_dict]
         label_from_instance = lambda obj: (
             obj.pk, ' - '.join(str(getattr(obj, f, '')) for f in fields))
     if callable(label_from_instance):
         res = [label_from_instance(obj) for obj in qs]
     else:
         res = [obj._get_instance_label() for obj in qs]
     return {
         'count': count,
         'items': res,
     }
Example #11
    def _apply_filters_to_query(self, query, model, filters, context=None):
        if filters:
            for key, value in six.iteritems(filters):
                column = getattr(model, key, None)
                # NOTE(kevinbenton): if column is a hybrid property that
                # references another expression, attempting to convert to
                # a boolean will fail so we must compare to None.
                # See "An Important Expression Language Gotcha" in:
                # docs.sqlalchemy.org/en/rel_0_9/changelog/migration_06.html
                if column is not None:
                    if not value:
                        query = query.filter(sql.false())
                        return query
                    if isinstance(column, associationproxy.AssociationProxy):
                        # association proxies don't support in_ so we have to
                        # do multiple equals matches
                        query = query.filter(
                            or_(*[column == v for v in value]))
                    else:
                        query = query.filter(column.in_(value))
                elif key == 'shared' and hasattr(model, 'rbac_entries'):
                    # translate a filter on shared into a query against the
                    # object's rbac entries
                    query = query.outerjoin(model.rbac_entries)
                    rbac = model.rbac_entries.property.mapper.class_
                    matches = [rbac.target_tenant == '*']
                    if context:
                        matches.append(rbac.target_tenant == context.tenant_id)
                    # any 'access_as_shared' records that match the
                    # wildcard or requesting tenant
                    is_shared = and_(rbac.action == 'access_as_shared',
                                     or_(*matches))
                    if not value[0]:
                        # NOTE(kevinbenton): we need to find objects that don't
                        # have an entry that matches the criteria above so
                        # we use a subquery to exclude them.
                        # We can't just filter the inverse of the query above
                        # because that will still give us a network shared to
                        # our tenant (or wildcard) if it's shared to another
                        # tenant.
                        # This is the column joining the table to rbac via
                        # the object_id. We can't just use model.id because
                        # subnets join on network.id so we have to inspect the
                        # relationship.
                        join_cols = model.rbac_entries.property.local_columns
                        oid_col = list(join_cols)[0]
                        is_shared = ~oid_col.in_(
                            query.session.query(rbac.object_id).
                            filter(is_shared)
                        )
                    query = query.filter(is_shared)
            for _nam, hooks in six.iteritems(self._model_query_hooks.get(model,
                                                                         {})):
                result_filter = hooks.get('result_filters', None)
                if isinstance(result_filter, six.string_types):
                    result_filter = getattr(self, result_filter, None)

                if result_filter:
                    query = result_filter(query, filters)
        return query
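
The association-proxy branch falls back to an or_ of equality tests only because the proxy lacks in_(); for an ordinary column the two spellings select the same rows. A minimal sketch (hypothetical model, SQLAlchemy 1.4+ assumed):

from sqlalchemy import Column, Integer, String, or_
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Net(Base):
    __tablename__ = 'networks'
    id = Column(Integer, primary_key=True)
    name = Column(String)

values = ['a', 'b', 'c']
# Selects the same rows as Net.name.in_(values), built clause by clause.
print(or_(*[Net.name == v for v in values]))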
Example #12
    def get_tokens(self, client_id=None, user_id=None, scopes=(),
                   exclude_revoked=False, exclude_expired=False):

        tokens = self.tokens

        if exclude_expired:
            tokens = tokens.filter(AccessToken.expires_at > datetime.now())

        if exclude_revoked:
            tokens = tokens.filter(AccessToken.base_token_id == BaseToken._id)
            tokens = tokens.filter(BaseToken.revoked == 0)

        if client_id:
            tokens = tokens.filter(AccessToken.base_token_id == BaseToken._id)
            tokens = tokens.filter(BaseToken.client_id == Client.client_id)
            tokens = tokens.filter(or_(Client.client_id == client_id,
                                       Client.alias == client_id))
        if user_id:
            tokens = tokens.filter(AccessToken.base_token_id == BaseToken._id)
            tokens = tokens.filter(BaseToken.user_id == User.user_id)
            tokens = tokens.filter(or_(User.email == user_id,
                                       User.user_id == user_id))

        for scope in scopes:
            tokens = tokens.filter(AccessToken.base_token_id == BaseToken._id)
            tokens = tokens.filter(BaseToken.scope_objects.any(value=scope))

        return tokens
Example #13
def _secure_query(model, *columns):
    query = b.model_query(model, columns)

    if not issubclass(model, mb.MistralSecureModelBase):
        return query

    shared_res_ids = []
    res_type = RESOURCE_MAPPING.get(model, '')

    if res_type:
        shared_res = _get_accepted_resources(res_type)
        shared_res_ids = [res.resource_id for res in shared_res]

    query_criterion = sa.or_(
        model.project_id == security.get_project_id(),
        model.scope == 'public'
    )

    # NOTE(kong): Include IN_ predicate in query filter only if shared_res_ids
    # is not empty to avoid sqlalchemy SAWarning and wasting a db call.
    if shared_res_ids:
        query_criterion = sa.or_(
            query_criterion,
            model.id.in_(shared_res_ids)
        )

    query = query.filter(query_criterion)

    return query
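
The widening step above (wrap the existing criterion in a fresh sa.or_ only when there is something to add) sidesteps in_() over an empty sequence altogether. The same shape in isolation, with a stand-in model (SQLAlchemy 1.4+ assumed):

import sqlalchemy as sa
from sqlalchemy import Column, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Workflow(Base):
    __tablename__ = 'workflows'
    id = Column(String, primary_key=True)
    project_id = Column(String)
    scope = Column(String)

criterion = sa.or_(Workflow.project_id == 'p1', Workflow.scope == 'public')
shared_ids = ['w1', 'w2']  # may be empty
if shared_ids:
    # Widen the OR only when there are ids, avoiding an empty IN ().
    criterion = sa.or_(criterion, Workflow.id.in_(shared_ids))
print(criterion)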
Example #14
    def _check_dvr_serviceable_ports_on_host(self, context, host, subnet_ids):
        """Check for existence of dvr serviceable ports on host

        :param context: request context
        :param host: host to look ports on
        :param subnet_ids: IDs of subnets to look ports on
        :return: return True if dvr serviceable port exists on host,
                 otherwise return False
        """
        # db query will return ports for all subnets if subnet_ids is empty,
        # so need to check first
        if not subnet_ids:
            return False

        Binding = ml2_models.PortBinding
        IPAllocation = models_v2.IPAllocation
        Port = models_v2.Port

        query = context.session.query(Binding)
        query = query.join(Binding.port)
        query = query.join(Port.fixed_ips)
        query = query.filter(
            IPAllocation.subnet_id.in_(subnet_ids))
        device_filter = or_(
            models_v2.Port.device_owner.startswith(
                n_const.DEVICE_OWNER_COMPUTE_PREFIX),
            models_v2.Port.device_owner.in_(
                n_utils.get_other_dvr_serviced_device_owners()))
        query = query.filter(device_filter)
        host_filter = or_(
            ml2_models.PortBinding.host == host,
            ml2_models.PortBinding.profile.contains(host))
        query = query.filter(host_filter)
        return query.first() is not None
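
Each of the two filters above is a plain two-clause or_; how startswith and contains render can be checked standalone (stand-in model, not Neutron's schema; SQLAlchemy 1.4+ assumed):

from sqlalchemy import Column, Integer, String, or_
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Binding(Base):
    __tablename__ = 'bindings'
    id = Column(Integer, primary_key=True)
    host = Column(String)
    profile = Column(String)

host = 'compute-1'
# host = :host_1 OR profile LIKE '%' || :profile_1 || '%'
print(or_(Binding.host == host, Binding.profile.contains(host)))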
Example #15
    def whereInEquipement(self,fullQueryJoin,criteria):
        sensorObj = list(filter(lambda x:'FK_Sensor'==x['Column'], criteria))[0]
        sensor = sensorObj['Value']

        table = Base.metadata.tables['MonitoredSiteEquipment']
        joinTable = outerjoin(table,Sensor, table.c['FK_Sensor'] == Sensor.ID)

        if sensorObj['Operator'].lower() in ['is','is not'] and sensorObj['Value'].lower() == 'null':
            subSelect = select([table.c['FK_MonitoredSite']]
                ).select_from(joinTable).where(
                and_(MonitoredSite.ID== table.c['FK_MonitoredSite']
                    ,or_(table.c['EndDate'] >= func.now(),table.c['EndDate'] == None)
                        ))
            if sensorObj['Operator'].lower() == 'is':
                fullQueryJoin = fullQueryJoin.where(~exists(subSelect))
            else:
                fullQueryJoin = fullQueryJoin.where(exists(subSelect))
        else:
            subSelect = select([table.c['FK_MonitoredSite']]
                ).select_from(joinTable).where(
                and_(MonitoredSite.ID== table.c['FK_MonitoredSite']
                    ,and_(eval_.eval_binary_expr(Sensor.UnicIdentifier,sensorObj['Operator'],sensor)
                        ,or_(table.c['EndDate'] >= func.now(),table.c['EndDate'] == None))
                        ))
            fullQueryJoin = fullQueryJoin.where(exists(subSelect))
        return fullQueryJoin
Example #16
 def predicate(src, dst):
   return and_(
       Relationship.source_type == src.type,
       or_(Relationship.source_id == src.id, src.id == None),  # noqa
       Relationship.destination_type == dst.type,
       or_(Relationship.destination_id == dst.id, dst.id == None),  # noqa
   )
Example #17
def main(confdir="/etc/cslbot") -> None:
    config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
    with open(path.join(confdir, 'config.cfg')) as f:
        config.read_file(f)
    session = get_session(config)()
    channel = '#tjhsst'
    type_filter = or_(Log.type == 'privmsg', Log.type == 'pubmsg', Log.type == 'action')
    users = session.query(Log.source).filter(Log.target == channel, type_filter).having(func.count(Log.id) > 500).group_by(Log.source).all()
    freq = []
    for user in users:
        lines = session.query(Log.msg).filter(Log.target == channel, Log.source == user[0],
                                              type_filter).all()
        text = '\n'.join([x[0] for x in lines])
        with open('/tmp/foo', 'w') as f:
            f.write(text)
        try:
            output = subprocess.check_output(['zpaq', 'add', 'foo.zpaq', '/tmp/foo', '-test', '-summary', '1', '-method', '5'],
                                             stderr=subprocess.STDOUT,
                                             universal_newlines=True)
            sizes = output.splitlines()[-2]
            match = re.match(r'.*\((.*) -> .* -> (.*)\).*', sizes)
            if not match:
                raise Exception('Could not parse zpaq output: %r' % sizes)
            before, after = match.groups()
            # 8 bits = 1 byte
            count = 1024 * 1024 * 8 * float(after) / len(text)
            freq.append((user[0], len(lines), float(after) / float(before) * 100, count))
        except subprocess.CalledProcessError as e:
            print(e.stdout)
            raise e
    with open('freq.json', 'w') as f:
        json.dump(freq, f, indent=True)
    for x in sorted(freq, key=lambda x: x[2]):
        print("%s: (%d lines) (%f%% compressed) (%f bits per char)" % x)
Example #18
 def update(self):
     now = datetime.datetime.now()
     permit_date = now - datetime.timedelta(days=now.weekday())
     permit_date = permit_date.replace(hour=0, minute=0, second=0)
     permit_date1 = permit_date - datetime.timedelta(days=7)
     last_submitted = self.submit.filter(Submit.submit_time < permit_date, Submit.submit_time > permit_date1)
     last_solved = (
         db.session.query(Submit.oj_name, Submit.pro_id)
         .distinct()
         .filter(
             Submit.user_id == self.id,
             Submit.submit_time < permit_date,
             Submit.submit_time > permit_date1,
             or_(Submit.result == "OK", Submit.result == "Accepted"),
         )
     )
     current_submitted = self.submit.filter(Submit.submit_time > permit_date)
     current_solved = (
         db.session.query(Submit.oj_name, Submit.pro_id)
         .distinct()
         .filter(
             Submit.user_id == self.id,
             Submit.submit_time > permit_date,
             or_(Submit.result == "OK", Submit.result == "Accepted"),
         )
     )
     self.last_week_solved = last_solved.count()
     self.last_week_submit = last_submitted.count()
     self.current_week_solved = current_solved.count()
     self.current_week_submit = current_submitted.count()
     self.update_score()
Example #19
    def active_users_total(self, realmlist):
        """
        get the total number of users of active tokens
        for all resolvers which are in allowed realms

        users are counted per resolver, so if a resolver is in more than one
        realm, its users are only counted once

        :param realmlist: list of (existing and allowed) realms
        :return: number of users in allowed realms who own an active token
        """
        realm_cond = tuple()
        for realm in realmlist:
            realm_cond += (or_(Realm.name == realm),)

        user_and_resolver = Session.query(Token.LinOtpUserid,
                                          Token.LinOtpIdResolver,
                                          Token.LinOtpIdResClass,
                                          Token.LinOtpIsactive)\
            .join(TokenRealm)\
            .join(Realm)\
            .filter(or_(*realm_cond),
                    and_(Token.LinOtpIsactive == True,
                         Token.LinOtpIdResolver != ''))\
            .group_by(Token.LinOtpUserid, Token.LinOtpIdResolver,
                      Token.LinOtpIsactive, Token.LinOtpIdResClass)

        all_server_total = user_and_resolver.count()
        return all_server_total
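
Wrapping every single comparison in its own or_ is redundant; one or_ over a comprehension builds the identical criterion. A self-contained sketch with a stand-in Realm model (SQLAlchemy 1.4+ assumed):

from sqlalchemy import Column, Integer, String, or_
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Realm(Base):
    __tablename__ = 'realm'
    id = Column(Integer, primary_key=True)
    name = Column(String)

realmlist = ['realm_a', 'realm_b']
# One or_ over all comparisons replaces the per-realm or_ wrappers.
print(or_(*[Realm.name == realm for realm in realmlist]))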
Example #20
def get(key, cls, attrs=(), page=0, per_page=50, local=True):
#
# Local is a flag that determines whether we only return objects local to the
# calling key's namespace, or whether we will permit global objects with identical names
# to local objects in the response.
#
	if page and per_page:
		if key.systemwide:
			return cls.query.filter(or_(cls.key == None, cls.key == key)).paginate(page, per_page).items
		return cls.query.filter(cls.key == key).paginate(page,per_page).items

	if attrs:
		(attr, identifier) = attrs
		attribute = getattr(cls, attr)
		if attribute:
			if key.systemwide:
				item = cls.query.filter(
					or_(and_(attribute==identifier, cls.key == None),
					and_(attribute==identifier, cls.key == key))
				).all()
				if local:
					if len(item) == 1: return item[0]
					for i in item:
						if i.key == key: return i
				return item
			else:
				item = cls.query.filter(and_(attribute==identifier, cls.key == key)).first()
			return item

		raise Exception('Unrecognised attribute "%s" of %s.' % (attr, repr(cls)))

	if key.systemwide:
		return cls.query.filter(or_(cls.key == None, cls.key == key)).all()
	return cls.query.filter(cls.key == key).all()
Example #21
 def evidences(cls):  # pylint: disable=no-self-argument
   """Return evidences related for that instance."""
   return db.relationship(
       Evidence,
       primaryjoin=lambda: sa.or_(
           sa.and_(
               cls.id == Relationship.source_id,
               Relationship.source_type == cls.__name__,
               Relationship.destination_type == "Evidence",
           ),
           sa.and_(
               cls.id == Relationship.destination_id,
               Relationship.destination_type == cls.__name__,
               Relationship.source_type == "Evidence",
           )
       ),
       secondary=Relationship.__table__,
       secondaryjoin=lambda: sa.or_(
           sa.and_(
               Evidence.id == Relationship.source_id,
               Relationship.source_type == "Evidence",
           ),
           sa.and_(
               Evidence.id == Relationship.destination_id,
               Relationship.destination_type == "Evidence",
           )
       ),
       viewonly=True,
   )
Example #22
 def build_initial_query(self, trans, **kwd):
     clause_list = []
     tool_shed_repository_ids = kwd.get('tool_shed_repository_ids', None)
     if tool_shed_repository_ids:
         if isinstance(tool_shed_repository_ids, string_types):
             try:
                 # kwd['tool_shed_repository_ids'] may be a json dump of repo ids like u'['aebaa141e7243ebf']'
                 tool_shed_repository_ids = json.loads(tool_shed_repository_ids)
             except ValueError:
                 pass
         tool_shed_repository_ids = util.listify(tool_shed_repository_ids)
         for tool_shed_repository_id in tool_shed_repository_ids:
             clause_list.append(self.model_class.table.c.id == trans.security.decode_id(tool_shed_repository_id))
         if clause_list:
             return trans.install_model.context.query(self.model_class) \
                                               .filter(or_(*clause_list))
     for tool_shed_repository in trans.install_model.context.query(self.model_class) \
                                                            .filter(self.model_class.table.c.deleted == false()):
         if tool_shed_repository.status in [trans.install_model.ToolShedRepository.installation_status.NEW,
                                            trans.install_model.ToolShedRepository.installation_status.CLONING,
                                            trans.install_model.ToolShedRepository.installation_status.SETTING_TOOL_VERSIONS,
                                            trans.install_model.ToolShedRepository.installation_status.INSTALLING_TOOL_DEPENDENCIES,
                                            trans.install_model.ToolShedRepository.installation_status.LOADING_PROPRIETARY_DATATYPES]:
             clause_list.append(self.model_class.table.c.id == tool_shed_repository.id)
     if clause_list:
         return trans.install_model.context.query(self.model_class) \
                                           .filter(or_(*clause_list))
     return trans.install_model.context.query(self.model_class) \
                                       .filter(self.model_class.table.c.status == trans.install_model.ToolShedRepository.installation_status.NEW)
Example #23
  def get_permissions_query(model_names, permission_type='read',
                            permission_model=None):
    """Prepare the query based on the allowed contexts and resources for
     each of the required objects (models).
    """
    type_queries = []
    for model_name in model_names:
      contexts, resources = query_helpers.get_context_resource(
          model_name=model_name,
          permission_type=permission_type,
          permission_model=permission_model
      )
      if contexts is not None:
        if resources:
          resource_sql = and_(
              MysqlRecordProperty.type == model_name,
              MysqlRecordProperty.key.in_(resources))
        else:
          resource_sql = false()

        type_query = or_(
            and_(
                MysqlRecordProperty.type == model_name,
                context_query_filter(MysqlRecordProperty.context_id, contexts)
            ),
            resource_sql)
        type_queries.append(type_query)

    return and_(
        MysqlRecordProperty.type.in_(model_names),
        or_(*type_queries))
Example #24
def getContacts(search,baoxiang):
    start=0
    limit=30
    if search!='':
        search="%"+search+"%"
        if baoxiang!="":
            baoxiang="%"+baoxiang+"%"
            #objs = Contact.objects.filter((Q(hetongbh__icontains=search) | Q(yiqibh__icontains=search)) & Q(baoxiang=baoxiang)).order_by('-yujifahuo_date')[start:start+limit]
            objs=session.query(PartsContact).filter(
                    and_(
                        or_(PartsContact.hetongbh.like(search),PartsContact.yiqibh.like(search)),
                        PartsContact.baoxiang.like(baoxiang)
                    )
                ).order_by(desc(PartsContact.yujifahuo_date))#[start:start+limit]
        else:
            objs=session.query(PartsContact).filter(
                        or_(PartsContact.hetongbh.like(search),PartsContact.yiqibh.like(search))
                ).order_by(desc(PartsContact.yujifahuo_date))#[start:start+limit] # Contact.objects.filter(Q(hetongbh__icontains=search) | Q(yiqibh__icontains=search)).order_by('-yujifahuo_date')[start:start+limit]
    else:
        if baoxiang!="":
            baoxiang="%"+baoxiang+"%"
            objs=session.query(PartsContact).filter(
                        PartsContact.baoxiang.like(baoxiang)
                ).order_by(desc(PartsContact.yujifahuo_date))#[start:start+limit]
        else:
            objs=session.query(PartsContact).order_by(desc(PartsContact.yujifahuo_date))#[start:start+limit]
    return objs
Example #25
def get_loan_info(db, movie_id, volume_id=None, collection_id=None):
	"""Returns current collection/volume/movie loan data"""
	from sqlalchemy import and_, or_
	movie = db.Movie.get_by(movie_id=movie_id)
	if movie is None:
		return False
	
	# fix or add volume/collection data:
	if movie.collection_id is not None:
		collection_id = movie.collection_id
	if movie.volume_id is not None:
		volume_id = movie.volume_id
	
	if collection_id>0 and volume_id>0:
		return db.Loan.get_by(
				and_(or_(db.Loan.c.collection_id==collection_id,
						db.Loan.c.volume_id==volume_id,
						db.Loan.c.movie_id==movie_id),
					db.Loan.c.return_date==None))
	elif collection_id>0:
		return db.Loan.get_by(
				and_(or_(db.Loan.c.collection_id==collection_id,
						db.Loan.c.movie_id==movie_id),
					db.Loan.c.return_date==None))
	elif volume_id>0:
		return db.Loan.get_by(
				and_(or_(db.Loan.c.volume_id==volume_id,
						db.Loan.c.movie_id==movie_id),
					db.Loan.c.return_date==None))
	else:
		return db.Loan.get_by(db.Loan.c.movie_id==movie_id,db.Loan.c.return_date==None)
Example #26
    def filter_single_by_time(cls, type, objects, year=None, week_number=None, day=None):

        assert (week_number and year) or day
        start_date, end_date = None, None

        if year and week_number:
            start_date, end_date = get_start_and_end_date_from_week_and_year(
                year,
                week_number
            )
        if day:
            start_date = day
            end_date = day

        objects = objects.filter(
            or_(
                tuple_(
                    cast(type.start_time, Date), cast(type.end_time, Date)
                ).op('overlaps')(
                    tuple_(
                        start_date, end_date
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    cast(type.end_time, Date) == start_date,
                    # Second range ends on the start date of the first
                    end_date == cast(type.start_time, Date)
                )
            )
        )

        return objects
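
tuple_(...).op('overlaps') emits the SQL OVERLAPS operator (PostgreSQL semantics assumed), which treats ranges as half-open; that is why the trailing or_ re-adds the ends-touch cases. A standalone sketch with a stand-in model (SQLAlchemy 1.4+ assumed):

from datetime import date
from sqlalchemy import Column, Date, Integer, literal, or_, tuple_
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Slot(Base):
    __tablename__ = 'slots'
    id = Column(Integer, primary_key=True)
    start_date = Column(Date)
    end_date = Column(Date)

week_start, week_end = date(2020, 1, 6), date(2020, 1, 12)
overlaps = tuple_(Slot.start_date, Slot.end_date).op('overlaps')(
    tuple_(literal(week_start), literal(week_end)))
# OVERLAPS is half-open, so ranges that merely touch need explicit checks.
cond = or_(overlaps, Slot.end_date == week_start, Slot.start_date == week_end)
print(cond)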
Example #27
def item_search(classes=None, bodygroups=None, equip_regions=None, item_name=None):
    items_query = TF2Item.query.filter_by(inactive=False)
    wildcards = ["%", "_"]
    if item_name:
        if any([w in item_name for w in wildcards]):
            return
        items_query = items_query.filter(TF2Item.item_name.contains(item_name))
    if classes:
        for class_name in classes:
            items_query = items_query.filter(TF2Item.class_model.any(TF2ClassModel.class_name == class_name))
        sq = db.session.query(TF2ClassModel.defindex, func.count(TF2ClassModel.class_name).label("class_count")).group_by(TF2ClassModel.defindex).subquery()
        items_query = items_query.join(sq, TF2Item.defindex == sq.c.defindex)
        if len(classes) == 9:
            pass
        elif len(classes) > 1:
            items_query = items_query.filter(sq.c.class_count > 1).filter(sq.c.class_count < 9)
        elif len(classes) == 1:
            items_query = items_query.filter(sq.c.class_count == 1)
    else:
        return
    if equip_regions:
        items_query = items_query.filter(TF2Item.equip_regions.any(or_(*[TF2EquipRegion.equip_region == equip_region for equip_region in equip_regions])))
    if bodygroups:
        items_query = items_query.filter(TF2Item.bodygroups.any(or_(*[TF2BodyGroup.bodygroup == bodygroup for bodygroup in bodygroups])))
        bodygroup_count = db.session.query(schema_bodygroup.c.defindex, func.count('*').label("bg_count")).group_by(schema_bodygroup.c.defindex).subquery()
        items_query = items_query.join(bodygroup_count, TF2Item.defindex == bodygroup_count.c.defindex).filter(bodygroup_count.c.bg_count == len(bodygroups))
    else:
        items_query = items_query.filter(TF2Item.bodygroups == None)
    return items_query
Example #28
 def comments(cls):  # pylint: disable=no-self-argument
   """Comments related to self via Relationship table."""
   return db.relationship(
       Comment,
       primaryjoin=lambda: sa.or_(
           sa.and_(
               cls.id == Relationship.source_id,
               Relationship.source_type == cls.__name__,
               Relationship.destination_type == "Comment",
           ),
           sa.and_(
               cls.id == Relationship.destination_id,
               Relationship.destination_type == cls.__name__,
               Relationship.source_type == "Comment",
           )
       ),
       secondary=Relationship.__table__,
       secondaryjoin=lambda: sa.or_(
           sa.and_(
               Comment.id == Relationship.source_id,
               Relationship.source_type == "Comment",
           ),
           sa.and_(
               Comment.id == Relationship.destination_id,
               Relationship.destination_type == "Comment",
           )
       ),
       viewonly=True,
   )
Example #29
    def filter_repeating_by_time(cls, type, objects, year=None, week_number=None, day=None):

        assert (week_number and year) or day
        start_date, end_date = None, None

        if year and week_number:
            start_date, end_date = get_start_and_end_date_from_week_and_year(
                year,
                week_number
            )
        if day:
            start_date = day
            end_date = day
            objects = objects.filter(
                type.week_day == day.isoweekday()
            )

        objects = objects.filter(
            or_(
                tuple_(
                    type.start_date, type.end_date
                ).op('overlaps')(
                    tuple_(
                        start_date, end_date
                    )
                ),
                or_(
                    # First range ends on the start date of the second
                    type.end_date == start_date,
                    # Second range ends on the start date of the first
                    end_date == type.start_date
                )
            )
        )
        return objects
Example #30
 def removeMinimum(self, m, commit=True):
     """remove a minimum from the database
     
     Remove a minimum and any objects (TransitionState or Distance) 
     pointing to that minimum.
     """
     # delete any Distance objects pointing to m
     candidates = self.session.query(Distance).\
         filter(or_(Distance.minimum1 == m, 
                    Distance.minimum2 == m))
     candidates = list(candidates)
     for d in candidates:
         self.session.delete(d)
         
     # delete any TransitionState objects pointing to m
     candidates = self.session.query(TransitionState).\
         filter(or_(TransitionState.minimum1 == m, 
                    TransitionState.minimum2 == m))
     candidates = list(candidates)
     for ts in candidates:
         self.on_ts_removed(ts)
         self.session.delete(ts)
     
     self.on_minimum_removed(m)
     #delete the minimum
     self.session.delete(m)
     if commit:
         self.session.commit()
Example #31
            now.strftime('%Y-%m-%d %H:%M'))
        mess += """
        <br>
        <table border='1' cellpadding="0" cellspacing="0" ><tr><td>assignee</td><td>tasks</td></tr>
        """
        ids = {}
        threads = [
            (th.id, th.fio_perf, th.id_global)
            for th in dbconn.query(Threads).filter(
                Threads.performer_id.in_([user_id, common_perf[title]])
            ).filter(Threads.status == Status.text).filter(
                Status.id == 1).filter(Threads.title_id == Tasktype.id).
            filter(Tasktype.system_id == 0).filter(
                sqlalchemy.or_(
                    Threads.creation_date < delta.strftime('%Y-%m-%d'),
                    sqlalchemy.and_(
                        Threads.creation_date == delta.strftime('%Y-%m-%d'),
                        Threads.creation_time < delta.strftime('%H:%M:%S'))))
        ]
        if threads:
            for (id, fio, glob) in threads:
                link = "<a href='http://helpdesk.renet.ru/hd/thread?id=%s'>%s</a>" % (
                    id, glob)
                if fio in ids:
                    ids[fio].append(link)
                else:
                    ids[fio] = [link]

            for fio, task_ids in ids.items():
                mess += """
                        <tr><td>%s</td><td>%s</td></tr>
Example #32
    def test_snapshot_update(self):
        """Test snapshot update with a simple change"""
        program = self.create_object(models.Program,
                                     {"title": "Test Program Snapshot 1"})
        control = self.create_object(models.Control,
                                     {"title": "Test Control Snapshot 1"})

        self.create_mapping(program, control)

        control = self.refresh_object(control)

        self.api.modify_object(control,
                               {"title": "Test Control Snapshot 1 EDIT 1"})

        self.create_audit(program)

        audit = db.session.query(models.Audit).filter(
            models.Audit.title.like("%Snapshotable audit%")).one()

        control_snapshot = db.session.query(models.Snapshot).filter(
            models.Snapshot.child_id == control.id,
            models.Snapshot.child_type == "Control",
            models.Snapshot.parent_type == "Audit",
            models.Snapshot.parent_id == audit.id)

        self.assertEqual(control_snapshot.count(), 1)
        self.assertEqual(control_snapshot.first().revision.content["title"],
                         "Test Control Snapshot 1 EDIT 1")

        # Create a new objective, add it to program and edit control to detect
        # update.
        # Map the objective to the control to check snapshot-to-snapshot mappings.

        objective = self.create_object(
            models.Objective, {"title": "Test Objective Snapshot UNEDITED"})
        self.create_mapping(program, objective)
        self.create_mapping(objective, control)

        control = self.refresh_object(control)
        self.api.modify_object(
            control,
            {"title": "Test Control Snapshot 1 Edit 2 AFTER initial snapshot"})

        audit = self.refresh_object(audit)
        # Initiate update operation
        self.api.modify_object(audit, {"snapshots": {"operation": "upsert"}})

        objective_snapshot_query = db.session.query(models.Snapshot).filter(
            models.Snapshot.child_type == "Objective",
            models.Snapshot.child_id == objective.id,
            models.Snapshot.parent_type == "Audit",
            models.Snapshot.parent_id == audit.id)
        self.assertEqual(objective_snapshot_query.count(), 1)
        objective_snapshot = objective_snapshot_query.first()
        self.assertEqual(objective_snapshot.revision.content["title"],
                         "Test Objective Snapshot UNEDITED")

        control_snapshot_query = db.session.query(models.Snapshot).filter(
            models.Snapshot.child_type == "Control",
            models.Snapshot.child_id == control.id,
            models.Snapshot.parent_type == "Audit",
            models.Snapshot.parent_id == audit.id)
        self.assertEqual(control_snapshot_query.count(), 1)
        control_snapshot = control_snapshot_query.first()
        self.assertEqual(
            control_snapshot.revision.content["title"],
            "Test Control Snapshot 1 Edit 2 AFTER initial snapshot")

        snapshot_mapping = db.session.query(models.Relationship).filter(
            models.Relationship.source_type == "Snapshot",
            models.Relationship.destination_type == "Snapshot",
            sa.or_(
                sa.and_(
                    models.Relationship.source_id == control_snapshot.id,
                    models.Relationship.destination_id ==
                    objective_snapshot.id,
                ),
                sa.and_(
                    models.Relationship.source_id == objective_snapshot.id,
                    models.Relationship.destination_id == control_snapshot.id,
                ),
            ),
        )
        self.assertEqual(snapshot_mapping.count(), 1)

        control_revisions = db.session.query(models.Revision).filter(
            models.Revision.resource_type == control.type,
            models.Revision.resource_id == control.id)
        # 2 revisions are from the initial creation, and 2 are from edits.
        self.assertEqual(control_revisions.count(), 4)

        self.assertEqual(
            control_revisions.order_by(models.Revision.id.desc()).first().id,
            control_snapshot.revision_id,
        )
Example #33
    print('name: %s  sex: %s sid: %s' % (stu.stu_name,stu.stu_sex,stu.stu_id))
print('-' * 30)
query8 = session.query(Stuinfo).filter(~Stuinfo.stu_name.in_(['小红','张三']))
for stu in query8:
    print(stu)
    print('name: %s  sex: %s sid: %s' % (stu.stu_name,stu.stu_sex,stu.stu_id))
print('-' * 30)
from sqlalchemy import  and_,or_
query10 = session.query(Stuinfo)\
    .filter(and_(Stuinfo.stu_id>=1,Stuinfo.stu_id<4))
for stu in query10:
    print(stu)
    print('name: %s  sex: %s sid: %s' % (stu.stu_name,stu.stu_sex,stu.stu_id))
print('-' * 30)
query11 = session.query(Stuinfo)\
    .filter(or_(Stuinfo.stu_id<=1,Stuinfo.stu_id>4))
for stu in query11:
    print(stu)
    print('name: %s  sex: %s sid: %s' % (stu.stu_name,stu.stu_sex,stu.stu_id))
print('-' * 30)
query12 = session.query(Stuinfo)
print(query12.all())
# for stu in query12.all():
#     print(stu)
#     print('name: %s  sex: %s sid: %s' % (stu.stu_name, stu.stu_sex, stu.stu_id))
print(query12.first())
print('-' * 30)
query13 = session.query(Stuinfo.stu_id,Stuinfo.stu_name).filter(Stuinfo.stu_id==4)
print(query13.one())
print(query13.scalar())
print('-' * 30)
Example #34
def dequeue(queueName, userId, visibility_timeout=None, session=None):
    """
    Dequeue subject to queue configuration (max_outstanding_message and vis timeout).
    If queue's max_outstanding_messages > 0 then messages are hidden and returned with a receipt handle rather than deleting the message.

    :param queueName:
    :param userId:
    :param visibility_timeout:
    :param session:
    :return:
    """
    if not session:
        session = db.Session

    ret = {}

    # Is it cached?
    cached_record = config_cache().lookup(key=(userId, queueName))
    if cached_record:
        outstanding_count_setting = cached_record["max_outstanding_messages"]
    else:
        metarecord = (
            session.query(QueueMeta)
            .filter_by(queueName=queueName, userId=userId)
            .first()
        )
        config_cache().cache_it(
            key=(userId, queueName), obj=copy.deepcopy(_to_dict(metarecord))
        )
        outstanding_count_setting = metarecord.max_outstanding_messages
        metarecord = None

    if outstanding_count_setting < 0:
        metarecord = (
            session.query(QueueMeta)
            .filter_by(queueName=queueName, userId=userId)
            .first()
        )
        result = (
            session.query(Queue)
            .with_for_update(of=Queue)
            .filter_by(queueName=queueName, userId=userId, popped=False)
            .order_by(desc(Queue.priority))
            .order_by(asc(Queue.queueId))
            .first()
        )

        if result:
            result.update({"popped": True})
            dbobj = _to_dict(result)
            ret.update(dbobj)
            ret["data"] = json.loads(dbobj["data"])
            session.delete(result)

            # Only update the count if returning a message
            if ret:
                rcount = (
                    session.query(Queue)
                    .filter_by(queueName=queueName, userId=userId, popped=False)
                    .count()
                )
                metarecord.update({"qlen": rcount})
    else:
        # Flush the record from the session and memory. Then reload it with a lock
        # Refetch with lock
        metarecord = (
            session.query(QueueMeta)
            .with_for_update(of=QueueMeta)
            .filter_by(queueName=queueName, userId=userId)
            .first()
        )

        # Limits are configured on this queue, do appropriate checks
        if _not_visible_msg_count(queueName, userId, session) < int(
            metarecord.max_outstanding_messages
        ):
            # Will select any unpopped or popped-but-expired messages
            result = (
                session.query(Queue)
                .with_for_update(of=Queue)
                .filter_by(queueName=queueName, userId=userId)
                .filter(
                    or_(
                        and_(
                            Queue.visible_at <= datetime.datetime.utcnow(),
                            Queue.popped == True,
                        ),
                        Queue.popped == False,
                    )
                )
                .order_by(desc(Queue.priority))
                .order_by(asc(Queue.queueId))
                .first()
            )

            if result:
                if visibility_timeout is None:
                    visibility_timeout = metarecord.visibility_timeout

                result.update(
                    {
                        "popped": True,
                        "receipt_handle": uuid.uuid4().hex,
                        "visible_at": datetime.datetime.utcnow()
                        + datetime.timedelta(seconds=visibility_timeout),
                    }
                )
                dbobj = _to_dict(result)
                ret.update(dbobj)
                ret["data"] = json.loads(dbobj["data"])

            # Don't update qlen until the message is deleted from the queue with an explicit delete operation
        else:
            # Threshold of outstanding messages exceeded.
            pass

    return ret
Example #35
def class2_start_end(class1, class2, class3, start_date, end_date):
    """Return filtered data between start and end date and filter by class."""

    # dates appear in the format : 2012-08-01

    if len(start_date) < 10:
        start_date = start_date + "-01"

    if len(end_date) < 10:
        end_date = end_date + "-31"

    # classification appears in this format : Class I

    if class1 == 'true' and class2 == 'true' and class3 == 'true':
        start_end_class_recalls = db.session.query(food_db).\
            filter(food_db.recall_date >= start_date).\
            filter(food_db.recall_date <= end_date).\
            statement

        df = pd.read_sql_query(start_end_class_recalls, db.session.bind)
        recalls_class_list = df.to_json(orient='records')

        return recalls_class_list

    elif class1 == 'true' and class2 == 'true':
        start_end_class_recalls = db.session.query(food_db).\
            filter(food_db.recall_date >= start_date).\
            filter(food_db.recall_date <= end_date).\
            filter(or_(food_db.classification == "Class I", food_db.classification == "Class II")).\
            statement

        df = pd.read_sql_query(start_end_class_recalls, db.session.bind)
        recalls_class_list = df.to_json(orient='records')

        return recalls_class_list

    elif class1 == 'true' and class3 == 'true':
        start_end_class_recalls = db.session.query(food_db).\
            filter(food_db.recall_date >= start_date).\
            filter(food_db.recall_date <= end_date).\
            filter(or_(food_db.classification == "Class I", food_db.classification == "Class III")).\
            statement

        df = pd.read_sql_query(start_end_class_recalls, db.session.bind)
        recalls_class_list = df.to_json(orient='records')

        return recalls_class_list

    elif class2 == 'true' and class3 == 'true':
        start_end_class_recalls = db.session.query(food_db).\
            filter(food_db.recall_date >= start_date).\
            filter(food_db.recall_date <= end_date).\
            filter(or_(food_db.classification == "Class II", food_db.classification == "Class III")).\
            statement

        df = pd.read_sql_query(start_end_class_recalls, db.session.bind)
        recalls_class_list = df.to_json(orient='records')

        return recalls_class_list

    elif class1 == 'true':
        start_end_class_recalls = db.session.query(food_db).\
            filter(food_db.recall_date >= start_date).\
            filter(food_db.recall_date <= end_date).\
            filter(food_db.classification == "Class I").\
            statement

        df = pd.read_sql_query(start_end_class_recalls, db.session.bind)
        recalls_class_list = df.to_json(orient='records')

        return recalls_class_list

    elif class2 == 'true':
        start_end_class_recalls = db.session.query(food_db).\
            filter(food_db.recall_date >= start_date).\
            filter(food_db.recall_date <= end_date).\
            filter(food_db.classification == "Class II").\
            statement

        df = pd.read_sql_query(start_end_class_recalls, db.session.bind)
        recalls_class_list = df.to_json(orient='records')

        return recalls_class_list

    elif class3 == 'true':
        start_end_class_recalls = db.session.query(food_db).\
            filter(food_db.recall_date >= start_date).\
            filter(food_db.recall_date <= end_date).\
            filter(food_db.classification == "Class III").\
            statement

        df = pd.read_sql_query(start_end_class_recalls, db.session.bind)
        recalls_class_list = df.to_json(orient='records')

        return recalls_class_list

    else:
        # Use Pandas to perform the sql query
        start_end_class_recalls = db.session.query(food_db).\
            filter(food_db.recall_date >= start_date).\
            filter(food_db.recall_date <= end_date).\
            statement

        df = pd.read_sql_query(start_end_class_recalls, db.session.bind)
        recalls_class_list = df.to_json(orient='records')

        return recalls_class_list
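Since the classification filter above is just an OR over a list of equality tests, an equivalent and often clearer spelling uses in_(). A minimal sketch, assuming the same food_db model, db session, and classes list as in the function above:

# Equivalent to or_(*[food_db.classification == c for c in classes]):
query = db.session.query(food_db).\
    filter(food_db.recall_date >= start_date).\
    filter(food_db.recall_date <= end_date).\
    filter(food_db.classification.in_(classes))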
# like
session.query(User).filter(User.name.like("%张%"))
# in
session.query(User).filter(User.id.in_([1, 2, 3]))
# not in
session.query(User).filter(~User.id.in_([1, 2, 3]))
# is null
session.query(User).filter(User.age == None)
# is not null
session.query(User).filter(User.age != None)
# and
from sqlalchemy import and_
session.query(User).filter(and_(User.id == 1, User.name == '张三'))
# or
from sqlalchemy import or_
session.query(User).filter(or_(User.id == 1, User.name == '张三'))
# limit / offset
session.query(User).offset(2).limit(3).all()
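The snippets above assume an existing User model and session. A minimal, self-contained sketch of the dynamic or_ pattern (requires SQLAlchemy 1.4+ for the declarative_base import location; the model and data here are illustrative only):

from sqlalchemy import Column, Integer, String, create_engine, or_
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([User(name='ed'), User(name='wendy'), User(name='mary')])
session.commit()

# or_ takes any number of conditions, so a dynamic list can be unpacked:
wanted = ['ed', 'mary']
rows = session.query(User).filter(or_(*[User.name == n for n in wanted])).all()
print([u.name for u in rows])  # ['ed', 'mary']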

# =========
from sqlalchemy import func


def test_func():
    session = Session()

    result = session.query(func.count(User.id)).scalar()
    print("count:", result)
    result = session.query(func.max(User.id)).scalar()
    print("max:", result)
    result = session.query(func.min(User.id)).scalar()
    print("min:", result)
Example #37
0
    def recentThreeItems(self):
        items = []
        # Limit matches the function name: the three most recent rows.
        for u in Price.query.filter(or_(Price.symbol == 'SP', Price.symbol == 'BTC', Price.symbol == 'BINDEX')).order_by(Price.date.desc()).limit(3):
            items.append(u.__dict__)

        return items
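Note that a mapped instance's __dict__ also carries SQLAlchemy's internal _sa_instance_state entry, so the dicts returned above are not directly JSON-serialisable. A sketch of a hypothetical variant that strips it, assuming the same Price model:

    def recentThreeItemsClean(self):
        rows = Price.query.filter(
            or_(Price.symbol == 'SP', Price.symbol == 'BTC', Price.symbol == 'BINDEX')
        ).order_by(Price.date.desc()).limit(3)
        # Drop SQLAlchemy's _sa_instance_state bookkeeping key before returning.
        return [{k: v for k, v in row.__dict__.items() if not k.startswith('_sa_')}
                for row in rows]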
Example #38
0
def demarrer_scanner(hWaitStop=None, batch=None):
    logginghashscan.info('Starting a Hash scanner instance : ' + threadname)

    print ''
    print '\tPlease log in to launch scan'
    print ''
    username = raw_input('Username: ')
    password = raw_input('Password: ')
    print ''

    # Get user
    u = session.query(User).filter_by(username=username).first()

    # No user or bad password
    if not u or hashPassword(password) != u.password:
        logginghashscan.critical(
            'Username or password incorrect, stopping the initialization...')
        raw_input()
        return

    # Get KEY and decrypt MASTER_KEY
    keyFromPassword = crypto.keyFromText(password,
                                         base64.b64decode(u.b64_kdf_salt))
    MASTER_KEY = crypto.decrypt(u.encrypted_master_key, keyFromPassword)

    mk_cksum = session.query(GlobalConfig).filter_by(
        key='master_key_checksum').first()

    # No checksum in config ???
    if not mk_cksum:
        logginghashscan.critical(
            'Database is broken, please create a new one, stopping the initialization...'
        )
        del MASTER_KEY
        raw_input()
        return

    # Someone has been playing with the database !
    if checksum(MASTER_KEY) != mk_cksum.value:
        logginghashscan.critical(
            'MASTER_KEY may have been altered, stopping the initialization...')
        del MASTER_KEY
        raw_input()
        return

    logginghashscan.info('Login successful !')
    # INITIALIZATION

    # TODO : initialise all IOCs in DB, then link them to CP

    all_xmliocs = session.query(XMLIOC).order_by(XMLIOC.name.asc())
    all_cp = session.query(ConfigurationProfile).order_by(
        ConfigurationProfile.name.asc())

    ioc_by_cp = {}
    for cp in all_cp:
        if cp.ioc_list == '':
            logginghashscan.warning('No IOC defined for profile "%s"' %
                                    cp.name)
            continue

        ioc_by_cp[cp.id] = []
        for e in cp.ioc_list.split(','):
            ioc_by_cp[cp.id].append(int(e))

    tree_by_ioc = {}

    # Retrieves evaluators for current mode
    FLAT_MODE = (IOC_MODE == 'flat')
    allowedElements = {}
    evaluatorList = hash_modules.flatEvaluatorList if FLAT_MODE else hash_modules.logicEvaluatorList

    for name, classname in evaluatorList.items():
        allowedElements[name] = classname.evalList

    # Parse XML Ioc into IOC trees according to what we can do
    for xmlioc in all_xmliocs:

        content = base64.b64decode(xmlioc.xml_content)
        oip = openiocparser.OpenIOCParser(content,
                                          allowedElements,
                                          FLAT_MODE,
                                          fromString=True)
        oip.parse()
        iocTree = oip.getTree()

        # Trees may be stripped from non valid elements
        if iocTree is not None:
            tree_by_ioc[xmlioc.id] = {'name': xmlioc.name, 'tree': iocTree}

    # Each configuration profile has a set of trees
    tree_by_cp = {
        cpid: {i: tree_by_ioc[i]
               for i in ioclist}
        for (cpid, ioclist) in ioc_by_cp.items()
    }

    halt = False
    tache = None
    batchquery = None

    # Batch filtering
    if batch is not None:
        logginghashscan.info('Filtering for batch "%s"' % batch)
        batchquery = session.query(Batch).filter(Batch.name == batch).first()

        if batchquery is None:
            logginghashscan.error('Unknown batch "%s" ...' % batch)
            halt = True

    # LAUNCH
    # Main loop
    while not halt:
        try:

            # Get targets to be scanned
            # and that are not currently being scanned
            # or that don't have any retry left
            queue = session.query(Task).filter_by(
                hashscanned=False, reserved_ioc=False,
                reserved_hash=False).filter(Task.retries_left_hash > 0)

            # Batch filtering
            if batchquery is not None:
                queue = queue.filter_by(batch_id=batchquery.id)

            taille_queue = queue.count()

            # Compute the time after which targets are still recovering from last scan
            # Gets target which last retry is NULL or before that time
            limite_a_reessayer = datetime.datetime.now() - datetime.timedelta(
                0, SECONDES_ENTRE_TENTATIVES)
            a_scanner = queue.filter(
                or_(Task.last_retry_hash <= limite_a_reessayer,
                    Task.last_retry_hash == None))
            taille_a_scanner = a_scanner.count()

            # Reads this list
            while taille_a_scanner > 0:

                # Max priority
                priorite_max = a_scanner.order_by(
                    Task.priority_hash.desc()).first().priority_hash
                taches_priorite_max = a_scanner.filter(
                    Task.priority_hash == priorite_max)
                nbre_taches_priorite_max = taches_priorite_max.count()
                if BASE_DE_DONNEES_QUEUE.startswith('sqlite'):
                    tache = taches_priorite_max.order_by(func.random()).first()
                else:
                    tache = taches_priorite_max.order_by(func.newid()).first()

                # Mutex on the task
                tache.reserved_hash = True
                tache.date_debut = datetime.datetime.now()
                session.commit()

                logginghashscan.debug(
                    '==============================================================================='
                )
                logginghashscan.debug('Wake up, there is work to do !')
                logginghashscan.info('Queue size : ' + str(taille_queue) +
                                     ', including ' + str(taille_a_scanner) +
                                     ' to scan, including ' +
                                     str(nbre_taches_priorite_max) +
                                     ' at top priority (' + str(priorite_max) +
                                     ')')

                logginghashscan.debug('  --------------------------------')
                logginghashscan.info('         Starting Hash Scan')
                logginghashscan.info('        Target : ' + str(tache.ip))
                logginghashscan.debug('  --------------------------------')

                # Recover Windows Credential and Configuration Profile from Batch
                batch = session.query(Batch).filter_by(
                    id=tache.batch_id).first()
                wc = session.query(WindowsCredential).filter_by(
                    id=batch.windows_credential_id).first()
                cp = session.query(ConfigurationProfile).filter_by(
                    id=batch.configuration_profile_id).first()

                if not wc:
                    raise Exception('WindowsCredential %d does not exist' %
                                    tache.windows_credential_id)

                if not cp:
                    raise Exception('ConfigurationProfile %d does not exist' %
                                    tache.configuration_profile_id)

                # Decrypt password using MASTER_KEY and create target object
                targetPassword = crypto.decrypt(wc.encrypted_password,
                                                MASTER_KEY)
                targetObject = {
                    'ip': tache.ip,
                    'login': wc.login,
                    'password': targetPassword,
                    'domain': wc.domain,
                }

                # If high confidentiality is enabled, create local directory if needed
                if cp.host_confidential:
                    logginghashscan.info('"High confidentiality" mode enabled')
                    testdir = os.path.join(IOC_COMPONENT_ROOT,
                                           IOC_CONFIDENTIAL_DIRECTORY)
                    if not os.path.isdir(testdir):
                        logginghashscan.info(
                            'Creating confidential directory %s' % testdir)
                        os.makedirs(testdir)

                # Let the scan begin
                if cp.id in tree_by_cp.keys():
                    resultats_scan = scan(targetObject, tree_by_cp[cp.id],
                                          cp.host_confidential)
                else:
                    logginghashscan.warning('No IOC to scan (profile=%s)' %
                                            cp.name)
                    resultats_scan = {}

                # Analyze the results
                analyse(resultats_scan, tache)

                # Update queue size
                taille_a_scanner = a_scanner.count()

                try:
                    # If launched as a service (probably removed soon, TODO)
                    halt = (win32event.WaitForSingleObject(
                        hWaitStop, 2000) == win32event.WAIT_OBJECT_0)
                except:
                    pass
                if halt:
                    # Stop signal encountered
                    break

            if halt:
                logginghashscan.info('Stopping Hash scanner : ' + threadname)
                break
            logginghashscan.debug('(Hash scanner sleeping for ' + str(SLEEP) + ' seconds...)' \
                + (' (' + str(taille_queue) + ' waiting)' if taille_queue > 0 else ''))
            time.sleep(SLEEP)
        except KeyboardInterrupt:
            halt = True
        except Exception, e:
            logginghashscan.error('Exception caught : %s, %s, %s' %
                                  (repr(e), str(e.message), str(e)))

            # Cancel changes and unreserve task
            session.rollback()
            if tache is not None:
                tache.reserved_hash = False
                tache.retries_left_hash = max(0, tache.retries_left_hash - 1)
            session.commit()
Example #39
0
    def episode_lookup(session=None, only_cached=False, **lookup_params):
        series_name = lookup_params.get('series_name') or lookup_params.get(
            'title')
        show_id = lookup_params.get('tvmaze_id') or lookup_params.get(
            'tvdb_id')
        lookup_type = lookup_params.get('series_id_type')

        season_number = lookup_params.get('series_season')
        episode_number = lookup_params.get('series_episode')

        episode_date = lookup_params.get('series_date')

        # Verify we have enough parameters for search
        if not any([series_name, show_id]):
            raise LookupError('Not enough parameters to lookup episode')
        if lookup_type == 'sequence':
            raise LookupError('TVMaze does not support sequence type searches')
        if lookup_type == 'ep' and not all([season_number, episode_number]):
            raise LookupError('Not enough parameters to lookup episode')
        elif lookup_type == 'date' and not episode_date:
            raise LookupError('Not enough parameters to lookup episode')

        # Get series
        series = APITVMaze.series_lookup(session=session,
                                         only_cached=only_cached,
                                         **lookup_params)
        if not series:
            raise LookupError(
                'Could not find series with the following parameters: {0}'.
                format(lookup_params))

        # See if episode already exists in cache
        log.debug('searching for episode of show {0} in cache'.format(
            series.name))
        episode = session.query(TVMazeEpisodes).filter(
            and_(TVMazeEpisodes.series_id == series.tvmaze_id,
                 TVMazeEpisodes.season_number == season_number,
                 TVMazeEpisodes.number == episode_number)).one_or_none()

        # Logic for cache only mode
        if only_cached:
            if episode:
                log.debug(
                    'forcing cache for episode id {3}, number {0}, season {1} for show {2}'
                    .format(episode.number, episode.season_number, series.name,
                            episode.tvmaze_id))
                return episode
        if episode and not episode.expired:
            log.debug(
                'found episode id {3}, number {0}, season {1} for show {2} in cache'
                .format(episode.number, episode.season_number, series.name,
                        episode.tvmaze_id))

            return episode

        # Lookup episode via its type (number or airdate)
        if lookup_type == 'date':
            episode_date = datetime.strftime(episode_date, '%Y-%m-%d')
            tvmaze_episode = get_episode(series.tvmaze_id,
                                         date=episode_date)[0]
        else:
            # TODO will this match all series_id types?
            log.debug(
                'fetching episode {0} season {1} for series_id {2} for tvmaze'.
                format(episode_number, season_number, series.tvmaze_id))
            tvmaze_episode = get_episode(series.tvmaze_id,
                                         season=season_number,
                                         number=episode_number)
        # See if episode exists in DB
        try:
            episode = session.query(TVMazeEpisodes).filter(
                or_(
                    TVMazeEpisodes.tvmaze_id == tvmaze_episode['id'],
                    and_(
                        TVMazeEpisodes.number == tvmaze_episode['number'],
                        TVMazeEpisodes.season_number == tvmaze_episode['season'],
                        TVMazeEpisodes.series_id == series.tvmaze_id))).one_or_none()
        except MultipleResultsFound:
            # TVMaze must have messed up and now we have to clean up that mess. Delete any row for this season
            # that hasn't been updated in the last hour. Can't trust any of the cached data, but deleting new data
            # might have some unintended consequences.
            log.warning(
                'Episode lookup in cache returned multiple results. Deleting the cached data.'
            )
            deleted_rows = session.query(TVMazeEpisodes).filter(
                and_(TVMazeEpisodes.season_number == tvmaze_episode['season'],
                     TVMazeEpisodes.series_id == series.tvmaze_id)).filter(
                         TVMazeEpisodes.last_update <= datetime.now() -
                         timedelta(hours=1)).delete()
            log.debug('Deleted %s rows', deleted_rows)
            episode = None

        if episode:
            log.debug(
                'found expired episode {0} in cache, refreshing data.'.format(
                    episode.tvmaze_id))
            episode.update(tvmaze_episode)
        else:
            log.debug('creating new episode for show {0}'.format(series.name))
            episode = TVMazeEpisodes(tvmaze_episode, series.tvmaze_id)
            session.add(episode)

        return episode
Example #40
0
    def get_query(cls, model, info, **args):
        from sqlalchemy import or_
        from sqlalchemy.orm import load_only
        query = super(FilteringConnectionField, cls).get_query(model, info)
        from sqlalchemy.orm import joinedload
        distinct_filter = False  # default value for distinct
        op = 'eq'
        jsonkey_input = None
        ALLOWED_OPS = ['gt', 'lt', 'le', 'ge', 'eq', 'ne',
                       '=',  '>',  '<',  '>=', '<=', '!=']

        cont_fields = ['edges', 'node']
        skip_fields = ['totalCount', 'pageInfo']
        fields = info.field_asts  # [0].selection_set.selections
        load_fields = {}
        field_names = []

        def convert(name):
            import re
            s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
            return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

        include_all = False
        while isinstance(fields, list):
            for field in fields:
                name = field.name.value
                if name in cont_fields:
                    fields = field.selection_set.selections
                elif name in skip_fields:  # or name[0].isupper():
                    continue
                else:
                    if name[0].isupper():  # hybrid property
                        names = models.hybrid_prop_parameters(name)
                        if 'all' in names:
                            include_all = True
                            fields = None
                            continue
                    else:
                        names = [name]
                    for name in names:
                        if field.selection_set is not None:
                            keyname = name
                            field_names.append(name)
                            load_fields.update({keyname: []})
                            fields = field.selection_set.selections
                        else:
                            load_fields[keyname].append(convert(name))
                            fields = None

        if not include_all:
            query = query.options(load_only(*load_fields[field_names[0]]))

            if len(field_names) > 1:
                column = getattr(model, convert(field_names[1]), None)
                query = query.options(joinedload(column, innerjoin=True).load_only(*load_fields[field_names[1]]))

        for field, value in args.items():
            if field == 'distinct':
                distinct_filter = value
            elif field == 'op':
                if value in ALLOWED_OPS:
                    op = value
            elif field == 'jsonkey':
                jsonkey_input = value
            elif field == 'order':
                ascending = not value.startswith('-')
                column_name = value if not value.startswith('-') else value[1:]
                column = getattr(model, convert(column_name), None)

                if ascending:
                    query = query.order_by(column)
                else:
                    query = query.order_by(column.desc())

        for field, value in args.items():
            if field not in (cls.RELAY_ARGS + cls.SPECIAL_ARGS):
                from sqlalchemy.sql.expression import func, cast
                jsonb = False
                jsonkey = None
                if '__' in field:
                    field, jsonkey = field.split('__')
                elif '->' in str(value):
                    jsonkey, value = value.split('->')
                if jsonkey is None:
                    jsonkey = jsonkey_input

                column = getattr(model, field, None)

                if str(column.type) == "TSVECTOR":
                    query = query.filter(column.match("'{}'".format(value)))

                elif str(column.type) == "JSONB":
                    jsonb = True

                    if jsonkey is not None:
                        query = query.filter(column.has_key(jsonkey))
                        column = column[jsonkey].astext
                    values = value.split('+')

                    for value in values:
                        value = value.strip()
                        if value.startswith("~"):
                            column = cast(column, sqlalchemy.String)
                            # if field == 'reactants' or field == 'products':
                            #    column = func.replace(func.replace(column, 'gas', ''), 'star', '')

                            search_string = '%' + value[1:] + '%'

                            if not value == "~":
                                query = query.filter(
                                    column.ilike(search_string))
                            # else:
                            #    query = query.group_by(column)

                        else:
                            if field == 'reactants' or field == 'products':
                                if not 'star' in value and not 'gas' in value:
                                    or_statement = or_(column.has_key(value),
                                                       column.has_key(value +
                                                                      'gas'),
                                                       column.has_key(value +
                                                                      'star'))

                                    query = query.filter(or_statement)
                                else:
                                    query = query.filter(column.has_key(value))
                            else:
                                if jsonkey is not None:
                                    query = query.filter(column == value)
                                else:
                                    query = query.filter(column.has_key(value))

                    # if distinct_filter:
                        # TO DO: SELECT DISTINCT jsonb_object_keys(reactants) FROM reaction

                elif isinstance(value, six.string_types):
                    if value.startswith("~") or '+' in value:
                        values = value.split('+')
                        for value in values:
                            value = value.replace('~', '')
                            search_string = '%' + value + '%'
                            if not value == "~":
                                query = query.filter(column.ilike(search_string))
                    else:
                        query = query.filter(column == value)

                    # if distinct_filter:
                    #     query = query.distinct(column)#.group_by(column)

                else:
                    if op in ['ge', '>=']:
                        query = query.filter(column >= value)
                    elif op in ['gt', '>']:
                        query = query.filter(column > value)
                    elif op in ['lt', '<']:
                        query = query.filter(column < value)
                    elif op in ['le', '<=']:
                        query = query.filter(column <= value)
                    elif op in ['ne', '!=']:
                        query = query.filter(column != value)
                    else:
                        query = query.filter(column == value)

                if distinct_filter:
                    query = query.distinct(column)  # .group_by(getattr(model, field))

        return query
Example #41
0
    def packages(self,
                 with_private=False,
                 limit=None,
                 return_query=False,
                 context=None):
        '''Return this group's active and pending packages.

        Returns all packages in this group with VDM revision state ACTIVE or
        PENDING.

        :param with_private: if True, include the group's private packages
        :type with_private: boolean

        :param limit: the maximum number of packages to return
        :type limit: int

        :param return_query: if True, return the SQLAlchemy query object
            instead of the list of Packages resulting from the query
        :type return_query: boolean

        :returns: a list of this group's packages
        :rtype: list of ckan.model.package.Package objects

        '''
        user_is_org_member = False
        context = context or {}
        user_is_admin = context.get('user_is_admin', False)
        user_id = context.get('user_id')
        if user_is_admin:
            user_is_org_member = True

        elif self.is_organization and user_id:
            query = meta.Session.query(Member) \
                    .filter(Member.state == 'active') \
                    .filter(Member.table_name == 'user') \
                    .filter(Member.group_id == self.id) \
                    .filter(Member.table_id == user_id)
            user_is_org_member = len(query.all()) != 0

        query = meta.Session.query(_package.Package).\
            filter(
                or_(_package.Package.state == core.State.ACTIVE,
                    _package.Package.state == core.State.PENDING)). \
            filter(group_table.c.id == self.id).\
            filter(member_table.c.state == 'active')

        # orgs do not show private datasets unless the user is a member
        if self.is_organization and not user_is_org_member:
            query = query.filter(_package.Package.private == False)
        # groups (not orgs) never show private datasets
        if not self.is_organization:
            query = query.filter(_package.Package.private == False)

        query = query.join(member_table,
                           member_table.c.table_id == _package.Package.id)
        query = query.join(group_table,
                           group_table.c.id == member_table.c.group_id)

        if limit is not None:
            query = query.limit(limit)

        if return_query:
            return query
        else:
            return query.all()
Example #42
0
def test_consistent_hashing_uniformly_distributed():
    fetcher_amount = 3
    fqdn_amount = 50

    rest.delete_full_database(full=True)
    rest.create_database(fetcher_amount=fetcher_amount,
                         fqdn_amount=fqdn_amount)

    fetcher_hashes = database.get_fetcher_hashes(db)
    hashes_sorted = sorted(fetcher_hashes, key=lambda k: k["hash"])

    fetcher_hash_range = []
    for i in range(len(hashes_sorted) - 1):
        fetcher_hash_range.append(
            dict(
                uuid=hashes_sorted[i]["uuid"],
                min_hash=hashes_sorted[i]["hash"],
                max_hash=hashes_sorted[i + 1]["hash"],
            ))
    fetcher_hash_range.append(
        dict(
            uuid=hashes_sorted[-1]["uuid"],
            min_hash=hashes_sorted[-1]["hash"],
            max_hash=hashes_sorted[0]["hash"],
        ))

    fetcher_hash_range_sorted_by_min_hash = sorted(fetcher_hash_range,
                                                   key=lambda k: k["min_hash"])

    for fetcher_hash_range in fetcher_hash_range_sorted_by_min_hash:
        if fetcher_hash_range["min_hash"] < fetcher_hash_range["max_hash"]:
            fetcher_hash_range["url_count"] = (db.query(
                func.count(db_models.Frontier.fqdn)).filter(
                    and_(
                        db_models.Frontier.fqdn_hash >=
                        fetcher_hash_range["min_hash"],
                        db_models.Frontier.fqdn_hash <
                        fetcher_hash_range["max_hash"],
                    ))).first()[0]
        else:
            fetcher_hash_range["url_count"] = (db.query(
                func.count(db_models.Frontier.fqdn)).filter(
                    or_(
                        db_models.Frontier.fqdn_hash >=
                        fetcher_hash_range["min_hash"],
                        db_models.Frontier.fqdn_hash <
                        fetcher_hash_range["max_hash"],
                    )).first()[0])

    return_list = defaultdict(int)
    for d in fetcher_hash_range_sorted_by_min_hash:
        return_list[d["uuid"]] += d["url_count"]

    group_summed_hash_list = [{
        "id": id_,
        "count": count_
    } for id_, count_ in return_list.items()]

    url_counts = [f["count"] for f in group_summed_hash_list]

    assert (len(fetcher_hash_range_sorted_by_min_hash) == fetcher_amount *
            c.ch_hash_amount)
    mean = sum(url_counts) / len(url_counts)
    variance = sum((xi - mean)**2 for xi in url_counts) / len(url_counts)

    assert variance <= 5 * mean
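The and_/or_ split in the ranged counts above encodes hash-ring membership: a range that wraps past the top of the ring (min_hash > max_hash) needs OR, while an ordinary range needs AND. The same predicate in plain Python, as a sketch:

def in_fetcher_range(h, min_hash, max_hash):
    """True if hash h falls in [min_hash, max_hash) on the ring."""
    if min_hash < max_hash:
        return min_hash <= h < max_hash   # ordinary range: AND
    return h >= min_hash or h < max_hash  # wrapped range: OR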
Example #43
0
for name, in session.query(User.name). \
                            filter(User.fullname == 'Ed Jones'):
    print(name)

# +------------------------------------------------------------------+
# | conjunctions can be passed to filter() as well                   |
# +----------------------------------------------------- (35 / 72) --+
#
# >>> from sqlalchemy import or_
# >>> for name, in session.query(User.name). \
# ...         filter(or_(User.fullname == 'Ed Jones', User.id < 5)):
# ...     print(name)

from sqlalchemy import or_
for name, in session.query(User.name). \
                    filter(or_(User.fullname == 'Ed Jones', User.id < 5)):
    print(name)
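The same conjunction can also be written with the overloaded | operator; the comparisons must be parenthesized because | binds more tightly than ==:

for name, in session.query(User.name). \
        filter((User.fullname == 'Ed Jones') | (User.id < 5)):
    print(name)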

# +------------------------------------------------------------------+
# | multiple filter() calls join by AND just like select().where()   |
# +----------------------------------------------------- (36 / 72) --+
#
# >>> for user in session.query(User). \
# ...         filter(User.name == 'ed'). \
# ...         filter(User.fullname == 'Ed Jones'):
# ...     print(user)
# [SQL]: SELECT user.id AS user_id,

for user in session.query(User). \
                    filter(User.name == 'ed'). \
                    filter(User.fullname == 'Ed Jones'):
    print(user)
Example #44
0
    def check2(inData):
        '''
        Validate the sensor fields in inData (name, number, ip, mac,
        stationId) before creating a record.
        ret: {
            0: 'error',
            1: 'success',
            2: 'no name in inData',
            3: 'name is empty',
            4: 'no number in inData',
            5: 'number is empty',
            6: 'no ip in inData',
            7: 'ip is empty',
            8: 'no mac in inData',
            9: 'mac is empty',
            10: 'no stationId in inData',
            11: 'stationId is illegal',
            12: 'name repeat',
            13: 'number repeat',
            14: 'mac repeat',
        }
        '''
        ret = 1
        if (not inData.has_key('name')):
            ret = 2
            return ret
        else:
            if (inData['name'] == ''):
                ret = 3
                return ret
        if (not inData.has_key('number')):
            ret = 4
            return ret
        else:
            if (inData['number'] == ''):
                ret = 5
                return ret
        if (not inData.has_key('ip')):
            ret = 6
            return ret
        else:
            if (inData['ip'] == ''):
                ret = 7
                return ret
        if (not inData.has_key('mac')):
            ret = 8
            return ret
        else:
            if (inData['mac'] == ''):
                ret = 9
                return ret

        if (not inData.has_key('stationId')):
            ret = 10
            return ret
        else:
            if (inData['stationId'] == ''):
                ret = 11
                return ret
            if (int(inData['stationId']) != 0):
                tmpAllStation = Station.select(
                    {'id': int(inData['stationId'])})
                if (len(tmpAllStation['data']) == 0):
                    ret = 11
                    return ret

        query = session.query(Sensor)
        tmpSensor = query.filter(
            or_(Sensor.name == inData['name'],
                Sensor.number == inData['number'],
                Sensor.mac == inData['mac'])).all()
        for tSensor in tmpSensor:
            if (tSensor.name == inData['name']):
                ret = 12
                return ret
            if (tSensor.number == inData['number']):
                ret = 13
                return ret
            if (tSensor.mac == inData['mac']):
                ret = 14
                return ret

        ret = 1
        return ret
Example #45
0
  def get_platform_data(self, platform_handle, variable, uom, start_date, wq_tests_data):
    start_time = time.time()
    try:
      self.logger.debug("Platform: %s Obs: %s(%s) Date: %s query" % (platform_handle, variable, uom, start_date))

      station = platform_handle.replace('.', '_')
      var_name = '%s_avg_%s' % (station, variable)
      end_date = start_date
      begin_date = start_date - timedelta(hours=24)
      dir_id = None
      sensor_id = self.xenia_obs_db.sensorExists(variable, uom, platform_handle, 1)
      if variable == 'wind_speed':
        dir_id = self.xenia_obs_db.sensorExists('wind_from_direction', 'degrees_true', platform_handle, 1)

      if sensor_id != -1 and sensor_id is not None:
        recs = self.xenia_obs_db.session.query(sl_multi_obs) \
          .filter(sl_multi_obs.m_date >= begin_date.strftime('%Y-%m-%dT%H:%M:%S')) \
          .filter(sl_multi_obs.m_date < end_date.strftime('%Y-%m-%dT%H:%M:%S')) \
          .filter(sl_multi_obs.sensor_id == sensor_id) \
          .filter(or_(sl_multi_obs.qc_level == qaqcTestFlags.DATA_QUAL_GOOD, sl_multi_obs.qc_level == None)) \
          .order_by(sl_multi_obs.m_date).all()
        if dir_id is not None:
          dir_recs = self.xenia_obs_db.session.query(sl_multi_obs) \
            .filter(sl_multi_obs.m_date >= begin_date.strftime('%Y-%m-%dT%H:%M:%S')) \
            .filter(sl_multi_obs.m_date < end_date.strftime('%Y-%m-%dT%H:%M:%S')) \
            .filter(sl_multi_obs.sensor_id == dir_id) \
            .filter(or_(sl_multi_obs.qc_level == qaqcTestFlags.DATA_QUAL_GOOD, sl_multi_obs.qc_level == None)) \
            .order_by(sl_multi_obs.m_date).all()

        if len(recs):
          if variable == 'wind_speed':
            if sensor_id is not None and dir_id is not None:
              wind_dir_tuples = []
              direction_tuples = []
              scalar_speed_avg = None
              speed_count = 0
              for wind_speed_row in recs:
                for wind_dir_row in dir_recs:
                  if wind_speed_row.m_date == wind_dir_row.m_date:
                    # self.logger.debug("Building tuple for Speed(%s): %f Dir(%s): %f" % (
                    # wind_speed_row.m_date, wind_speed_row.m_value, wind_dir_row.m_date, wind_dir_row.m_value))
                    if scalar_speed_avg is None:
                      scalar_speed_avg = 0
                    scalar_speed_avg += wind_speed_row.m_value
                    speed_count += 1
                    # Vector using both speed and direction.
                    wind_dir_tuples.append((wind_speed_row.m_value, wind_dir_row.m_value))
                    # Vector with speed as constant(1), and direction.
                    direction_tuples.append((1, wind_dir_row.m_value))
                    break

              if len(wind_dir_tuples):
                avg_speed_dir_components = calcAvgSpeedAndDir(wind_dir_tuples)
                self.logger.debug("Platform: %s Avg Wind Speed: %f(m_s-1) %f(mph) Direction: %f" % (platform_handle,
                                                                                                    avg_speed_dir_components[
                                                                                                      0],
                                                                                                    avg_speed_dir_components[
                                                                                                      0],
                                                                                                    avg_speed_dir_components[
                                                                                                      1]))

                # Unity components, just direction with speeds all 1.
                avg_dir_components = calcAvgSpeedAndDir(direction_tuples)
                scalar_speed_avg = scalar_speed_avg / speed_count
                wq_tests_data[var_name] = scalar_speed_avg
                wind_dir_var_name = '%s_avg_%s' % (station, 'wind_from_direction')
                wq_tests_data[wind_dir_var_name] = avg_dir_components[1]
                self.logger.debug(
                  "Platform: %s Avg Scalar Wind Speed: %f(m_s-1) %f(mph) Direction: %f" %
                  (platform_handle, scalar_speed_avg, scalar_speed_avg,
                   avg_dir_components[1]))
          #Calculate vector direction.
          elif variable == 'sea_surface_wave_to_direction':
            direction_tuples = []
            for dir_row in recs:
              # Vector with speed as constant(1), and direction.
              direction_tuples.append((1, dir_row.m_value))

            if len(direction_tuples):
              # Unity components, just direction with speeds all 1.
              avg_dir_components = calcAvgSpeedAndDir(direction_tuples)
              wq_tests_data[var_name] = avg_dir_components[1]
              self.logger.debug(
                "Platform: %s Avg Scalar Direction: %f" % (platform_handle,
                                                           avg_dir_components[1]))

          else:
            wq_tests_data[var_name] = sum(rec.m_value for rec in recs) / len(recs)
            self.logger.debug("Platform: %s Avg %s: %f Records used: %d" % (
              platform_handle, variable, wq_tests_data[var_name], len(recs)))

            if variable == 'water_conductivity':
              water_con = wq_tests_data[var_name]
              #if uom == 'uS_cm-1':
              water_con = water_con / 1000.0
              salinity_var = '%s_avg_%s' % (station, 'salinity')
              wq_tests_data[salinity_var] = 0.47413 / (math.pow((1 / water_con), 1.07) - 0.7464 * math.pow(10, -3))
              self.logger.debug("Platform: %s Avg %s: %f Records used: %d" % (
                platform_handle, 'salinity', wq_tests_data[salinity_var], len(recs)))
        else:
          self.logger.error(
            "Platform: %s sensor: %s(%s) Date: %s had no data" % (platform_handle, variable, uom, start_date))
      else:
        self.logger.error("Platform: %s sensor: %s(%s) does not exist" % (platform_handle, variable, uom))
      self.logger.debug("Platform: %s query finished in %f seconds" % (platform_handle, time.time()-start_time))
    except Exception as e:
      self.logger.exception(e)
      return False

    return True
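column == None does render as IS NULL in SQLAlchemy, but .is_(None) states the intent explicitly and avoids == None lint warnings. The QC filter above could equivalently be written as (same sl_multi_obs and qaqcTestFlags as in this example):

qc_good_or_unchecked = or_(
    sl_multi_obs.qc_level == qaqcTestFlags.DATA_QUAL_GOOD,
    sl_multi_obs.qc_level.is_(None))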
Example #46
0
 def Run(self):
     with self.protocol.lockCmd:
         CBaseCommand.Run(self)
         user_name = self.body.get(BaseCommand.PN_USERNAME)
         if user_name is not None:
             user_name = user_name.strip()
         password = self.body[BaseCommand.PN_PASSWORD]
         email = self.body.get(BaseCommand.PN_EMAIL)
         if email is not None:
             email = email.strip()
         mobile_phone = self.body.get(BaseCommand.PN_MOBLEPHONE)
         if mobile_phone is not None:
             mobile_phone = mobile_phone.strip()
         respond = self.GetResp()
         with SBDB.session_scope() as session:
             if user_name is None and password is None and email is None:
                 respond.SetErrorCode(BaseCommand.CS_PARAMLACK)
             elif user_name is not None and (
                     session.query(SBDB_ORM.Account).filter(
                         or_(SBDB_ORM.Account.user_name == user_name,
                             SBDB_ORM.Account.email == user_name,
                             SBDB_ORM.Account.mobile_phone == user_name))
                     .first() is not None or len(user_name) < 2):
                 respond.SetErrorCode(BaseCommand.CS_USERNAME)
             elif email is not None and (
                     session.query(SBDB_ORM.Account).filter(
                         or_(SBDB_ORM.Account.user_name == email,
                             SBDB_ORM.Account.email == email,
                             SBDB_ORM.Account.mobile_phone == email))
                     .first() is not None or not Util.validateEmail(email)):
                 respond.SetErrorCode(BaseCommand.CS_EMAIL)
             elif mobile_phone is not None and (
                     session.query(SBDB_ORM.Account).filter(
                         or_(SBDB_ORM.Account.user_name == mobile_phone,
                             SBDB_ORM.Account.email == mobile_phone,
                             SBDB_ORM.Account.mobile_phone == mobile_phone))
                     .first() is not None
                     or not Util.validateMobilePhone(mobile_phone)):
                 respond.SetErrorCode(BaseCommand.CS_MOBILEPHONE)
             else:
                 try:
                     account = SBDB_ORM.Account()
                     account.language_id = 2
                     account.email = email
                     account.password = Util.hash_password(password)
                     account.user_name = user_name
                     account.mobile_phone = mobile_phone
                     account.version = 0
                     apartment = SBDB_ORM.Apartment()
                     apartment.arm_state = BaseCommand.PV_ARM_OFF
                     apartment.name = "Home"
                     apartment.scene_id = None
                     apartment.version = 0
                     account.apartments.append(apartment)
                     session.add(account)
                     session.commit()
                     respond.body[
                         BaseCommand.PN_VERSION] = apartment.version
                     respond.body[BaseCommand.PN_APARTMENTID] = apartment.id
                     respond.body[BaseCommand.PN_NAME] = apartment.name
                 except SQLAlchemyError as e:
                     respond.SetErrorCode(BaseCommand.CS_DBEXCEPTION)
                     logging.error("transport %d:%s",
                                   id(self.protocol.transport), e)
                     session.rollback()
         respond.Send()
Example #47
0
 def exists(session: Session, name: str) -> bool:
     return session.query(Location).filter(
         or_(Location.full_name.like(name),
             Location.alias.like(name))).count() > 0
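like() with no wildcard in the pattern behaves as (collation-dependent) equality, so the check above only matches complete names. If substring matching is intended, wildcards must be supplied; a sketch of a hypothetical case-insensitive variant, assuming the same Location model:

 def exists_partial(session: Session, name: str) -> bool:
     # Wrap the term in SQL wildcards and match case-insensitively.
     pattern = '%{}%'.format(name)
     return session.query(Location).filter(
         or_(Location.full_name.ilike(pattern),
             Location.alias.ilike(pattern))).count() > 0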
Example #48
0
def query_data(session, agency_code, period, year, page_start, page_stop):
    """ Request A file data

        Args:
            session: DB session
            agency_code: FREC or CGAC code for generation
            period: The period for which to get GTAS data
            year: The year for which to get GTAS data
            page_start: Beginning of pagination
            page_stop: End of pagination

        Returns:
            The rows using the provided dates and page size for the given agency.
    """
    # set a boolean to determine if the original agency code is frec or cgac
    frec_provided = len(agency_code) == 4
    tas_gtas = tas_gtas_combo(session, period, year)
    # Make a list of FRECs to compare to for 011 AID entries
    frec_list = []
    if not frec_provided:
        frec_list = session.query(FREC.frec_code).select_from(outerjoin(CGAC, FREC, CGAC.cgac_id == FREC.cgac_id)).\
            filter(CGAC.cgac_code == agency_code).all()
    # Group agencies together that need to be grouped
    agency_array = []
    if agency_code == '097':
        agency_array = ['017', '021', '057', '097']
    elif agency_code == '1601':
        agency_array = ['1601', '016']
    elif agency_code == '1125':
        agency_array = ['1125', '011']

    # Save the ATA filter
    agency_filters = []
    if not agency_array:
        agency_filters.append(
            tas_gtas.c.allocation_transfer_agency == agency_code)
    else:
        agency_filters.append(
            tas_gtas.c.allocation_transfer_agency.in_(agency_array))

    # Save the AID filter
    if agency_code == '097' and not frec_provided:
        agency_filters.append(
            and_(tas_gtas.c.allocation_transfer_agency.is_(None),
                 tas_gtas.c.agency_identifier.in_(agency_array)))
    elif not frec_provided:
        agency_filters.append(
            and_(tas_gtas.c.allocation_transfer_agency.is_(None),
                 tas_gtas.c.agency_identifier == agency_code))
    else:
        agency_filters.append(
            and_(tas_gtas.c.allocation_transfer_agency.is_(None),
                 tas_gtas.c.fr_entity_type == agency_code))

    # If we're checking a CGAC, we want to filter on all of the related FRECs for AID 011, otherwise just filter on
    # that FREC
    if frec_list:
        agency_filters.append(
            and_(tas_gtas.c.allocation_transfer_agency.is_(None),
                 tas_gtas.c.agency_identifier == '011',
                 tas_gtas.c.fr_entity_type.in_(frec_list)))
    elif not frec_provided:
        agency_filters.append(
            and_(tas_gtas.c.allocation_transfer_agency.is_(None),
                 tas_gtas.c.agency_identifier == '011',
                 tas_gtas.c.fr_entity_type == agency_code))

    rows = initial_query(session, tas_gtas.c).\
        filter(func.coalesce(tas_gtas.c.financial_indicator2, '') != 'F').\
        filter(or_(*agency_filters)).\
        group_by(tas_gtas.c.allocation_transfer_agency,
                 tas_gtas.c.agency_identifier,
                 tas_gtas.c.beginning_period_of_availa,
                 tas_gtas.c.ending_period_of_availabil,
                 tas_gtas.c.availability_type_code,
                 tas_gtas.c.main_account_code,
                 tas_gtas.c.sub_account_code)

    # Slice the final query
    rows = rows.slice(page_start, page_stop)

    return rows
Example #49
0
    def get_alarms(self, tenant_id, query_parms=None, offset=None, limit=None):
        if not query_parms:
            query_parms = {}

        with self._db_engine.connect() as conn:
            parms = {}
            ad = self.ad
            am = self.am
            mdd = self.mdd
            md = self.md
            a = self.a

            query = (self.base_subquery_list.where(
                ad.c.tenant_id == bindparam('b_tenant_id')))

            parms['b_tenant_id'] = tenant_id

            if 'alarm_definition_id' in query_parms:
                query = query.where(
                    ad.c.id == bindparam('b_alarm_definition_id'))
                parms['b_alarm_definition_id'] = query_parms[
                    'alarm_definition_id']

            if 'metric_name' in query_parms:
                query = query.where(a.c.id.in_(self.get_a_am_query))
                parms['b_md_name'] = query_parms['metric_name'].encode('utf8') if six.PY2 else \
                    query_parms['metric_name']

            if 'severity' in query_parms:
                severities = query_parms['severity'].split('|')
                query = query.where(
                    or_(ad.c.severity == bindparam('b_severity' + str(i))
                        for i in range(len(severities))))
                for i, s in enumerate(severities):
                    parms['b_severity' +
                          str(i)] = s if six.PY3 else s.encode('utf-8')

            if 'state' in query_parms:
                query = query.where(a.c.state == bindparam('b_state'))
                parms['b_state'] = query_parms['state']

            if 'lifecycle_state' in query_parms:
                query = (query.where(
                    a.c.lifecycle_state == bindparam('b_lifecycle_state')))
                parms['b_lifecycle_state'] = query_parms['lifecycle_state'] \
                    if six.PY3 else query_parms['lifecycle_state'].encode('utf8')

            if 'link' in query_parms:
                query = query.where(a.c.link == bindparam('b_link'))
                parms['b_link'] = query_parms['link'] if six.PY3 \
                    else query_parms['link'].encode('utf8')

            if 'state_updated_start_time' in query_parms:
                query = (query.where(
                    a.c.state_updated_at >= bindparam('b_state_updated_at')))

                date_str = query_parms['state_updated_start_time'] if six.PY3 \
                    else query_parms['state_updated_start_time'].encode('utf8')
                date_param = datetime.strptime(date_str,
                                               '%Y-%m-%dT%H:%M:%S.%fZ')
                parms['b_state_updated_at'] = date_param

            if 'metric_dimensions' in query_parms:
                sub_query = select([a.c.id])
                sub_query_from = (a.join(am, am.c.alarm_id == a.c.id).join(
                    mdd, mdd.c.id == am.c.metric_definition_dimensions_id))

                sub_query_md_base = select([md.c.dimension_set_id
                                            ]).select_from(md)

                for i, metric_dimension in enumerate(
                        query_parms['metric_dimensions'].items()):

                    md_name = "b_md_name_{}".format(i)

                    values_cond = None
                    values_cond_flag = False

                    if metric_dimension and metric_dimension[1]:
                        if '|' in metric_dimension[1]:
                            values = metric_dimension[1].encode('utf8').split('|') if six.PY2 else \
                                metric_dimension[1].split('|')
                            sub_values_cond = []
                            for j, value in enumerate(values):
                                sub_md_value = "b_md_value_{}_{}".format(i, j)
                                sub_values_cond.append(
                                    md.c.value == bindparam(sub_md_value))
                                parms[sub_md_value] = value
                            values_cond = or_(*sub_values_cond)
                            values_cond_flag = True
                        else:
                            md_value = "b_md_value_{}".format(i)
                            values_cond = (md.c.value == bindparam(md_value))
                            values_cond_flag = True
                            parms[md_value] = metric_dimension[1]

                    sub_query_md = (sub_query_md_base.where(
                        md.c.name == bindparam(md_name)))
                    if values_cond_flag:
                        sub_query_md = (sub_query_md.where(values_cond))

                    sub_query_md = (sub_query_md.distinct().alias(
                        'md_{}'.format(i)))

                    sub_query_from = (sub_query_from.join(
                        sub_query_md, sub_query_md.c.dimension_set_id ==
                        mdd.c.metric_dimension_set_id))

                    parms[md_name] = metric_dimension[0].encode('utf8') if six.PY2 else \
                        metric_dimension[0]

                    sub_query = (
                        sub_query.select_from(sub_query_from).distinct())
                    query = query.where(a.c.id.in_(sub_query))
            order_columns = []
            if 'sort_by' in query_parms:
                columns_mapper = \
                    {'alarm_id': a.c.id,
                     'alarm_definition_id': ad.c.id,
                     'alarm_definition_name': ad.c.name,
                     'state_updated_timestamp': a.c.state_updated_at,
                     'updated_timestamp': a.c.updated_at,
                     'created_timestamp': a.c.created_at,
                     'severity': models.field_sort(ad.c.severity, list(map(text, ["'LOW'",
                                                                                  "'MEDIUM'",
                                                                                  "'HIGH'",
                                                                                  "'CRITICAL'"]))),
                     'state': models.field_sort(a.c.state, list(map(text, ["'OK'",
                                                                           "'UNDETERMINED'",
                                                                           "'ALARM'"])))}

                order_columns, received_cols = self._remap_columns(
                    query_parms['sort_by'], columns_mapper)

                if not received_cols.get('alarm_id', False):
                    order_columns.append(a.c.id)
            else:
                order_columns = [a.c.id]

            if limit:
                query = query.limit(bindparam('b_limit'))
                parms['b_limit'] = limit + 1

            if offset:
                query = query.offset(bindparam('b_offset'))
                parms['b_offset'] = offset

            query = (query.order_by(*order_columns).alias('alarm_id_list'))

            main_query = (self.base_query.select_from(
                self.base_query_from.join(query,
                                          query.c.id == a.c.id)).distinct())

            main_query = main_query.order_by(*order_columns)

            return [
                dict(row)
                for row in conn.execute(main_query, parms).fetchall()
            ]
Example #50
0
 def get(session: Session, name: str) -> 'Location':
     return session.query(Location).filter(
         or_(Location.full_name.like(name),
             Location.alias.like(name))).first()
Example #51
0
    def raw_data_for_geos(self, geos, db_table=None):
        """ Pull raw data for a list of geo models.

        Returns a dict mapping the geo ids to table data.
        """
        # initial values
        data = {'%s-%s' % (geo.geo_level, geo.geo_code): {'estimate': {}, 'error': {}}
                for geo in geos}

        db_table = db_table or self.get_db_table()

        session = get_session()
        try:
            geo_values = None
            fields = [getattr(db_table.model, f) for f in self.fields]
            rows = session\
                .query(db_table.model.geo_level,
                       db_table.model.geo_code,
                       func.sum(db_table.model.total).label('total'),
                       *fields)\
                .group_by(db_table.model.geo_level, db_table.model.geo_code, *fields)\
                .order_by(db_table.model.geo_level, db_table.model.geo_code, *fields)\
                .filter(or_(and_(
                    db_table.model.geo_level == geo.geo_level,
                    db_table.model.geo_code == geo.geo_code,
                    db_table.model.geo_version == geo.version)
                    for geo in geos))\
                .all()

            def permute(level, field_keys, rows):
                field = self.fields[level]
                total = None
                denominator = 0

                for key, rows in groupby(rows, lambda r: getattr(r, field)):
                    new_keys = field_keys + [key]
                    col_id = self.column_id(new_keys)

                    if level + 1 < len(self.fields):
                        value = permute(level + 1, new_keys, rows)
                    else:
                        # we've bottomed out

                        rows = list(rows)
                        if all(row.total is None for row in rows):
                            value = None
                        else:
                            value = sum(row.total or 0 for row in rows)

                        if self.denominator_key and self.denominator_key == key:
                            # this row must be used as the denominator total,
                            # rather than as an entry in the table
                            denominator = value
                            continue

                    if value is not None:
                        total = (total or 0) + value
                    geo_values['estimate'][col_id] = value
                    geo_values['error'][col_id] = 0

                if self.denominator_key:
                    total = denominator

                return total

            # rows for each geo
            for geo_id, geo_rows in groupby(
                    rows, lambda r: (r.geo_level, r.geo_code)):
                geo_values = data['%s-%s' % geo_id]
                total = permute(0, [], geo_rows)

                # total
                if self.total_column:
                    geo_values['estimate'][self.total_column] = total
                    geo_values['error'][self.total_column] = 0

        finally:
            session.close()

        return data
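The heart of the example is `permute()`: because the query orders rows by every field, nested `groupby()` calls can peel off one field per level and only sum once the recursion bottoms out. A self-contained sketch of the same shape, using dicts as rows (`roll_up` and the field names are illustrative):

# Recursive groupby() roll-up in the style of permute() above.
from itertools import groupby

def roll_up(fields, keys, rows):
    field, rest = fields[0], fields[1:]
    total = 0
    for key, group in groupby(rows, key=lambda r: r[field]):
        if rest:
            value = roll_up(rest, keys + [key], group)
        else:
            value = sum(r['total'] for r in group)
        print('-'.join(keys + [key]), value)
        total += value
    return total

# groupby() assumes rows arrive sorted by the grouping fields, just as
# the query above guarantees with its order_by().
rows = [
    {'sex': 'female', 'age': '0-9', 'total': 3},
    {'sex': 'female', 'age': '10-19', 'total': 4},
    {'sex': 'male', 'age': '0-9', 'total': 5},
]
assert roll_up(['sex', 'age'], [], rows) == 12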
Example #52
0
    def get_alarms_count(self,
                         tenant_id,
                         query_parms=None,
                         offset=None,
                         limit=None):
        if not query_parms:
            query_parms = {}

        with self._db_engine.connect() as conn:
            parms = {}
            ad = self.ad
            am = self.am
            mdd = self.mdd
            mde = self.mde
            md = self.md
            a = self.a

            query_from = a.join(ad, ad.c.id == a.c.alarm_definition_id)

            parms['b_tenant_id'] = tenant_id

            group_by_columns = []

            if 'group_by' in query_parms:
                group_by_columns = query_parms['group_by']
                sub_group_by_columns = []
                metric_group_by = {
                    'metric_name', 'dimension_name', 'dimension_value'
                }.intersection(set(query_parms['group_by']))
                if metric_group_by:
                    sub_query_columns = [am.c.alarm_id]
                    if 'metric_name' in metric_group_by:
                        sub_group_by_columns.append(
                            mde.c.name.label('metric_name'))
                    if 'dimension_name' in metric_group_by:
                        sub_group_by_columns.append(
                            md.c.name.label('dimension_name'))
                    if 'dimension_value' in metric_group_by:
                        sub_group_by_columns.append(
                            md.c.value.label('dimension_value'))

                    sub_query_columns.extend(sub_group_by_columns)

                    sub_query_from = (mde.join(
                        mdd, mde.c.id == mdd.c.metric_definition_id).join(
                            md, mdd.c.metric_dimension_set_id ==
                            md.c.dimension_set_id).join(
                                am, am.c.metric_definition_dimensions_id ==
                                mdd.c.id))

                    sub_query = (select(sub_query_columns).select_from(
                        sub_query_from).distinct().alias('metrics'))

                    query_from = query_from.join(
                        sub_query, sub_query.c.alarm_id == a.c.id)

            query_columns = [func.count().label('count')]
            query_columns.extend([column(col) for col in group_by_columns])

            query = (select(query_columns).select_from(query_from).where(
                ad.c.tenant_id == bindparam('b_tenant_id')))

            if 'alarm_definition_id' in query_parms:
                parms['b_alarm_definition_id'] = query_parms[
                    'alarm_definition_id']
                query = query.where(
                    ad.c.id == bindparam('b_alarm_definition_id'))

            if 'state' in query_parms:
                parms['b_state'] = query_parms['state'] if six.PY3 else \
                    query_parms['state'].encode('utf8')
                query = query.where(a.c.state == bindparam('b_state'))

            if 'severity' in query_parms:
                severities = query_parms['severity'].split('|')
                query = query.where(
                    or_(ad.c.severity == bindparam('b_severity' + str(i))
                        for i in range(len(severities))))
                for i, s in enumerate(severities):
                    parms['b_severity' +
                          str(i)] = s if six.PY3 else s.encode('utf8')

            if 'lifecycle_state' in query_parms:
                parms['b_lifecycle_state'] = query_parms['lifecycle_state'] if six.PY3 else \
                    query_parms['lifecycle_state'].encode('utf8')
                query = query.where(
                    a.c.lifecycle_state == bindparam('b_lifecycle_state'))

            if 'link' in query_parms:
                parms['b_link'] = query_parms['link'] if six.PY3 else \
                    query_parms['link'].encode('utf8')
                query = query.where(a.c.link == bindparam('b_link'))

            if 'state_updated_start_time' in query_parms:
                date_str = query_parms['state_updated_start_time'] if six.PY3 \
                    else query_parms['state_updated_start_time'].encode('utf8')
                date_param = datetime.strptime(date_str,
                                               '%Y-%m-%dT%H:%M:%S.%fZ')
                parms['b_state_updated_at'] = date_param
                query = query.where(
                    a.c.state_updated_at >= bindparam('b_state_updated_at'))

            if 'metric_name' in query_parms:
                query = query.where(a.c.id.in_(self.get_a_am_query))
                parms['b_md_name'] = query_parms['metric_name'] if six.PY3 else \
                    query_parms['metric_name'].encode('utf8')

            if 'metric_dimensions' in query_parms:
                sub_query = select([a.c.id])
                sub_query_from = (a.join(am, am.c.alarm_id == a.c.id).join(
                    mdd, mdd.c.id == am.c.metric_definition_dimensions_id))

                sub_query_md_base = select([md.c.dimension_set_id
                                            ]).select_from(md)

                for i, metric_dimension in enumerate(
                        query_parms['metric_dimensions'].items()):
                    dimension_value = metric_dimension[1] if six.PY3 else \
                        metric_dimension[1].encode('utf8')

                    if '|' in dimension_value:
                        dimension_value = tuple(dimension_value.split('|'))

                    md_name = "b_md_name_{}".format(i)
                    md_value = "b_md_value_{}".format(i)

                    sub_query_md = (sub_query_md_base.where(
                        md.c.name == bindparam(md_name)))

                    if isinstance(dimension_value, tuple):
                        sub_query_md = (sub_query_md.where(
                            md.c.value.op('IN')(bindparam(md_value))))
                    else:
                        sub_query_md = (sub_query_md.where(
                            md.c.value == bindparam(md_value)))

                    sub_query_md = (sub_query_md.distinct().alias(
                        'md_{}'.format(i)))

                    sub_query_from = (sub_query_from.join(
                        sub_query_md, sub_query_md.c.dimension_set_id ==
                        mdd.c.metric_dimension_set_id))

                    parms[md_name] = metric_dimension[0] if six.PY3 else \
                        metric_dimension[0].encode('utf8')
                    parms[md_value] = dimension_value

                # Build and apply the id sub-query once, after every
                # dimension join has been added (not once per dimension).
                sub_query = (
                    sub_query.select_from(sub_query_from).distinct())
                query = query.where(a.c.id.in_(sub_query))

            if group_by_columns:
                query = (query.order_by(*group_by_columns).group_by(
                    *group_by_columns))

            if limit:
                query = query.limit(bindparam('b_limit'))
                parms['b_limit'] = limit + 1

            if offset:
                query = query.offset(bindparam('b_offset'))
                parms['b_offset'] = offset

            query = query.distinct()
            return [dict(row) for row in conn.execute(query, parms).fetchall()]
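The `severity` handling above is a handy pattern: a `'|'`-separated value expands into numbered bind parameters joined with `or_()`, so the values travel separately from the statement text. A compilable sketch (the table and column are illustrative):

from sqlalchemy import Column, MetaData, String, Table, bindparam, or_

alarms = Table('alarm', MetaData(), Column('severity', String))

def severity_clause(raw, parms):
    # One numbered bind parameter per '|'-separated value.
    severities = raw.split('|')
    clause = or_(*[alarms.c.severity == bindparam('b_severity%d' % i)
                   for i in range(len(severities))])
    for i, severity in enumerate(severities):
        parms['b_severity%d' % i] = severity
    return clause

parms = {}
print(severity_clause('LOW|HIGH', parms))
# alarm.severity = :b_severity0 OR alarm.severity = :b_severity1
print(parms)  # {'b_severity0': 'LOW', 'b_severity1': 'HIGH'}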
Example #53
0
def or_operation(exp, object_class, target_class, query):
  """Operator generate sqlalchemy for or operation"""
  return sqlalchemy.or_(
      build_expression(exp["left"], object_class, target_class, query),
      build_expression(exp["right"], object_class, target_class, query))
Example #54
0
    def search(cls, *, limit=100, page=1, accounts=None, locations=None,
               properties=None, include_disabled=False, return_query=False):
        """Search for resources based on the provided filters. If `return_query` a sub-class of `sqlalchemy.orm.Query`
        is returned instead of the resource list.

        Args:
            limit (`int`): Number of results to return. Default: 100
            page (`int`): Pagination offset for results. Default: 1
            accounts (`list` of `int`): A list of account id's to limit the returned resources to
            locations (`list` of `str`): A list of locations as strings to limit the search for
            properties (`dict`): A `dict` containing property name and value pairs. Values can be either a str or a list
            of strings, in which case a boolean OR search is performed on the values
            include_disabled (`bool`): Include resources from disabled accounts. Default: False
            return_query (`bool`): Returns the query object prior to adding the limit and offset functions. Allows for
            sub-classes to amend the search feature with extra conditions. The calling function must handle pagination
            on its own

        Returns:
            A (`int`, `list` of `Resource`) tuple of the total result count and the current page, or a
            `sqlalchemy.orm.Query` when `return_query` is True
        """
        qry = db.Resource.order_by(Resource.resource_id).filter(
            Resource.resource_type_id == ResourceType.get(cls.resource_type).resource_type_id
        )

        if not include_disabled:
            qry = qry.join(Account, Resource.account_id == Account.account_id).filter(Account.enabled == 1)

        if session:
            qry = qry.filter(Resource.account_id.in_(session['accounts']))

        if accounts:
            qry = qry.filter(Resource.account_id.in_([Account.get(acct).account_id for acct in accounts]))

        if locations:
            qry = qry.filter(Resource.location.in_(locations))

        if properties:
            for prop_name, value in properties.items():
                alias = aliased(ResourceProperty)

                qry = qry.join(alias, Resource.resource_id == alias.resource_id)

                if isinstance(value, list):
                    where_clause = []
                    for item in value:
                        where_clause.append(alias.value == item)

                    qry = qry.filter(
                        and_(
                            alias.name == prop_name,
                            or_(*where_clause)
                        ).self_group()
                    )
                else:
                    qry = qry.filter(
                        and_(
                            alias.name == prop_name,
                            alias.value == value
                        ).self_group()
                    )

        if return_query:
            return qry

        total = qry.count()
        qry = qry.limit(limit)
        qry = qry.offset((page - 1) * limit if page > 1 else 0)

        return total, [cls(x) for x in qry.all()]
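The per-property block above leans on a fresh `aliased()` join for each property so that conditions on different properties apply to different joined rows and can be ANDed together. A self-contained sketch of that technique against in-memory SQLite, with stripped-down stand-in models (these are not the project's real `Resource`/`ResourceProperty` definitions):

from sqlalchemy import (Column, ForeignKey, Integer, String, and_,
                        create_engine, or_)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import aliased, sessionmaker

Base = declarative_base()

class Resource(Base):
    __tablename__ = 'resource'
    id = Column(Integer, primary_key=True)

class ResourceProperty(Base):
    __tablename__ = 'resource_property'
    id = Column(Integer, primary_key=True)
    resource_id = Column(Integer, ForeignKey('resource.id'))
    name = Column(String)
    value = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Resource(id=1))
session.add_all([
    ResourceProperty(resource_id=1, name='state', value='running'),
    ResourceProperty(resource_id=1, name='env', value='prod'),
])
session.commit()

qry = session.query(Resource)
for prop_name, values in {'state': ['running', 'stopped'],
                          'env': ['prod']}.items():
    # One alias per property; a single join could never satisfy
    # name == 'state' AND name == 'env' on the same row.
    alias = aliased(ResourceProperty)
    qry = qry.join(alias, Resource.id == alias.resource_id)
    qry = qry.filter(and_(alias.name == prop_name,
                          or_(*[alias.value == v for v in values])).self_group())

print(qry.count())  # 1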
Example #55
0
def get_tie_match(teamid):
    amountie = models.Result.query.join(models.Match).filter(
        or_(models.Match.awayteam_id == teamid,
            models.Match.hometeam_id == teamid),
        models.Result.typeresult == models.ETypeResult.Tie).all()
    return amountie
Example #56
0
def all_appointments(status="",
                     typ="",
                     emergency="",
                     location="",
                     sequence=""):
    employee_in_db = g.employee

    request_type = typ
    request_status = status
    request_sequence = sequence

    if employee_in_db:
        if request_type == "all":
            if request_status == "all":
                if emergency == "all":
                    if location == "all":
                        appointments = Appointment.query.filter().all()
                    else:
                        appointments = Appointment.query.filter(
                            Appointment.location == location).all()
                else:
                    if location == "all":
                        appointments = Appointment.query.filter(
                            Appointment.emergency == emergency).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.emergency == emergency)).all()

            elif request_status == "Processing&Operating&Discharged":
                if emergency == "all":
                    if location == "all":
                        appointments = Appointment.query.filter(
                            or_(Appointment.status == "Processing",
                                Appointment.status == "Operating",
                                Appointment.status == "Discharged")).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(
                                Appointment.location == location,
                                or_(Appointment.status == "Processing",
                                    Appointment.status == "Operating",
                                    Appointment.status ==
                                    "Discharged"))).all()
                else:
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(
                                Appointment.emergency == emergency,
                                or_(Appointment.status == "Processing",
                                    Appointment.status == "Operating",
                                    Appointment.status ==
                                    "Discharged"))).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(
                                Appointment.location == location,
                                Appointment.emergency == emergency,
                                or_(Appointment.status == "Processing",
                                    Appointment.status == "Operating",
                                    Appointment.status ==
                                    "Discharged"))).all()
            elif request_status == "Waiting":
                if emergency == "all":
                    if location == "all":
                        appointments = Appointment.query.filter(
                            Appointment.status == "").all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.status == "")).all()
                else:
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(Appointment.emergency == emergency,
                                 Appointment.status == "")).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.emergency == emergency,
                                 Appointment.status == "")).all()
            else:
                if emergency == "all":
                    if location == "all":
                        appointments = Appointment.query.filter(
                            Appointment.status == request_status).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.status == request_status)).all()
                else:
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(Appointment.emergency == emergency,
                                 Appointment.status == request_status)).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.emergency == emergency,
                                 Appointment.status == request_status)).all()
        elif request_type == "":
            appointments = Appointment.query.filter().all()
        else:
            if request_status == "all":
                if emergency == "all":
                    if location == "all":
                        appointments = Appointment.query.filter(
                            Appointment.pet_type == request_type).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.pet_type == request_type)).all()
                else:
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(Appointment.emergency == emergency,
                                 Appointment.pet_type == request_type)).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.emergency == emergency,
                                 Appointment.pet_type == request_type)).all()
            elif request_status == "Processing&Operating&Discharged":
                if emergency == "all":
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(
                                Appointment.pet_type == request_type,
                                or_(Appointment.status == "Processing",
                                    Appointment.status == "Operating",
                                    Appointment.status ==
                                    "Discharged"))).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(
                                Appointment.pet_type == request_type,
                                Appointment.location == location,
                                or_(Appointment.status == "Processing",
                                    Appointment.status == "Operating",
                                    Appointment.status ==
                                    "Discharged"))).all()
                else:
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(
                                Appointment.pet_type == request_type,
                                Appointment.emergency == emergency,
                                or_(Appointment.status == "Processing",
                                    Appointment.status == "Operating",
                                    Appointment.status ==
                                    "Discharged"))).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(
                                Appointment.pet_type == request_type,
                                Appointment.location == location,
                                Appointment.emergency == emergency,
                                or_(Appointment.status == "Processing",
                                    Appointment.status == "Operating",
                                    Appointment.status ==
                                    "Discharged"))).all()
            elif request_status == "Waiting":
                if emergency == "all":
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(Appointment.pet_type == request_type,
                                 Appointment.status == "")).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.pet_type == request_type,
                                 Appointment.location == location,
                                 Appointment.status == "")).all()
                else:
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(Appointment.pet_type == request_type,
                                 Appointment.emergency == emergency,
                                 Appointment.status == "")).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.pet_type == request_type,
                                 Appointment.location == location,
                                 Appointment.emergency == emergency,
                                 Appointment.status == "")).all()
            else:
                if emergency == "all":
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(Appointment.pet_type == request_type,
                                 Appointment.status == request_status)).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.pet_type == request_type,
                                 Appointment.status == request_status)).all()
                else:
                    if location == "all":
                        appointments = Appointment.query.filter(
                            and_(Appointment.emergency == emergency,
                                 Appointment.pet_type == request_type,
                                 Appointment.status == request_status)).all()
                    else:
                        appointments = Appointment.query.filter(
                            and_(Appointment.location == location,
                                 Appointment.emergency == emergency,
                                 Appointment.pet_type == request_type,
                                 Appointment.status == request_status)).all()

        appointment_list = []
        for item in appointments:
            list_item = {}
            list_item["id"] = item.id
            list_item["type"] = item.pet_type
            list_item["symptom"] = item.symptom
            list_item["date"] = str(item.date)
            list_item["location"] = item.location
            list_item["message"] = item.message
            list_item["priority"] = item.priority
            if item.emergency == "false":
                list_item["emergency"] = False
            elif item.emergency == "true":
                list_item["emergency"] = True
            if item.status == "":
                list_item["status"] = "Waiting"
            else:
                list_item["status"] = item.status
            if item.attendingDoctor == "":
                list_item["attendingDoctor"] = "Undetermined"
            else:
                list_item["attendingDoctor"] = item.attendingDoctor
            list_item["employeeId"] = item.employee_id
            if str(item.operationTime) == "1970-01-01":
                list_item["operationTime"] = "Undetermined"
            else:
                list_item["operationTime"] = str(item.operationTime)
            if str(item.dischargeDate) == "1970-01-01":
                list_item["dischargeDate"] = "Undetermined"
            else:
                list_item["dischargeDate"] = str(item.dischargeDate)
            appointment_list.append(list_item)

        if request_sequence == "date":
            appointment_list.sort(key=lambda item: item["date"])
        elif request_sequence == "priority":
            appointment_list.sort(key=lambda item: item["priority"])

        return jsonify({
            "code": 200,
            "msg": "Success",
            "data": {
                "appointments": appointment_list
            }
        })
    else:
        return jsonify({"code": 401, "msg": "Unauthorized"})
Example #57
0
def get_user(username):
    """Convenience method to look up the id for a username."""
    if not username:
        return None
    return m.session.query(User).filter(
        or_(User.email == username, User.username == username)).first()
Example #58
0
def countmatch(teamid):
    count = models.Match.query.filter(
        or_(models.Match.awayteam_id == teamid,
            models.Match.hometeam_id == teamid)).count()
    return count
Example #59
0
def content(site_id,
            g_supply_id,
            user,
            compression,
            finish_year,
            finish_month,
            months,
            now=None):
    if now is None:
        now = ct_datetime_now()
    report_context = {}
    sess = None
    month_list = list(
        c_months_u(finish_year=finish_year,
                   finish_month=finish_month,
                   months=months))
    start_date, finish_date = month_list[0][0], month_list[-1][-1]

    try:
        sess = Session()
        base_name = [
            "g_monthly_duration",
            hh_format(start_date).replace(" ",
                                          "_").replace(":",
                                                       "").replace("-", ""),
            "for",
            str(months),
            "months",
        ]

        forecast_from = chellow.computer.forecast_date()

        sites = (sess.query(Site).join(SiteGEra).join(GEra).filter(
            SiteGEra.is_physical == true()).distinct().order_by(Site.code))
        if site_id is not None:
            site = Site.get_by_id(sess, site_id)
            sites = sites.filter(Site.id == site.id)
            base_name.append("site")
            base_name.append(site.code)
        if g_supply_id is not None:
            g_supply = GSupply.get_by_id(sess, g_supply_id)
            base_name.append("g_supply")
            base_name.append(str(g_supply.id))
            sites = sites.filter(GEra.g_supply == g_supply)

        running_name, finished_name = chellow.dloads.make_names(
            "_".join(base_name) + ".ods", user)

        rf = open(running_name, "wb")
        site_rows = []
        g_era_rows = []

        era_header_titles = [
            "creation_date",
            "mprn",
            "supply_name",
            "exit_zone",
            "msn",
            "unit",
            "contract",
            "site_id",
            "site_name",
            "associated_site_ids",
            "month",
        ]
        site_header_titles = [
            "creation_date",
            "site_id",
            "site_name",
            "associated_site_ids",
            "month",
        ]
        summary_titles = ["kwh", "gbp", "billed_kwh", "billed_gbp"]

        vb_titles = []
        conts = (sess.query(GContract).join(GEra).join(GSupply).filter(
            GEra.start_date <= finish_date,
            or_(GEra.finish_date == null(), GEra.finish_date >= start_date),
        ).distinct().order_by(GContract.id))
        if g_supply_id is not None:
            conts = conts.filter(GEra.g_supply_id == g_supply_id)
        for cont in conts:
            title_func = chellow.computer.contract_func(
                report_context, cont, "virtual_bill_titles")
            if title_func is None:
                raise Exception("For the contract " + cont.name +
                                " there doesn't seem " +
                                "to be a 'virtual_bill_titles' function.")
            for title in title_func():
                if title not in vb_titles:
                    vb_titles.append(title)

        g_era_rows.append(era_header_titles + summary_titles + vb_titles)
        site_rows.append(site_header_titles + summary_titles)

        for month_start, month_finish in month_list:
            for site in sites.filter(
                    GEra.start_date <= month_finish,
                    or_(GEra.finish_date == null(),
                        GEra.finish_date >= month_start),
            ):
                site_kwh = site_gbp = site_billed_kwh = site_billed_gbp = 0

                for g_era in (sess.query(GEra).join(SiteGEra).filter(
                        SiteGEra.site == site,
                        SiteGEra.is_physical == true(),
                        GEra.start_date <= month_finish,
                        or_(GEra.finish_date == null(),
                            GEra.finish_date >= month_start),
                ).options(
                        joinedload(GEra.g_contract),
                        joinedload(GEra.g_supply),
                        joinedload(GEra.g_supply).joinedload(
                            GSupply.g_exit_zone),
                ).order_by(GEra.id)):

                    g_supply = g_era.g_supply

                    if g_supply_id is not None and g_supply.id != g_supply_id:
                        continue

                    ss_start = hh_max(g_era.start_date, month_start)
                    ss_finish = hh_min(g_era.finish_date, month_finish)

                    ss = GDataSource(
                        sess,
                        ss_start,
                        ss_finish,
                        forecast_from,
                        g_era,
                        report_context,
                        None,
                    )

                    contract = g_era.g_contract
                    vb_function = contract_func(report_context, contract,
                                                "virtual_bill")
                    if vb_function is None:
                        raise BadRequest(
                            "The contract " + contract.name +
                            " doesn't have the virtual_bill() function.")
                    vb_function(ss)
                    bill = ss.bill

                    try:
                        gbp = bill["net_gbp"]
                    except KeyError:
                        gbp = 0
                        bill["problem"] += (
                            "For the supply " + ss.mprn +
                            " the virtual bill " + str(bill) +
                            " from the contract " + contract.name +
                            " does not contain the net_gbp key.")
                    try:
                        kwh = bill["kwh"]
                    except KeyError:
                        kwh = 0
                        bill["problem"] += ("For the supply " + ss.mprn +
                                            " the virtual bill " + str(bill) +
                                            " from the contract " +
                                            contract.name +
                                            " does not contain the 'kwh' key.")

                    billed_kwh = billed_gbp = 0

                    g_era_associates = {
                        s.site.code
                        for s in g_era.site_g_eras if not s.is_physical
                    }

                    for g_bill in sess.query(GBill).filter(
                            GBill.g_supply == g_supply,
                            GBill.start_date <= ss_finish,
                            GBill.finish_date >= ss_start,
                    ):
                        bill_start = g_bill.start_date
                        bill_finish = g_bill.finish_date
                        bill_duration = (bill_finish - bill_start
                                         ).total_seconds() + (30 * 60)
                        overlap_duration = (min(bill_finish, ss_finish) - max(
                            bill_start, ss_start)).total_seconds() + (30 * 60)
                        overlap_proportion = overlap_duration / bill_duration
                        billed_kwh += overlap_proportion * float(g_bill.kwh)
                        billed_gbp += overlap_proportion * float(g_bill.net)

                    associated_site_ids = ",".join(sorted(g_era_associates))
                    g_era_rows.append([
                        make_val(v) for v in [
                            now,
                            g_supply.mprn,
                            g_supply.name,
                            g_supply.g_exit_zone.code,
                            g_era.msn,
                            g_era.g_unit.code,
                            contract.name,
                            site.code,
                            site.name,
                            associated_site_ids,
                            month_finish,
                            kwh,
                            gbp,
                            billed_kwh,
                            billed_gbp,
                        ]
                    ] + [make_val(bill.get(t)) for t in vb_titles])

                    site_kwh += kwh
                    site_gbp += gbp
                    site_billed_kwh += billed_kwh
                    site_billed_gbp += billed_gbp

                linked_sites = ", ".join(s.code
                                         for s in site.find_linked_sites(
                                             sess, month_start, month_finish))

                site_rows.append([
                    make_val(v) for v in [
                        now,
                        site.code,
                        site.name,
                        linked_sites,
                        month_finish,
                        site_kwh,
                        site_gbp,
                        site_billed_kwh,
                        site_billed_gbp,
                    ]
                ])
                sess.rollback()
            write_spreadsheet(rf, compression, site_rows, g_era_rows)

    except BadRequest as e:
        site_rows.append(["Problem " + e.description])
        write_spreadsheet(rf, compression, site_rows, g_era_rows)
    except BaseException:
        msg = traceback.format_exc()
        sys.stderr.write(msg + "\n")
        site_rows.append(["Problem " + msg])
        write_spreadsheet(rf, compression, site_rows, g_era_rows)
    finally:
        if sess is not None:
            sess.close()
        try:
            rf.close()
            os.rename(running_name, finished_name)
        except BaseException:
            msg = traceback.format_exc()
            r_name, f_name = chellow.dloads.make_names("error.txt", user)
            ef = open(r_name, "w")
            ef.write(msg + "\n")
            ef.close()
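The billed-amount loop above apportions each bill by overlap: both date ranges are treated as inclusive of a final half hour (the `30 * 60` terms), and the bill's value is scaled by the fraction of its duration that falls inside the reporting window. A minimal sketch of that arithmetic (`apportion` is an illustrative name):

from datetime import datetime

HH = 30 * 60  # ranges are inclusive of their final half hour

def apportion(bill_start, bill_finish, ss_start, ss_finish, amount):
    bill_duration = (bill_finish - bill_start).total_seconds() + HH
    overlap = (min(bill_finish, ss_finish) -
               max(bill_start, ss_start)).total_seconds() + HH
    return amount * overlap / bill_duration

# A two-day bill of 48 kWh overlapping a one-day window yields 24 kWh.
print(apportion(datetime(2020, 1, 1), datetime(2020, 1, 2, 23, 30),
                datetime(2020, 1, 2), datetime(2020, 1, 2, 23, 30), 48.0))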
Example #60
0
    def fetch_data(self, **kwargs):
        self._parse_parameters(kwargs['parameters_dic'])
        self._get_available_chart_type()

        portfolios = self._parameters.get(TagTypeChart.PORTFOLIO)
        esg_factors = self._parameters.get(TagTypeChart.ESG_METRICS)
        esg_details = self._parameters.get(TagTypeChart.DETAILS)
        date_parameters = self._parameters.get(TagTypeChart.DATE)[0]['name']
        metrics = self._parameters.get('metrics')

        metric_id = metrics[0]['id']
        self.xAxis['title'] = 'Buckets'
        if metric_id == -3 or (metrics[0]['tag_type_id'] == 8
                               and metric_id == 1):
            metric_id = 14
            self.yAxis['title'] = 'Sum of the Securities Weight'
        elif metric_id == 2 and metrics[0]['tag_type_id'] == 3:
            metric_id = 12
            self.yAxis['title'] = 'Total Value of Securities'
        elif metric_id == 1 and metrics[0]['tag_type_id'] == 3:
            metric_id = 13
            self.yAxis['title'] = 'Number of Securities'
        elif metric_id == 3 and metrics[0]['tag_type_id'] == 3:
            metric_id = 10
            self.yAxis['title'] = 'Cumulative Return'
        elif metric_id == 4 and metrics[0]['tag_type_id'] == 3:
            metric_id = 11
            self.yAxis['title'] = 'Return Daily'
        else:
            print('No such metric')

        if esg_details and len(esg_details) == 1:
            if esg_details[0]['id'] == 20:
                esg_factor_results = EsgFactorModel.query.filter(
                    or_(EsgFactorModel.level == 1,
                        EsgFactorModel.level == 2)).filter_by(
                            data_provider_id='AE')
            else:
                esg_factor_results = EsgFactorModel.query.filter(
                    or_(EsgFactorModel.level == 1,
                        EsgFactorModel.level == 2)).filter_by(
                            data_provider_id='AU')

            esg_factors = [{
                "id": factor.id,
                "name": factor.name
            } for factor in esg_factor_results]

        grouping_ids, _ = zip(
            *self._get_esg_grouping_ids(esg_factors[0]['id']))
        date = get_last_available_day_until_date_in_grouping_metric(
            date_parameters, portfolios[0], grouping_ids).strftime("%Y-%m-%d")
        if date:
            self.subtitle = 'Source date: ' + str(date)
            if len(metrics) > 1:
                self.message = 'Two or more metrics were selected; only the first one will be displayed'

            for factor in esg_factors:
                grouping_ids, _ = zip(
                    *self._get_esg_grouping_ids(factor['id']))
                portfolio_grouping_metric_result = (
                    PortfolioGroupingMetricModel.query
                    .filter_by(metric_id=metric_id)
                    .filter(PortfolioGroupingMetricModel.grouping_id.in_(
                        grouping_ids))
                    .join(PortfolioGroupingMetricModel.portfolio_metric_date)
                    .filter_by(portfolio_id=portfolios[0]['id'])
                    .filter_by(date_key=date)
                    .order_by(PortfolioGroupingMetricModel.grouping_id.asc())
                    .all())

                esg_data = {}
                esg_data['name'] = factor['name']
                esg_data['data'] = [
                    portfolio_grouping_metric.value
                    for portfolio_grouping_metric in
                    portfolio_grouping_metric_result
                ]

                if self._is_empty(esg_data['data']):
                    raise NoDataException

                self.data.append(esg_data)

            self.title = ('Multiple Channel ESG Score Distribution Histogram '
                          'of ' + portfolios[0]['name'])
        else:
            # Raising a string is invalid in Python 3; reuse the
            # NoDataException raised above, with a message.
            raise NoDataException('No data available on this day.')
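As a closing aside, a two-way `or_()` over one column, as used for `EsgFactorModel.level` above, can equally be written with `in_()`; both select the same rows. A tiny sketch with a detached column (rendered parameter names vary by SQLAlchemy version):

from sqlalchemy import Column, Integer, or_

level = Column('level', Integer)
print(or_(level == 1, level == 2))  # level = :level_1 OR level = :level_2
print(level.in_([1, 2]))            # e.g. level IN (:level_1, :level_2)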