def birth_time(cls):
        hour = cast(func.extract("hour", cls.birth_datetime), String)
        minute = cast(func.extract("minute", cls.birth_datetime), String)

        hour = case([(func.length(hour) == 1, "0" + hour)], else_=hour)
        minute = case([(func.length(minute) == 1, "0" + minute)], else_=minute)
        return hour + ":" + minute
Example #2
    def _calc():
        stats = {}
        if day_from is not None and day_to is not None:
            # fill in gaps
            for day in util.daterange(day_from,day_to):
                stats[day] = dict(total=0,done=0,help=0)

        # TODO: do the work in the database.
        q = session.query(cast(Article.pubdate,Date), Article)

        if day_from is not None:
            q = q.filter(cast(Article.pubdate, Date) >= day_from)
        if day_to is not None:
            q = q.filter(cast(Article.pubdate, Date) <= day_to)

        for day,art in q:
            if day not in stats:
                stats[day] = dict(total=0,done=0,help=0)
            stats[day]['total'] += 1
            if not art.needs_sourcing:
                stats[day]['done'] += 1

        stats = sorted(stats.iteritems(), key=lambda x: x[0], reverse=True)

        return [DailyStats(x[0], x[1]['total'], x[1]['done']) for x in stats]
Example #3
    def get(self, country_id):
        """
        Gather specified country from the database with its data
        country_id a non-zero, positive int
        return a json object representing the country
        """
        session = db.loadSession()

        assert isinstance(country_id, int)

        # Make the sql query
        result = session.query(
            # What to select
            # outerjoin defaults to a LEFT outer join, NOT full outer join
            db.Country.id,
            db.Country.name,
            func.array_agg_cust(array([cast(db.Olympics.id, String), cast(db.Olympics.year, String), db.Olympics.season, db.City.name]))
            )\
            .select_from(db.Country)\
            .outerjoin(db.City)\
            .outerjoin(db.Olympics)\
            .filter(
                # What to filter by (where clause)
                db.Country.id==country_id)\
            .group_by(db.Country.id,
            db.Country.name)\
            .first() # Actually executes the query and returns a tuple
        
        session.close()
        
        keys = ('id', 'name', ('olympics-hosted', ('id', 'year', 'season', 'city')))

        country_dict = add_keys(keys, result)

        return jsonify(country_dict)
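add_keys and db.loadSession are project helpers not shown on this page; judging from the keys tuple, the nested ('olympics-hosted', (...)) entry names the aggregated array and its per-row fields. A hedged illustration of the response shape (all values invented):

    # Hypothetical output of add_keys(keys, result) for one country:
    # {
    #     'id': 1,
    #     'name': 'Greece',
    #     'olympics-hosted': [
    #         {'id': '1', 'year': '1896', 'season': 'Summer', 'city': 'Athens'}
    #     ]
    # }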
Example #4
    def selectables(cls, bag, agg_spec):
        """ Create a list of statements from spec

        :type bag: mongosql.bag.ModelPropertyBags
        :rtype: list[sqlalchemy.sql.elements.ColumnElement]
        """
        # TODO: calculation expressions for selection: http://docs.mongodb.org/manual/meta/aggregation-quick-reference/
        selectables = []
        for comp_field, comp_expression in agg_spec.items():
            # Column reference
            if isinstance(comp_expression, basestring):
                selectables.append(bag.columns[comp_expression].label(comp_field))
                continue

            # Computed expression
            assert isinstance(comp_expression, dict), 'Aggregate: Expression should be either a column name, or an object'
            assert len(comp_expression) == 1, 'Aggregate: expression can only contain a single operator'
            operator, expression = comp_expression.popitem()

            # Expression statement
            if isinstance(expression, int) and operator == '$sum':
                # Special case for count
                expression_stmt = expression
            elif isinstance(expression, basestring):
                # Column name
                expression_stmt = bag.columns[expression]
                # Json column?
                if bag.columns.is_column_json(expression):
                    # PostgreSQL always returns text values from it, and for aggregation we usually need numbers :)
                    expression_stmt = cast(expression_stmt, Float)
            elif isinstance(expression, dict):
                # Boolean expression
                expression_stmt = MongoCriteria.statement(bag, expression)
                # Need to cast it to int
                expression_stmt = cast(expression_stmt, Integer)
            else:
                raise AssertionError('Aggregate: expression should be either a column name, or an object')

            # Operator
            if operator == '$max':
                comp_stmt = func.max(expression_stmt)
            elif operator == '$min':
                comp_stmt = func.min(expression_stmt)
            elif operator == '$avg':
                comp_stmt = func.avg(expression_stmt)
            elif operator == '$sum':
                if isinstance(expression_stmt, int):
                    # Special case for count
                    comp_stmt = func.count()
                    if expression_stmt != 1:
                        comp_stmt *= expression_stmt
                else:
                    comp_stmt = func.sum(expression_stmt)
            else:
                raise AssertionError('Aggregate: unsupported operator "{}"'.format(operator))

            # Append
            selectables.append(comp_stmt.label(comp_field))

        return selectables
Example #5
    def get(self):
        """
        Gathers all events from the database with their data
        return a json object representing the events
        """
        
        session = db.loadSession()

        # Make the sql query
        result = session.query(
            # What to select
            # distinct because of multiple medals per event
            distinct(db.Event.id),
            db.Event.name,
            db.Sport.name,
            func.array_agg_cust(distinct(array([cast(db.Olympics.id, String), cast(db.Olympics.year, String), db.Olympics.season])))
            )\
            .select_from(db.Event)\
            .join(db.Sport)\
            .join(db.Medal)\
            .join(db.Olympics)\
            .group_by(db.Event.id,
            db.Event.name,
            db.Sport.name)\
            .all() # Actually executes the query and returns a list of tuples
        
        session.close()
        
        keys = ('id', 'name', 'sport', ('olympics', ('id', 'year', 'season')))
        
        all_events_dict = list_of_dict_to_dict_of_dict(add_keys(keys, row) for row in result)
        
        return jsonify(all_events_dict)
Example #6
    def filtering(self):
        search_value = self.request_values.get('sSearch')
        condition = None


        def search(idx, col):
            tmp_column_name = col.column_name.split('.')
            # enumerate() avoids list.index(), which misfires when a path
            # segment repeats
            for i, tmp_name in enumerate(tmp_column_name):
                if i == 0:
                    obj = getattr(self.sqla_object, tmp_name)
                    parent = self.sqla_object
                elif isinstance(obj.property, RelationshipProperty):
                    parent = obj.property.mapper.class_
                    obj = getattr(parent, tmp_name)
                if not hasattr(obj, 'property'):
                    sqla_obj = parent
                    column_name = tmp_name
                elif isinstance(obj.property, RelationshipProperty):
                    sqla_obj = obj.mapper.class_
                    column_name = tmp_name
                    if not column_name:
                        column_name = obj.property.table.primary_key.columns \
                            .values()[0].name
                else:
                    sqla_obj = parent
                    column_name = tmp_name
            return sqla_obj, column_name
        if search_value:
            search_value_list = str(search_value).split()
            for search_val in search_value_list:
                conditions = []
                for idx, col in enumerate(self.columns):
                    if self.request_values.get('bSearchable_%s' % idx) in (
                            True, 'true') and col.searchable:
                        sqla_obj, column_name = search(idx, col)
                        conditions.append(
                            cast(get_attr(sqla_obj, column_name), String).ilike('%%%s%%' % search_val))
                condition = or_(*conditions)
                if condition is not None:
                    self.query = self.query.filter(condition)
        conditions = []
        for idx, col in enumerate(self.columns):
            search_value2 = self.request_values.get('sSearch_%s' % idx)
            if search_value2:
                sqla_obj, column_name = search(idx, col)
                if col.search_like:
                    conditions.append(
                        cast(get_attr(sqla_obj, column_name), String).ilike('%%%s%%' % search_value2))
                else:
                    conditions.append(
                        cast(get_attr(sqla_obj, column_name), String).__eq__(search_value2))
                if condition is not None:
                    condition = and_(condition, and_(*conditions))
                else:
                    condition = and_(*conditions)
        if condition is not None:
            self.query = self.query.filter(condition)
            self.cardinality_filtered = self.query.count()
        else:
            self.cardinality_filtered = self.cardinality
Example #7
    def get(self):
        """
        Gathers all countries from the database with their data
        return a json object representing the countries
        """
        
        session = db.loadSession()

        # Make the sql query
        result = session.query(
            # What to select
            # outerjoin defaults to a LEFT outer join, NOT full outer join
            db.Country.id,
            db.Country.name,
            func.array_agg_cust(array([cast(db.Olympics.id, String), cast(db.Olympics.year, String), db.Olympics.season, db.City.name]))
            )\
            .select_from(db.Country)\
            .outerjoin(db.City)\
            .outerjoin(db.Olympics)\
            .group_by(db.Country.id,
            db.Country.name)\
            .all() # Actually executes the query and returns a list of tuples
        
        session.close()
        
        keys = ('id', 'name', ('olympics-hosted', ('id', 'year', 'season', 'city')))
        
        all_countries_dict = list_of_dict_to_dict_of_dict(add_keys(keys, row) for row in result)
        
        return jsonify(all_countries_dict)
Example #8
    def get_clustered_locations(location_column,
                                threshold_radius=1000, filter=None):
        """
        SELECT ST_Centroid(
            (ST_Dump(
                ST_Union(
                    ST_Buffer(
                        takeoff_location_wkt::geography, 1000
                    )::geometry
                )
            )
        ).geom) FROM flights WHERE pilot_id=31;
        """

        # Cast the takeoff_location_wkt column to Geography
        geography = cast(location_column, Geography)

        # Add a metric buffer zone around the locations
        buffer = cast(geography.ST_Buffer(threshold_radius), Geometry)

        # Join the locations into one MultiPolygon
        union = buffer.ST_Union()

        # Split the MultiPolygon into separate polygons
        dump = union.ST_Dump().geom

        # Calculate center points of each polygon
        locations = func.ST_Centroid(dump)

        query = db.session.query(locations.label('location'))

        if filter is not None:
            query = query.filter(filter)

        return [Location.from_wkb(row.location) for row in query]
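A hedged usage sketch for the helper above, assuming the Flight model implied by the docstring's FROM clause (attribute names are taken from the docstring, not verified against the codebase):

    # Hypothetical call mirroring the documented SQL: cluster the takeoff
    # locations of pilot 31 using a 1 km buffer.
    locations = get_clustered_locations(
        Flight.takeoff_location_wkt,
        threshold_radius=1000,
        filter=(Flight.pilot_id == 31),
    )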
Example #9
    def lookup(self):
        q = request.params["q"]
        # Assume that the SQL library handles the SQL attack vectors

        person_query = meta.Session.query(
            Person.id, sa.func.concat(Person.fullname, " - ", Person.email_address).label("pretty")
        ).filter(
            sa.or_(Person.lastname.ilike(q + "%"), Person.fullname.ilike(q + "%"), Person.email_address.ilike(q + "%"))
        )

        personid_query = meta.Session.query(Person.id, cast(Person.id, sa.String).label("pretty")).filter(
            cast(Person.id, sa.String).like(q + "%")
        )

        boarding_query = meta.Session.query(FulfilmentGroup.person_id, FulfilmentGroup.code.label("pretty")).filter(
            FulfilmentGroup.code.ilike(q + "%")
        )

        badge_query = meta.Session.query(Fulfilment.person_id, Fulfilment.code.label("pretty")).filter(
            Fulfilment.code.ilike(q + "%")
        )

        union_query = person_query.union(personid_query, boarding_query, badge_query).order_by("pretty").limit(5)

        return dict(r=list(union_query.all()))
Example #10
    def get_info(cls, location):
        '''Returns a query object of mountain waves around the location'''
        return DBSession.query(cls) \
            .filter(func.ST_DWithin(
                cast(WKTElement(location.to_wkt(), srid=4326), Geography),
                cast(cls.location, Geography),
                5000))
Example #11
    def by_location(cls, location):
        '''Returns a query object of mountain waves around the location'''
        return cls.query() \
            .filter(db.func.ST_DWithin(
                cast(location.make_point(), Geography),
                cast(cls.location, Geography),
                5000))
Example #12
def demographic_etl(config):
    # set up
    connection = get_connection(config)
    pedsnet_session = init_pedsnet(connection)
    init_pcornet(connection)

    # multiple aliases for pedsnet_pcornet_valueset_map
    # to allow the three named joins
    gender_value_map = aliased(ValueSetMap)
    ethnicity_value_map = aliased(ValueSetMap)
    race_value_map = aliased(ValueSetMap)

    # extract the data from the person table
    person = pedsnet_session.query(Person.person_id,
                                   Person.birth_date,
                                   Person.birth_time,
                                   coalesce(gender_value_map.target_concept, 'OT'),
                                   coalesce(ethnicity_value_map.target_concept, 'OT'),
                                   coalesce(race_value_map.target_concept, 'OT'),
                                   bindparam("biobank_flag", "N"),
                                   Person.gender_source_value,
                                   Person.ethnicity_source_value,
                                   Person.race_source_value,
                                   Person.site,
                                   bindparam("gender_identity", None),
                                   bindparam("raw_gender_identity", None),
                                   bindparam("sexual_orientation", None),
                                   bindparam("raw_sexual_orientation", None)
                                   ). \
        outerjoin(gender_value_map,
                  and_(gender_value_map.source_concept_class == 'Gender',
                       case([(and_(Person.gender_concept_id == None,
                                   gender_value_map.source_concept_id == None), True)],
                            else_=cast(Person.gender_concept_id, String(200)) ==
                                  gender_value_map.source_concept_id))). \
        outerjoin(ethnicity_value_map,
                  and_(ethnicity_value_map.source_concept_class == 'Hispanic',
                       case([(and_(Person.ethnicity_concept_id == None,
                                   ethnicity_value_map.source_concept_id == None), True)],
                            else_=cast(Person.ethnicity_concept_id, String(200)) ==
                                  ethnicity_value_map.source_concept_id))). \
        outerjoin(race_value_map,
                  and_(race_value_map.source_concept_class == 'Race',
                       case([(and_(Person.race_concept_id == None,
                                   race_value_map.source_concept_id == None), True)],
                            else_=cast(Person.race_concept_id, String(200)) ==
                                  race_value_map.source_concept_id))).all()

    # transform data to pcornet names and types
    # load to demographic table
    odo(person, Demographic.__table__,
        dshape='var * {patid: string, birth_date: date, birth_time: string, sex: string,'
               'hispanic: string, race: string, biobank_flag: string, raw_sex: string,'
               'raw_hispanic: string, raw_race:string, site: string, gender_identity: string,'
               'raw_gender_identity: string, sexual_orientation: string, raw_sexual_orientation: string}'
        )
    # close session

    pedsnet_session.close()
Example #13
    def get_proxy_address(
        self,
        user_id,
        ip_address=None,
        best=4,
        conn_factor=0.2
    ):
        """Get a usable proxy address for audio resource of user by user_id.
        If there is no available server, None will be returned

        We sort the connection by

            user_rate - (have_conn*conn_factor) then
            res_rate - (have_conn*conn_factor)

        This means proxies serving fewer users are selected first; servers
        that already hold a connection for this user get a small priority
        boost (introduced by the conn_factor).

        """
        from sqlalchemy.sql.expression import or_, and_, cast, case
        from sqlalchemy.types import Float

        Port = tables.Port
        Proxy = tables.Proxy
        ProxyConnection = tables.ProxyConnection

        # calculate the connection factor
        factor_case = case([
            (ProxyConnection.server_id, conn_factor)
        ], else_=0)

        # Cast the type to make sure the result will be float
        res_rate = (Proxy.resource_count / cast(Proxy.resource_limit, Float))
        res_rate -= factor_case

        user_rate = (Proxy.user_count / cast(Proxy.user_limit, Float))
        user_rate -= factor_case

        query = self.session \
            .query(Port) \
            .join((Proxy, Proxy.id == Port.server_id)) \
            .outerjoin((ProxyConnection,
                        and_(ProxyConnection.server_id == Proxy.id,
                             ProxyConnection.user_id == user_id))) \
            .order_by(user_rate) \
            .order_by(res_rate) \
            .filter(or_(Proxy.user_count < Proxy.user_limit,
                        Proxy.user_limit == 0)) \
            .filter(Proxy.alive) \
            .filter(Proxy.active) \
            .filter(Port.name == 'web')

        # find a random proxy
        ports = query.limit(best).all()
        if not ports:
            return None
        port = random.choice(ports)
        return port.address
Example #14
    def _build_query(self, table, filter_values):
        having = []
        filter_cols = []
        external_cols = _get_grouping(filter_values)

        for fil in self.filters:
            if isinstance(fil, ANDFilter):
                filter_cols.append(fil.filters[0].column_name)
                having.append(fil)
            elif isinstance(fil, RawFilter):
                having.append(fil)
            elif fil.column_name not in ['group', 'gender', 'group_leadership', 'disaggregate_by',
                                         'table_card_group_by']:
                if fil.column_name not in external_cols and fil.column_name != 'maxmin':
                    filter_cols.append(fil.column_name)
                having.append(fil)

        group_having = ''
        having_group_by = []
        if ('disaggregate_by' in filter_values and filter_values['disaggregate_by'] == 'group') or \
                (filter_values.get('table_card_group_by') == 'group_leadership'):
            having_group_by.append('group_leadership')
        elif 'group_leadership' in filter_values and filter_values['group_leadership']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) " \
                           "= :group_leadership and group_leadership=\'Y\'"
            having_group_by.append('group_leadership')
            filter_cols.append('group_leadership')
        elif 'gender' in filter_values and filter_values['gender']:
            group_having = "(MAX(CAST(gender as int4)) + MIN(CAST(gender as int4))) = :gender"

        table_card_group = []
        if 'group_name' in self.group_by:
            table_card_group.append('group_name')
        s1 = alias(select([table.c.doc_id, table.c.group_case_id, table.c.group_name, table.c.group_id,
                           (sqlalchemy.func.max(table.c.prop_value) +
                            sqlalchemy.func.min(table.c.prop_value)).label('maxmin')] + filter_cols +
                          external_cols, from_obj=table,
                          group_by=([table.c.doc_id, table.c.group_case_id, table.c.group_name, table.c.group_id] +
                                    filter_cols + external_cols)), name='x')
        s2 = alias(
            select(
                [table.c.group_case_id,
                 sqlalchemy.cast(
                     cast(func.max(table.c.gender), Integer) + cast(func.min(table.c.gender), Integer), VARCHAR
                 ).label('gender')] + table_card_group,
                from_obj=table,
                group_by=[table.c.group_case_id] + table_card_group + having_group_by, having=group_having
            ), name='y'
        )
        group_by = list(self.group_by)
        if 'group_case_id' in group_by:
            group_by[group_by.index('group_case_id')] = s1.c.group_case_id
            group_by[group_by.index('group_name')] = s1.c.group_name
        return select(
            [sqlalchemy.func.count(s1.c.doc_id).label(self.key)] + group_by,
            group_by=[s1.c.maxmin] + filter_cols + group_by,
            having=AND(having).build_expression(s1),
            from_obj=join(s1, s2, s1.c.group_case_id == s2.c.group_case_id)
        ).params(filter_values)
Example #15
	def filtering(self):
		"""Construct the query, by adding filtering(LIKE) on all columns when the datatable's search box is used
		"""

		def resolve_column(column):
			tmp_name = column.data.split('.')
			obj = getattr(self.sqla_object, tmp_name[0], None)
			if obj is None:
				raise DataTablesException('Invalid column data: ' + tmp_name[0])
			if not hasattr(obj, "property"): # Ex: hybrid_property or property
				sqla_obj = self.sqla_object
				column_name = "".join(tmp_name[1:])
			elif isinstance(obj.property, RelationshipProperty): # Ex: ForeignKey
		 		# Ex: address.description
				sqla_obj = obj.mapper.class_
				column_name = "".join(tmp_name[1:])
				if not column_name:
					# Find first primary key
					column_name = obj.property.table.primary_key.columns.values()[0].name
			else: #-> ColumnProperty
				sqla_obj = self.sqla_object
				column_name = column.data
			return sqla_obj, column_name

		condition = None

		search_value = self.request_values.get('search[value]')
		if search_value != "":
			conditions = []
			for column in self.columns:
				# ignore null columns (javascript placeholder) or unsearchable
				if column.data != "" and column.searchable:
					sqla_obj, column_name = resolve_column(column)
					conditions.append(cast(get_attr(sqla_obj, column_name), String).ilike('%%%s%%' % search_value))
			condition = or_(*conditions)

		conditions = []
		for column in self.columns:
			# ignore null columns (javascript placeholder) or unsearchable
			if column.data != "" and column.search_value != "" and column.searchable:
				sqla_obj, column_name = resolve_column(column)

				#if col.search_like:
				#	conditions.append(cast(get_attr(sqla_obj, column_name), String).like(col.search_like % search_value2))
				#else:
				#	conditions.append(cast(get_attr(sqla_obj, column_name), String).__eq__(search_value2))
				conditions.append(cast(get_attr(sqla_obj, column_name), String).__eq__(column.search_value))

				if condition is not None:
					condition = and_(condition, and_(*conditions))
				else:
					condition= and_(*conditions)

		if condition is not None:
			self.query = self.query.filter(condition)
			# count after filtering
			self.cardinality_filtered = self.query.count()
		else:
			self.cardinality_filtered = self.cardinality
Example #16
def refine_with_user_area(query):
    """Takes a query and refines it with a spatial constraint
    based on user setting"""
    # each key must be tested individually; "'lon' and 'lat' and ..." only
    # checks the last one
    if 'lon' in session and 'lat' in session and 'radius' in session:
        return query.filter(ST_DWithin(
            cast(Task.location, Geography),
            cast(from_shape(Point(session["lon"], session["lat"])), Geography),
            session["radius"]))
    else:
        return query
Example #17
    def by_location(cls, location):
        """Returns a query object of mountain waves around the location"""
        if not isinstance(location, Location):
            raise TypeError('Invalid `location` parameter.')

        return cls.query() \
            .filter(db.func.ST_DWithin(
                cast(location.make_point(), Geography),
                cast(cls.location, Geography),
                5000))
Example #18
    def search(self, keywords):
        criteria = []

        for keyword in keywords_split(keywords):
            if keyword:
                keyword = '%{0}%'.format(keyword)
                criteria.append(cast(Article.title, Unicode).ilike(keyword))
                criteria.append(cast(Article.keywords, Unicode).ilike(keyword))
                
        return self.public().filter(db.or_(*criteria))
Example #19
    def birth_date(cls):
        year = cast(cls.year_of_birth, String)
        month = cast(cls.month_of_birth, String)
        day = cast(cls.day_of_birth, String)

        month = case([(month == "", "01")],
                     else_=case([(func.length(month) == 1, "0" + month)], else_=month))
        day = case([(day == "", "01")],
                   else_=case([(func.length(day) == 1, "0" + day)], else_=day))

        return year + "-" + month + "-" + day
Example #20
    def preprocess_value_and_column(cls, column, value):
        value_array = is_array(value)

        # Coerce operand
        if column.is_array and value_array:
            value = cast(pg.array(value), pg.ARRAY(column.sql_col.type.item_type))
        if column.is_json:
            coerce_type = column.sql_col.type.coerce_compared_value('=', value)  # HACKY: use sqlalchemy type coercion
            column.sql_col = cast(column.sql_col, coerce_type)

        return column, value
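The JSON branch works because TypeEngine.coerce_compared_value lets the column's type propose a comparison type for the concrete operand, which is then applied with cast(). A minimal sketch of that step, assuming a model with a JSON column named data (names are illustrative):

    # Hypothetical: JSON path values compare as text, so coerce first.
    col = MyModel.data['price']
    coerced = cast(col, col.type.coerce_compared_value('=', 100))
    query = session.query(MyModel).filter(coerced > 50)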
Example #21
    def filtering(self):
        """Construct the query, by adding filtering(LIKE) on all columns when the datatable's search box is used
        """
        search_value = self.request_values.get('sSearch')
        condition = None
        def search(idx, col):
            tmp_column_name = col.column_name.split('.')
            obj = getattr(self.sqla_object, tmp_column_name[0])
            if not hasattr(obj, "property"): # Ex: hybrid_property or property
                sqla_obj = self.sqla_object
                column_name = col.column_name
            elif isinstance(obj.property, RelationshipProperty): #Ex: ForeignKey
                # Ex: address.description
                sqla_obj = obj.mapper.class_
                column_name = "".join(tmp_column_name[1:])
                if not column_name:
                    # find first primary key
                    column_name = obj.property.table.primary_key.columns \
                        .values()[0].name
            else:
                sqla_obj = self.sqla_object
                column_name = col.column_name
            return sqla_obj, column_name

        if search_value:
            conditions = []
            for idx, col in enumerate(self.columns):
                if self.request_values.get('bSearchable_%s' % idx) in (
                        True, 'true'):
                    sqla_obj, column_name = search(idx, col)
                    conditions.append(cast(get_attr(sqla_obj, column_name), String).ilike('%%%s%%' % search_value))
            condition = or_(*conditions)
        conditions = []
        for idx, col in enumerate(self.columns):
            search_value2 = self.request_values.get('sSearch_%s' % idx)
            # filter on the value itself: comparing it to (True, 'true') only
            # matches when the search text is literally "true"
            if search_value2:
                sqla_obj, column_name = search(idx, col)
                
                if col.search_like:
                    conditions.append(cast(get_attr(sqla_obj, column_name), String).like(col.search_like % search_value2))
                else:
                    conditions.append(cast(get_attr(sqla_obj, column_name), String).__eq__(search_value2))

                if condition is not None:
                    condition = and_(condition, and_(*conditions))
                else:
                    condition = and_(*conditions)

        if condition is not None:
            self.query = self.query.filter(condition)
            # count after filtering
            self.cardinality_filtered = self.query.count()
        else:
            self.cardinality_filtered = self.cardinality
Example #22
    def all(self, *args, **kwargs):
        (_u, _p, _e, _l) = [getattr(self.db, options)._table for options in
                ('userfeatures', 'phonefunckey', 'extenumbers', 'linefeatures')]

        conds = [
                _l.c.iduserfeatures == _p.c.iduserfeatures,
                _p.c.typeextenumbers != None,
                _p.c.typevalextenumbers != None,
                _p.c.supervision == 1,
                _p.c.progfunckey == 1,
                cast(_p.c.typeextenumbers, VARCHAR(255)) == cast(_e.c.type, VARCHAR(255)),
                _p.c.typevalextenumbers != 'user',
                _p.c.typevalextenumbers == _e.c.typeval
        ]
        if 'context' in kwargs:
            conds.append(_l.c.context == kwargs['context'])

        q = select(
            [_p.c.iduserfeatures, _p.c.exten, _p.c.typeextenumbers,
             _p.c.typevalextenumbers, _p.c.typeextenumbersright,
             _p.c.typevalextenumbersright, _e.c.exten.label('leftexten')],

            and_(*conds)
        )

        """
        _l2 = alias(_l)

        conds = [
                _l.c.iduserfeatures      == _p.c.iduserfeatures, 
                _p.c.typeextenumbers     != None,
                _p.c.typevalextenumbers  != None,
                _p.c.supervision         == 1, 
                _p.c.progfunckey         == 1,
                cast(_p.c.typeextenumbers,VARCHAR(255)) == cast(_e.c.type,VARCHAR(255)),
                _p.c.typevalextenumbers  == 'user',
                _p.c.typevalextenumbers  == cast(_l2.c.iduserfeatures,VARCHAR(255)),
                _e.c.typeval             == cast(_l2.c.id,VARCHAR(255))
        ]
        if 'context' in kwargs:
            conds.append(_l.c.context == kwargs['context'])

        q2 = select(
            [_p.c.iduserfeatures, _p.c.exten, _p.c.typeextenumbers,
             _p.c.typevalextenumbers, _p.c.typeextenumbersright,
             _p.c.typevalextenumbersright, _e.c.exten.label('leftexten')],

            and_(*conds)
        )

        return self.execute(q1.union(q2)).fetchall()
        """
        return self.execute(q).fetchall()
Example #23
def upgrade_severity_levels(session, severity_map):
    """
    Updates the potentially changed severities at the reports.
    """
    LOG.debug("Upgrading severity levels started...")

    # Create a sql query from the severity map.
    severity_map_q = union_all(*[
        select([cast(bindparam('checker_id' + str(i), str(checker_id))
                .label('checker_id'), sqlalchemy.String),
                cast(bindparam('severity' + str(i), Severity._NAMES_TO_VALUES[
                    severity_map[checker_id]])
               .label('severity'), sqlalchemy.Integer)])
        for i, checker_id in enumerate(severity_map)]) \
        .alias('new_severities')

    checker_ids = severity_map.keys()

    # Get checkers whose severity has changed.
    changed_checker_q = select([Report.checker_id, Report.severity]) \
        .group_by(Report.checker_id, Report.severity) \
        .where(Report.checker_id.in_(checker_ids)) \
        .except_(session.query(severity_map_q)).alias('changed_severites')

    changed_checkers = session.query(changed_checker_q.c.checker_id,
                                     changed_checker_q.c.severity)

    # Update severity levels of checkers.
    if changed_checkers:
        updated_checker_ids = set()
        for checker_id, severity_old in changed_checkers:
            severity_new = severity_map.get(checker_id, 'UNSPECIFIED')
            severity_id = Severity._NAMES_TO_VALUES[severity_new]

            LOG.info("Upgrading severity level of '%s' checker from %s to %s",
                     checker_id,
                     Severity._VALUES_TO_NAMES[severity_old],
                     severity_new)

            if checker_id in updated_checker_ids:
                continue

            session.query(Report) \
                .filter(Report.checker_id == checker_id) \
                .update({Report.severity: severity_id})

            updated_checker_ids.add(checker_id)

        session.commit()

    LOG.debug("Upgrading of severity levels finished...")
Example #24
def top_n(session, day_from, day_to, action_kinds=('src_add',), num_results=5):
    """ helper for league tables """
    cnts = session.query(Action.user_id, func.count('*').label('cnt')).\
            filter(Action.what.in_(action_kinds)).\
            filter(cast(Action.performed, Date) >= day_from).\
            filter(cast(Action.performed, Date) <= day_to).\
            group_by(Action.user_id).\
            subquery()

    return session.query(UserAccount, cnts.c.cnt).\
        join(cnts, UserAccount.id==cnts.c.user_id).\
        order_by(cnts.c.cnt.desc()).\
        limit(num_results).\
        all()
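A hedged usage sketch (assuming the UserAccount/Action models from the snippet):

    from datetime import date

    # Hypothetical call: top five users by 'src_add' actions in January 2015.
    for user, cnt in top_n(session, date(2015, 1, 1), date(2015, 1, 31)):
        print(user, cnt)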
Example #25
def leaderboard_query(session, start_date, until_date):
    """
    This is, admittedly, a really ugly sql query. Query optimization has not
    been performed, but it shouldn't be anything more complicated than a few
    indices. Good luck.
    """
    #start_date = datetime.strptime(start_date, '%Y-%m-%d')
    #until_date = datetime.strptime(until_date_str, '%Y-%m-%d')
    subq = session\
        .query(
            Instance,
            InstanceType,
            User,
            case([(Instance.end_date != None, Instance.end_date)], else_=now()).label('stop_date'))\
        .join(Instance.user)\
        .join(Instance.type)\
        .subquery()

    uptime_column = case(
        [
            (subq.c.created_date > until_date, 0),
            (subq.c.stop_date < start_date, 0)
        ],
        else_=extract('epoch',
            func.LEAST(subq.c.stop_date, cast(until_date, DateTime)) -
            func.GREATEST(subq.c.created_date, cast(start_date, DateTime))
        )
    )

    print(subq.c)
    subq2 = session.query(
        subq.c.user_id,
        func.sum(case([(uptime_column == 0, 0)], else_=1)).label('instance_count'),
        #func.count(subq.c.instance_id).label('instance_count'),
        func.sum(uptime_column).label('uptime'),
        func.sum(uptime_column * subq.c.cpu).label('cpu_seconds')
    ).group_by(subq.c.user_id).order_by(desc('cpu_seconds')).subquery()

    q = session.query(
        subq2.c.user_id,
        subq2.c.uptime,
        subq2.c.cpu_seconds,
        subq2.c.instance_count,
        User.username,
        User.is_staff,
        User.name
    ).join(User)

    return q
Example #26
    def handle(self, *args, **options):
        # set up
        config = get_config()
        if config is None:
            raise CommandError('Unable to process configuration file p_to_p.yml')

        connection = get_connection(config)
        pedsnet_session = init_pedsnet(connection)
        init_pcornet(connection)
        init_vocab(connection)

        pedsnet_pcornet_valueset_map = aliased(ValueSetMap)

        # extract the data from the death table
        death_cause = pedsnet_session.query(DeathPedsnet.person_id,
                                            func.left(DeathPedsnet.cause_source_value, 8),
                                            coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT'),
                                            bindparam("death_cause_type", "NI"),
                                            bindparam("death_cause_source", "L"),
                                            bindparam("death_cause_confidence", None),
                                            func.min(DeathPedsnet.site)
                                            ) \
            .join(Demographic, Demographic.patid == cast(DeathPedsnet.person_id, String(256)), ) \
            .join(VocabularyConcept, VocabularyConcept.concept_id == DeathPedsnet.cause_concept_id) \
            .outerjoin(pedsnet_pcornet_valueset_map,
                       and_(pedsnet_pcornet_valueset_map.source_concept_class == 'death cause code',
                            cast(VocabularyConcept.vocabulary_id, String(200)) ==
                            pedsnet_pcornet_valueset_map.source_concept_id)) \
            .filter(and_(DeathPedsnet.cause_source_value != None,
                         DeathPedsnet.cause_source_concept_id != 44814650)) \
            .group_by(DeathPedsnet.person_id, func.left(DeathPedsnet.cause_source_value, 8),
                      coalesce(pedsnet_pcornet_valueset_map.target_concept, 'OT')) \
            .all()

        # transform data to pcornet names and types
        # load to demographic table
        odo(death_cause, DeathCause.__table__,
            dshape='var * {patid: string, death_cause: string, death_cause_code: string,'
                   'death_cause_type: string, death_cause_source:string, '
                   'death_cause_confidence: string, site: string}'
            )

        # close session
        pedsnet_session.close()

        # output result
        self.stdout.ending = ''
        print('Death Cause ETL completed successfully', end='', file=self.stdout)
Example #27
    def extend_query_with_textfilter(self, query, text):
        """Extends the given `query` with text filters. This is only done when
        config's `filter_text` is set.
        """

        if len(text):
            if isinstance(text, str):
                text = text.decode('utf-8')

            # remove trailing asterisk
            if text.endswith('*'):
                text = text[:-1]

            # let's split up the search term into words, extend them with
            # the default wildcards and then search for every word
            # separately
            for word in text.strip().split(' '):
                term = '%%%s%%' % word

                # XXX check if the following hack is still necessary

                # Fixes a collation problem with the Oracle DB where the
                # case-insensitive search worked only every second time;
                # accessing query.session makes it work reliably
                # Issue #759
                query.session

                query = query.filter(or_(
                    *[cast(field, String).ilike(term)
                      for field in self.searchable_columns]))

        return query
Example #28
File: DB.py Project: serc/wave
def getSpectraBinID(freq_bins = None, dir_bins = None):
  spectra = _session.query(SpectraRecord).filter(and_(
    SpectraRecord.spcfreq == cast(freq_bins, ARRAY(DOUBLE_PRECISION)),
    SpectraRecord.spcdir == cast(dir_bins, ARRAY(DOUBLE_PRECISION))
  )).first()

  if spectra:
    return spectra.id
  else:
    # Create a record for the spectra.
    spectra = SpectraRecord(spcFreq = freq_bins, spcDir = dir_bins)

    _session.add(spectra)
    _session.commit()

    return spectra.id
Example #29
def find_sccp_speeddial_settings(session):
    query = (session.query(FuncKeyMapping.position.label('fknum'),
                           FuncKeyMapping.label.label('label'),
                           cast(FuncKeyMapping.blf, Integer).label('supervision'),
                           FuncKeyDestCustom.exten.label('exten'),
                           UserFeatures.id.label('user_id'),
                           SCCPDevice.device.label('device'))
             .join(UserFeatures,
                   FuncKeyMapping.template_id == UserFeatures.func_key_private_template_id)
             .join(FuncKeyDestCustom,
                   FuncKeyDestCustom.func_key_id == FuncKeyMapping.func_key_id)
             .join(UserLine,
                   and_(
                       UserLine.user_id == UserFeatures.id,
                       UserLine.main_user == True))
             .join(LineFeatures,
                   UserLine.line_id == LineFeatures.id)
             .join(SCCPLine,
                   and_(
                       LineFeatures.protocol == 'sccp',
                       LineFeatures.protocolid == SCCPLine.id))
             .join(SCCPDevice,
                   SCCPLine.name == SCCPDevice.line)
             .filter(LineFeatures.commented == 0))

    keys = [{'exten': row.exten,
             'fknum': row.fknum,
             'label': row.label,
             'supervision': row.supervision,
             'user_id': row.user_id,
             'device': row.device}
            for row in query]

    return keys
Example #30
def _story_build_summary_query():
    # first create a subquery for task statuses
    select_items = []
    select_items.append(Story)
    select_items.append(
        expr.case(
            [(func.sum(Task.status.in_(
                ['todo', 'inprogress', 'review'])) > 0,
              'active'),
             ((func.sum(Task.status == 'merged')) > 0, 'merged')],
            else_='invalid'
        ).label('status')
    )
    for task_status in Task.TASK_STATUSES:
        select_items.append(expr.cast(
            func.sum(Task.status == task_status), Integer
        ).label(task_status))
    select_items.append(expr.null().label('task_statuses'))

    result = select(select_items, None,
                    expr.Join(Story, Task, onclause=Story.id == Task.story_id,
                              isouter=True)) \
        .group_by(Story.id) \
        .alias('story_summary')

    return result
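The per-status columns rely on summing boolean comparisons, where each matching row contributes 1 to the total. A stripped-down sketch of the same idiom, assuming the Task model and imports from the snippet (note that some backends, e.g. PostgreSQL, need the cast inside rather than around the sum):

    # Hypothetical: count tasks per status in one grouped pass.
    per_status = select([
        Task.story_id,
        expr.cast(func.sum(Task.status == 'todo'), Integer).label('todo'),
        expr.cast(func.sum(Task.status == 'merged'), Integer).label('merged'),
    ]).group_by(Task.story_id)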
Example #31
def paginate_query(query,
                   model,
                   limit,
                   sort_keys,
                   marker=None,
                   sort_dir=None,
                   sort_dirs=None):
    """Returns a query with sorting / pagination criteria added.

    Pagination works by requiring a unique sort_key, specified by sort_keys.
    (If sort_keys is not unique, then we risk looping through values.)
    We use the last row in the previous page as the 'marker' for pagination.
    So we must return values that follow the passed marker in the order.
    With a single-valued sort_key, this would be easy: sort_key > X.
    With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
    the lexicographical ordering:
    (k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)

    We also have to cope with different sort_directions and cases where k2,
    k3, ... are nullable.

    Typically, the id of the last row is used as the client-facing pagination
    marker, then the actual marker object must be fetched from the db and
    passed in to us as marker.

    The "offset" parameter is intentionally avoided. As offset requires a
    full scan through the preceding results each time, criteria-based
    pagination is preferred. See http://use-the-index-luke.com/no-offset
    for further background.

    :param query: the query object to which we should add paging/sorting
    :param model: the ORM model class
    :param limit: maximum number of items to return
    :param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                    results after this value.
    :param sort_dir: direction in which results should be sorted (asc, desc)
                     suffix -nullsfirst, -nullslast can be added to defined
                     the ordering of null values
    :param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys

    :rtype: sqlalchemy.orm.query.Query
    :return: The query with sorting/pagination added.
    """
    if _stable_sorting_order(model, sort_keys) is False:
        LOG.warning('Unique keys not in sort_keys. '
                    'The sorting order may be unstable.')

    assert (not (sort_dir and sort_dirs))

    # Default the sort direction to ascending
    if sort_dirs is None and sort_dir is None:
        sort_dir = 'asc'

    # Ensure a per-column sort direction
    if sort_dirs is None:
        sort_dirs = [sort_dir for _sort_key in sort_keys]

    assert (len(sort_dirs) == len(sort_keys))

    # Add sorting
    for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
        try:
            inspect(model).all_orm_descriptors[current_sort_key]
        except KeyError:
            raise exception.InvalidSortKey(current_sort_key)
        else:
            sort_key_attr = getattr(model, current_sort_key)

        try:
            main_sort_dir, __, null_sort_dir = current_sort_dir.partition("-")
            sort_dir_func = {
                'asc': sqlalchemy.asc,
                'desc': sqlalchemy.desc,
            }[main_sort_dir]

            null_order_by_stmt = {
                "": None,
                "nullsfirst": sort_key_attr.is_(None),
                "nullslast": sort_key_attr.isnot(None),
            }[null_sort_dir]
        except KeyError:
            raise ValueError(
                _("Unknown sort direction, "
                  "must be one of: %s") % ", ".join(_VALID_SORT_DIR))

        if null_order_by_stmt is not None:
            query = query.order_by(sqlalchemy.desc(null_order_by_stmt))
        query = query.order_by(sort_dir_func(sort_key_attr))

    # Add pagination
    if marker is not None:
        marker_values = []
        for sort_key in sort_keys:
            v = getattr(marker, sort_key)
            marker_values.append(v)

        # Build up an array of sort criteria as in the docstring
        criteria_list = []
        for i in range(len(sort_keys)):
            crit_attrs = []
            # NOTE: We skip the marker value comparison if marker_values[i] is
            #       None, for two reasons: 1) the comparison operators below
            #       ('<', '>') are not applicable on None value; 2) this is
            #       safe because we can assume the primary key is included in
            #       sort_key, thus checked as (one of) marker values.
            if marker_values[i] is not None:
                for j in range(i):
                    model_attr = getattr(model, sort_keys[j])
                    crit_attrs.append((model_attr == marker_values[j]))

                model_attr = getattr(model, sort_keys[i])
                val = marker_values[i]
                # sqlalchemy doesn't like booleans in < >. bug/1656947
                if isinstance(model_attr.type, Boolean):
                    val = int(val)
                    model_attr = cast(model_attr, Integer)
                if sort_dirs[i].startswith('desc'):
                    crit_attrs.append((model_attr < val))
                else:
                    crit_attrs.append((model_attr > val))
                criteria = sqlalchemy.sql.and_(*crit_attrs)
                criteria_list.append(criteria)

        f = sqlalchemy.sql.or_(*criteria_list)
        query = query.filter(f)

    if limit is not None:
        query = query.limit(limit)

    return query
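A hedged usage sketch, assuming an Instance model with created_at and id columns (names are illustrative; the unique id key keeps the order stable, as the docstring requires):

    # Hypothetical keyset pagination: newest first, id as the tie-breaker.
    # The marker is the last row of the previous page, re-fetched from the
    # client-supplied id as described in the docstring.
    marker = session.query(Instance).get(last_id) if last_id else None
    page = paginate_query(session.query(Instance), Instance, limit=20,
                          sort_keys=['created_at', 'id'],
                          marker=marker, sort_dir='desc')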
Example #32
    def perm(cls) -> str:  # pylint: disable=no-self-argument
        return ("[" + cls.database_name + "].(id:" +
                expression.cast(cls.id, String) + ")")
Example #33
def chat_list(request):

    current_page = int(request.GET.get("page", 1))

    if request.matched_route.name.startswith("chat_list_unanswered"):
        current_status = "unanswered"
    elif request.matched_route.name.startswith("chat_list_ongoing"):
        current_status = "ongoing"
    elif request.matched_route.name.startswith("chat_list_ended"):
        current_status = "ended"
    else:
        current_status = None

    if request.matched_route.name.startswith("chat_list_label"):
        current_label = request.matchdict["label"].lower().strip().replace(
            " ", "_")
        if current_label != request.matchdict["label"]:
            raise HTTPFound(
                request.route_path("chat_list_label", label=current_label))
    else:
        current_label = None

    chats = Session.query(ChatUser, Chat, Message).join(Chat).outerjoin(
        Message,
        Message.id == Session.query(func.min(Message.id), ).filter(
            Message.chat_id == Chat.id, ).correlate(Chat),
    ).filter(ChatUser.user_id == request.user.id, )

    chat_count = Session.query(func.count('*')).select_from(ChatUser).filter(
        ChatUser.user_id == request.user.id, )

    if current_status == "unanswered":
        chats = chats.filter(
            and_(
                # .isnot(None) builds SQL; Python's "is not None" evaluates
                # to a plain bool and never reaches the query
                Chat.last_user_id.isnot(None),
                Chat.last_user_id != request.user.id,
            ))
        chat_count = chat_count.join(Chat).filter(
            and_(
                Chat.last_user_id.isnot(None),
                Chat.last_user_id != request.user.id,
            ))
    elif current_status is not None:
        chats = chats.filter(Chat.status == current_status)
        chat_count = chat_count.join(Chat).filter(
            Chat.status == current_status)

    if current_label is not None:
        label_array = cast([current_label], ARRAY(Unicode(500)))
        chats = chats.filter(ChatUser.labels.contains(label_array))
        chat_count = chat_count.filter(ChatUser.labels.contains(label_array))

    chats = chats.order_by(Chat.updated.desc()).limit(25).offset(
        (current_page - 1) * 25).all()

    # 404 on empty pages, unless it's the first page.
    if current_page != 1 and len(chats) == 0:
        raise HTTPNotFound

    chat_count = chat_count.scalar()

    if request.matchdict.get("fmt") == "json":
        return render_to_response("json", {
            "chats": [{
                "chat_user": chat_user,
                "chat": chat,
                "prompt": prompt,
            } for chat_user, chat, prompt in chats],
            "chat_count":
            chat_count,
        },
                                  request=request)

    paginator = paginate.Page(
        [],
        page=current_page,
        items_per_page=25,
        item_count=chat_count,
        url=paginate.PageURL(
            request.route_path(request.matched_route.name,
                               label=current_label), {"page": current_page}),
    )

    labels = (Session.query(
        func.unnest(ChatUser.labels),
        func.count("*")).filter(ChatUser.user_id == request.user.id).group_by(
            func.unnest(ChatUser.labels)).order_by(
                func.count("*").desc(),
                func.unnest(ChatUser.labels).asc()).all())

    template = "layout2/chat_list.mako" if request.user.layout_version == 2 else "chat_list.mako"
    return render_to_response(template, {
        "chats": chats,
        "paginator": paginator,
        "labels": labels,
        "current_status": current_status,
        "current_label": current_label,
        "symbols": symbols,
    },
                              request=request)
Example #34
    def tracking_delay_interval(cls):
        return cast(cast(cls.tracking_delay, String) + ' minutes', Interval)
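On PostgreSQL this renders roughly as CAST(CAST(tracking_delay AS VARCHAR) || ' minutes' AS INTERVAL), so the hybrid can take part in timestamp arithmetic. A hedged usage sketch (the Shipment model and created_at column are assumptions):

    # Hypothetical: rows whose tracking delay has already elapsed.
    overdue = session.query(Shipment).filter(
        Shipment.created_at + Shipment.tracking_delay_interval < func.now())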
Example #35
    def __eq__(self, other):
        return self._case() == cast(other, String)
Example #36
    def user_name(cls):
        decrypted = func.aes_decrypt(func.unhex(cls.enc_user_name),
                                     current_app.config['AES_KEY'])
        return cast(decrypted, db.String(64))
Example #37
class User(BaseModel, DatedModel):
    """Holds users' data"""

    __tablename__ = "users"
    username = Column(String, nullable=False, unique=True, comment="User's identifier")
    active = Column(
        "is_active",
        BOOLEAN(),
        nullable=False,
        server_default=cast(1, BOOLEAN),
        comment="Denotes active users",
    )

    _password = Column("password", String, nullable=False, comment="Password hash")

    # User identifiers
    email = Column(
        String, nullable=True, unique=True, comment="User's personal unique email"
    )

    # meta data
    _photo = Column("photo", String, nullable=True, comment="User's avatar url")
    phone = Column(String, nullable=True, comment="Contact number")

    # User information
    first_name = Column(String, nullable=False, comment="First Name")
    last_name = Column(String, nullable=False, server_default="", comment="Last Name")

    manager_id = Column(Integer, ForeignKey("users.id"), nullable=True)
    manager: "User" = relationship(
        "User", foreign_keys=[manager_id], lazy=True, uselist=False
    )

    # Relationships

    # Define the relationship to Role via UserRoles
    roles = relationship("Role", secondary="user_roles")
    # user sessions
    sessions = relationship(
        "Session", order_by="Session.created_at.asc()", uselist=True
    )

    affiliation: "UserAffiliation" = relationship("UserAffiliation", uselist=False)

    token = None

    def set_password(self, val: str):
        regx = re.compile(current_app.config["PASSWORD_RULE"])
        if not regx.match(val):
            raise UserExceptions.password_check_invalid()
        self._password = generate_password_hash(val)

    def get_password(self):
        return PasswordHelper(self._password)

    password = property(get_password, set_password)

    def get_photo(self):
        # the attribute set in set_photo is name-mangled, so a plain
        # getattr(self, "__photo_handler") would always miss it
        handler = getattr(self, "_User__photo_handler", None)
        if handler is not None:
            return handler
        return FileHandler(url=self._photo) if self._photo else None

    def set_photo(self, val: FileHandler):
        self.__photo_handler = val
        self._photo = getattr(val, "url", None)

    photo = property(get_photo, set_photo)

    def __init__(
        self,
        username: str,
        password: str,
        password_check: str,
        active: bool = True,
        email: str = None,
        photo: "FileHandler" = None,
        phone: str = None,
        first_name: str = "",
        last_name: str = "",
        **kwargs,
    ) -> None:
        if password != password_check:
            raise UserExceptions.password_check_invalid()
        self.username = username
        self.password = password
        self.active = active
        self.email = email
        self.photo = photo
        self.phone = phone
        self.first_name = first_name
        self.last_name = last_name

    @hybrid_property
    def name(self) -> str:
        """concatenates user's name"""
        return f"{self.first_name} {self.last_name}"

    def add_roles(self, roles: Union[List["Role"], "Role"]):
        """add roles to user

        Args:
            roles: A list of or a single role instances
        """
        from ._UserRoles import UserRoles

        new_roles = [
            UserRoles(user=self, role=role)
            for role in (roles if isinstance(roles, list) else [roles])
        ]

        db.session.add_all(new_roles)

    def delete(self, persist=False):
        """Delete user's record"""
        if self.photo:
            self.photo.delete()
        super().delete(persist=persist)

    def add_entity(self, entity: "Entity", create: bool = False, edit: bool = False):
        from ._UserEntityPermission import UserEntityPermission

        permission = UserEntityPermission(
            entity=entity, user=self, create=create, edit=edit
        )
        db.session.add(permission)
        db.session.commit()

    @hybrid_property
    def employees(self) -> List["User"]:
        # filter on self.id; comparing User.manager_id to User.id would
        # match users who are their own manager
        return User.query.filter(User.manager_id == self.id).all()

    @hybrid_property
    def assets(self) -> List[AssetStorage]:
        return AssetStorage.query.filter(AssetStorage.added_by_id == self.id).all()
Example #38
    def get(self):
        """
            .. http:get:: /api/1/items

            Get a list of items matching the given criteria.

            **Example Request**:

            .. sourcecode:: http

                GET /api/1/items HTTP/1.1
                Host: example.com
                Accept: application/json

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json

                {
                    "items": [
                        {
                            "account": "example_account",
                            "region": "us-east-1",
                            "technology": "sqs",
                            "id": 14414,
                            "name": "example_name",
                            "num_issues": 3,
                            "issue_score": 9,
                            "unjustified_issue_score": 3,
                            "active" true,
                            "first_seen": "2014-06-17 19:47:07.299760",
                            "last_seen": "2014-06-18 11:53:16.467709"
                        }
                    ],
                    "total": 144,
                    "page": 1,
                    "auth": {
                        "authenticated": true,
                        "user": "******"
                    }
                }

            :statuscode 200: no error
            :statuscode 401: Authentication Error. Please Login.
        """

        (auth, retval) = __check_auth__(self.auth_dict)
        if auth:
            return retval

        self.reqparse.add_argument('count', type=int, default=30, location='args')
        self.reqparse.add_argument('page', type=int, default=1, location='args')
        self.reqparse.add_argument('regions', type=str, default=None, location='args')
        self.reqparse.add_argument('accounts', type=str, default=None, location='args')
        self.reqparse.add_argument('active', type=str, default=None, location='args')
        self.reqparse.add_argument('names', type=str, default=None, location='args')
        self.reqparse.add_argument('technologies', type=str, default=None, location='args')
        self.reqparse.add_argument('searchconfig', type=str, default=None, location='args')
        self.reqparse.add_argument('ids', type=str, default=None, location='args')  # comma-separated list; split(',') below
        args = self.reqparse.parse_args()

        page = args.pop('page', None)
        count = args.pop('count', None)
        for k, v in list(args.items()):
            if not v:
                del args[k]

        # Read more about filtering:
        # http://docs.sqlalchemy.org/en/rel_0_7/orm/query.html
        query = Item.query.join((ItemRevision, Item.latest_revision_id == ItemRevision.id))
        if 'regions' in args:
            regions = args['regions'].split(',')
            query = query.filter(Item.region.in_(regions))
        if 'accounts' in args:
            accounts = args['accounts'].split(',')
            query = query.join((Account, Account.id == Item.account_id))
            query = query.filter(Account.name.in_(accounts))
        if 'technologies' in args:
            technologies = args['technologies'].split(',')
            query = query.join((Technology, Technology.id == Item.tech_id))
            query = query.filter(Technology.name.in_(technologies))
        if 'names' in args:
            names = args['names'].split(',')
            query = query.filter(Item.name.in_(names))
        if 'ids' in args:
            ids = args['ids'].split(',')
            query = query.filter(Item.id.in_(ids))
        if 'active' in args:
            active = args['active'].lower() == "true"
            query = query.filter(ItemRevision.active == active)
        if 'searchconfig' in args:
            searchconfig = args['searchconfig']
            query = query.filter(cast(ItemRevision.config, String).ilike('%{}%'.format(searchconfig)))

        query = query.order_by(ItemRevision.date_created.desc())

        items = query.paginate(page, count)

        marshaled_dict = {}
        marshaled_dict['page'] = items.page
        marshaled_dict['total'] = items.total
        marshaled_dict['auth'] = self.auth_dict

        marshaled_items = []
        for item in items.items:
            num_issues = len(item.issues)

            issue_score = 0
            unjustified_issue_score = 0
            for issue in item.issues:
                issue_score = issue_score + issue.score

                if not issue.justified:
                    unjustified_issue_score += issue.score

            first_seen = str(item.revisions[-1].date_created)
            last_seen = str(item.revisions[0].date_created)
            active = item.revisions[0].active

            item_marshaled = marshal(item.__dict__, ITEM_FIELDS)
            item_marshaled = dict(item_marshaled.items() +
                                  {
                                      'account': item.account.name,
                                      'technology': item.technology.name,
                                      'num_issues': num_issues,
                                      'issue_score': issue_score,
                                      'unjustified_issue_score': unjustified_issue_score,
                                      'active': active,
                                      'first_seen': first_seen,
                                      'last_seen': last_seen
                                      #'last_rev': item.revisions[0].config,
                                  }.items())

            marshaled_items.append(item_marshaled)

        marshaled_dict['items'] = marshaled_items
        marshaled_dict['count'] = len(marshaled_items)

        return marshaled_dict, 200
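
The `searchconfig` filter above relies on casting a JSON config column to String so an ordinary ILIKE can substring-match it. The same pattern in isolation (a sketch reusing the handler's models; the search term is illustrative):

from sqlalchemy import String, cast

# Substring search inside a JSON column: cast to text first, since
# PostgreSQL's JSON type has no ILIKE operator of its own.
needle = 'example_queue'
matching_revisions = ItemRevision.query.filter(
    cast(ItemRevision.config, String).ilike('%{}%'.format(needle))
).all()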
Example n. 39
    def filtering(self):
        search_value = self.request_values.get('sSearch')
        condition = None

        def search(idx, col):
            tmp_column_name = col.column_name.split('.')
            for tmp_name in tmp_column_name:
                if tmp_column_name.index(tmp_name) == 0:
                    obj = getattr(self.sqla_object, tmp_name)
                    parent = self.sqla_object
                elif isinstance(obj.property, RelationshipProperty):
                    parent = obj.property.mapper.class_
                    obj = getattr(parent, tmp_name)
                if not hasattr(obj, 'property'):
                    sqla_obj = parent
                    column_name = tmp_name
                elif isinstance(obj.property, RelationshipProperty):
                    sqla_obj = obj.mapper.class_
                    column_name = tmp_name
                    if not column_name:
                        column_name = obj.property.table.primary_key.columns \
                            .values()[0].name
                else:
                    sqla_obj = parent
                    column_name = tmp_name
            return sqla_obj, column_name

        if search_value:
            search_value_list = str(search_value).split()
            for search_val in search_value_list:
                conditions = []
                for idx, col in enumerate(self.columns):
                    if self.request_values.get('bSearchable_%s' % idx) in (
                            True, 'true') and col.searchable:
                        sqla_obj, column_name = search(idx, col)
                        conditions.append(
                            cast(get_attr(sqla_obj, column_name),
                                 String).ilike('%%%s%%' % search_val))
                condition = or_(*conditions)
                if condition is not None:
                    self.query = self.query.filter(condition)
        conditions = []
        for idx, col in enumerate(self.columns):
            search_value2 = self.request_values.get('sSearch_%s' % idx)
            if search_value2:
                sqla_obj, column_name = search(idx, col)
                if col.search_like:
                    conditions.append(
                        cast(get_attr(sqla_obj, column_name),
                             String).ilike('%%%s%%' % search_value2))
                else:
                    conditions.append(
                        cast(get_attr(sqla_obj, column_name),
                             String).__eq__(search_value2))
                if condition is not None:
                    condition = and_(condition, and_(*conditions))
                else:
                    condition = and_(*conditions)
        if condition is not None:
            self.query = self.query.filter(condition)
            self.cardinality_filtered = self.query.count()
        else:
            self.cardinality_filtered = self.cardinality
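
The global-search branch above builds one ILIKE per searchable column and ORs them per search term, ANDing the terms together. A self-contained sketch of that idea, assuming a hypothetical list of column attributes:

from sqlalchemy import String, cast, or_

def apply_global_search(query, columns, search_value):
    # Every whitespace-separated term must match at least one column:
    # columns are ORed within a term, terms ANDed by chaining .filter().
    for term in str(search_value).split():
        query = query.filter(or_(
            *(cast(col, String).ilike('%%%s%%' % term) for col in columns)
        ))
    return query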
Example n. 40
        def read(self, query, data=None, order=None, limit=None, offset=0):
            """
            Get the model objects matching the supplied query parameters,
            optionally setting which part of the objects are in the returned dictionary
            using the supplied data parameter

            Arguments:
                query: one or more queries (as c{dict} or [c{dict}]), corresponding
                    to the format of the query parameter described in the module-level
                    docstrings. This query parameter will be normalized
                data: one or more data specifications (as c{dict} or [c{dict}]),
                    corresponding to the format of the data specification parameter
                    described in the module-level docstrings. The length of the data
                    parameter should either be 1, in which case it is the spec for
                    every query, OR N, where N is the number of queries after
                    normalization. If not provided, the _data parameter is expected
                    in each query
                order: one or more ordering specifications (as c{dict} or [c{dict}]),
                    naming the field and direction ('asc' or 'desc') used to sort
                    the results
                limit: when provided with positive integer "L", at most "L" results
                    will be returned. Defaults to no limit
                offset: when provided with positive integer "F", the first "F"
                    results (based on ordering) are skipped. Defaults to 0

            Returns:
                list: one or more data specification dictionaries with models that
                    match the provided queries including all readable fields without
                    following foreign keys (the default if no data parameter is included),
                    OR the key/values specified by the data specification parameter. The
                    number of items returned and the order in which they appear are
                    controlled by the limit, offset and order parameters. Represented as::

                        return {
                            total: <int> # count of ALL matching objects, separate from <limit>
                            results: [c{dict}, c{dict}, ... , c{dict}] # subject to <limit>
                        }

            """
            with Session() as session:
                filters = normalize_query(query)
                data = normalize_data(data, len(filters))
                if len(filters) == 1:
                    filter = filters[0]
                    model = Session.resolve_model(filter['_model'])
                    total = 0
                    results = []
                    if getattr(model, '_crud_perms', {}).get('read', True):
                        total = CrudApi._filter_query(session.query(model),
                                                      model, filter).count()
                        results = CrudApi._filter_query(
                            session.query(model), model, filter, limit, offset,
                            order).all()

                    return {
                        'total': total,
                        'results': [r.crud_read(data[0]) for r in results]
                    }

                elif len(filters) > 1:
                    queries = []
                    count_queries = []
                    queried_models = []
                    sort_field_types = {}
                    for filter_index, filter in enumerate(filters):
                        model = Session.resolve_model(filter['_model'])
                        if getattr(model, '_crud_perms', {}).get('read', True):
                            queried_models.append(model)
                            query_fields = [
                                model.id,
                                cast(literal(model.__name__),
                                     Text).label("_table_name"),
                                cast(literal(filter_index), Integer)
                            ]

                            for sort_index, sort in enumerate(
                                    normalize_sort(model, order)):
                                sort_field = getattr(model, sort['field'])
                                sort_field_types[sort_index] = type(
                                    sort_field.__clause_element__().type)
                                query_fields.append(
                                    sort_field.label(
                                        'anon_sort_{}'.format(sort_index)))
                            queries.append(
                                CrudApi._filter_query(
                                    session.query(*query_fields), model,
                                    filter))
                            count_queries.append(
                                CrudApi._filter_query(session.query(model.id),
                                                      model, filter))

                    total = count_queries[0].union(
                        *(count_queries[1:])).count()
                    query = queries[0].union(*(queries[1:]))
                    normalized_sort_fields = normalize_sort(None, order)
                    for sort_index, sort in enumerate(normalized_sort_fields):
                        dir = {'asc': asc, 'desc': desc}[sort['dir']]
                        sort_field = 'anon_sort_{}'.format(sort_index)
                        if issubclass(sort_field_types[sort_index], String):
                            sort_field = 'lower({})'.format(sort_field)
                        query = query.order_by(dir(sort_field))
                    if normalized_sort_fields:
                        query = query.order_by("_table_name")
                    rows = CrudApi._limit_query(query, limit, offset).all()

                    result_table = {}
                    result_order = {}
                    query_index_table = {}
                    for i, row in enumerate(rows):
                        id = str(row[0])
                        model = Session.resolve_model(row[1])
                        query_index = row[2]
                        result_table.setdefault(model, []).append(id)
                        result_order[id] = i
                        query_index_table[id] = query_index

                    for model, ids in result_table.items():
                        result_table[model] = session.query(model).filter(
                            model.id.in_(ids)).all()

                    ordered_results = len(result_order) * [None]
                    for model, instances in result_table.items():
                        for instance in instances:
                            ordered_results[result_order[
                                instance.id]] = instance
                    results = [r for r in ordered_results if r is not None]

                    return {
                        'total':
                        total,
                        'results': [
                            r.crud_read(data[query_index_table[r.id]])
                            for r in results
                        ]
                    }
                else:
                    return {'total': 0, 'results': []}
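
The multi-model branch works by giving every per-model query the same column shape -- id, a literal table name, and the filter index -- so heterogeneous results can be UNIONed, sorted, and re-fetched per model afterwards. A pared-down sketch of just that union step (the model list and session are assumptions):

from sqlalchemy import Integer, Text, cast, literal

# One sub-query per model, each exposing identical columns so the
# UNION is well-formed and sortable as a single result set.
queries = [
    session.query(
        model.id,
        cast(literal(model.__name__), Text).label('_table_name'),
        cast(literal(index), Integer).label('_query_index'),
    )
    for index, model in enumerate([Attendee, Group])  # hypothetical models
]
rows = queries[0].union(*queries[1:]).order_by('_table_name').all()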
Example n. 41
    def get(self):
        """
            .. http:get:: /api/1/revisions

            Get a list of revisions

            **Example Request**:

            .. sourcecode:: http

                GET /api/1/revisions?count=1 HTTP/1.1
                Host: example.com
                Accept: application/json

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json

                {
                    "items": [
                        {
                            "account": "example_account",
                            "accounttype": "AWS",
                            "name": "Example Name",
                            "region": "us-east-1",
                            "item_id": 144,
                            "active": false,
                            "date_created": "2014-06-19 20:54:12.962951",
                            "technology": "sqs",
                            "id": 223757
                        }
                    ],
                    "total": 1,
                    "page": 1,
                    "auth": {
                        "authenticated": true,
                        "user": "******"
                    }
                }

            :statuscode 200: no error
            :statuscode 401: Authentication Error. Please Login.
        """

        self.reqparse.add_argument('count',
                                   type=int,
                                   default=30,
                                   location='args')
        self.reqparse.add_argument('page',
                                   type=int,
                                   default=1,
                                   location='args')
        self.reqparse.add_argument('active',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('regions',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('accounts',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('accounttypes',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('names',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('arns',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('technologies',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('searchconfig',
                                   type=str,
                                   default=None,
                                   location='args')
        args = self.reqparse.parse_args()

        page = args.pop('page', None)
        count = args.pop('count', None)
        for k, v in list(args.items()):
            if not v:
                del args[k]

        query = ItemRevision.query.join("item")
        if 'regions' in args:
            regions = args['regions'].split(',')
            query = query.filter(Item.region.in_(regions))
        if 'accounts' in args:
            accounts = args['accounts'].split(',')
            query = query.join((Account, Account.id == Item.account_id))
            query = query.filter(Account.name.in_(accounts))
        if 'accounttypes' in args:
            accounttypes = args['accounttypes'].split(',')
            query = query.join((Account, Account.id == Item.account_id))
            query = query.join(
                (AccountType, AccountType.id == Account.account_type_id))
            query = query.filter(AccountType.name.in_(accounttypes))
        if 'technologies' in args:
            technologies = args['technologies'].split(',')
            query = query.join((Technology, Technology.id == Item.tech_id))
            query = query.filter(Technology.name.in_(technologies))
        if 'names' in args:
            names = args['names'].split(',')
            query = query.filter(Item.name.in_(names))
        if 'arns' in args:
            arns = args['arns'].split(',')
            query = query.filter(Item.arn.in_(arns))
        if 'active' in args:
            active = args['active'].lower() == "true"
            query = query.filter(ItemRevision.active == active)
        if 'searchconfig' in args:
            searchconfig = args['searchconfig']
            query = query.filter(
                cast(ItemRevision.config,
                     String).ilike('%{}%'.format(searchconfig)))
        query = query.order_by(ItemRevision.date_created.desc())
        revisions = query.paginate(page, count)

        marshaled_dict = {
            'page': revisions.page,
            'total': revisions.total,
            'auth': self.auth_dict
        }

        items_marshaled = []
        for revision in revisions.items:
            item_marshaled = marshal(revision.item.__dict__, ITEM_FIELDS)
            revision_marshaled = marshal(revision.__dict__, REVISION_FIELDS)
            account_marshaled = {'account': revision.item.account.name}
            accounttype_marshaled = {
                'account_type': revision.item.account.account_type.name
            }
            technology_marshaled = {
                'technology': revision.item.technology.name
            }
            merged_marshaled = dict(item_marshaled.items() +
                                    revision_marshaled.items() +
                                    account_marshaled.items() +
                                    accounttype_marshaled.items() +
                                    technology_marshaled.items())
            items_marshaled.append(merged_marshaled)

        marshaled_dict['items'] = items_marshaled
        marshaled_dict['count'] = len(items_marshaled)
        return marshaled_dict, 200
Example n. 42
 def search(self, qs):
     return filter_number(cast(self.dt.model.pk, Integer), qs, type_=int)
Example n. 43
File: util.py Project: esbesb/clld
def as_int(col):
    return cast(col, Integer)
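
A cast like `as_int` matters wherever numbers are stored in text columns; without it, string ordering puts '10' before '9'. A hedged usage sketch with a hypothetical `Value` model whose `name` column holds digit strings:

# Numeric ordering over a text column via the helper above.
ordered = session.query(Value).order_by(as_int(Value.name)).all()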
Example n. 44
        db.Enum(ThingType),
        db.CheckConstraint(
            "\"type\" <> 'Book' OR \"extraData\"->>'prix_livre' SIMILAR TO '[0-9]+(.[0-9]*|)'",
            name='check_thing_book_has_price'),
        db.CheckConstraint(
            "\"type\" <> 'Book' OR NOT \"extraData\"->'author' IS NULL",
            name='check_thing_book_has_author'),
        db.CheckConstraint(
            "\"type\" <> 'Book' OR \"idAtProviders\" SIMILAR TO '[0-9]{13}'",
            name='check_thing_book_has_ean13'),
        nullable=False)

    name = db.Column(db.String(140), nullable=False)

    description = db.Column(db.Text, nullable=True)

    mediaUrls = db.Column(ARRAY(db.String(120)), nullable=False, default=[])


Thing.__ts_vector__ = create_tsvector(
    cast(coalesce(Thing.name, ''), TEXT),
    coalesce(Thing.extraData['author'].cast(TEXT), ''),
    coalesce(Thing.extraData['byArtist'].cast(TEXT), ''),
)

Thing.__table_args__ = (Index('idx_thing_fts',
                              Thing.__ts_vector__,
                              postgresql_using='gin'), )

app.model.Thing = Thing
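
Querying the GIN-indexed vector built above would typically match it against a tsquery. A sketch, assuming `__ts_vector__` is a plain tsvector expression (as `create_tsvector` suggests); `plainto_tsquery` and `@@` are standard PostgreSQL:

from sqlalchemy import func

# Full-text match against the composite tsvector defined above.
results = Thing.query.filter(
    Thing.__ts_vector__.op('@@')(func.plainto_tsquery('victor hugo'))
).all()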
Example n. 45
    def get_project_stats(self) -> ProjectStatsDTO:
        """ Create Project Stats model for postgis project object"""
        project_stats = ProjectStatsDTO()
        project_stats.project_id = self.id
        project_area_sql = "select ST_Area(geometry, true)/1000000 as area from public.projects where id = :id"
        project_area_result = db.engine.execute(text(project_area_sql),
                                                id=self.id)

        project_stats.area = project_area_result.fetchone()["area"]
        project_stats.total_mappers = (db.session.query(User).filter(
            User.projects_mapped.any(self.id)).count())
        project_stats.total_tasks = self.total_tasks
        project_stats.total_comments = (db.session.query(ProjectChat).filter(
            ProjectChat.project_id == self.id).count())
        project_stats.percent_mapped = Project.calculate_tasks_percent(
            "mapped",
            self.total_tasks,
            self.tasks_mapped,
            self.tasks_validated,
            self.tasks_bad_imagery,
        )
        project_stats.percent_validated = Project.calculate_tasks_percent(
            "validated",
            self.total_tasks,
            self.tasks_mapped,
            self.tasks_validated,
            self.tasks_bad_imagery,
        )
        project_stats.percent_bad_imagery = Project.calculate_tasks_percent(
            "bad_imagery",
            self.total_tasks,
            self.tasks_mapped,
            self.tasks_validated,
            self.tasks_bad_imagery,
        )
        centroid_geojson = db.session.scalar(self.centroid.ST_AsGeoJSON())
        project_stats.aoi_centroid = geojson.loads(centroid_geojson)
        project_stats.total_time_spent = 0
        project_stats.total_mapping_time = 0
        project_stats.total_validation_time = 0
        project_stats.average_mapping_time = 0
        project_stats.average_validation_time = 0

        total_mapping_time, total_mapping_tasks = (db.session.query(
            func.sum(
                cast(func.to_timestamp(TaskHistory.action_text, "HH24:MI:SS"),
                     Time)),
            func.count(TaskHistory.action),
        ).filter(
            or_(
                TaskHistory.action == "LOCKED_FOR_MAPPING",
                TaskHistory.action == "AUTO_UNLOCKED_FOR_MAPPING",
            )).filter(TaskHistory.project_id == self.id).one())

        if total_mapping_tasks > 0:
            total_mapping_time = total_mapping_time.total_seconds()
            project_stats.total_mapping_time = total_mapping_time
            project_stats.average_mapping_time = (total_mapping_time /
                                                  total_mapping_tasks)
            project_stats.total_time_spent += total_mapping_time

        total_validation_time, total_validation_tasks = (db.session.query(
            func.sum(
                cast(func.to_timestamp(TaskHistory.action_text, "HH24:MI:SS"),
                     Time)),
            func.count(TaskHistory.action),
        ).filter(
            or_(
                TaskHistory.action == "LOCKED_FOR_VALIDATION",
                TaskHistory.action == "AUTO_UNLOCKED_FOR_VALIDATION",
            )).filter(TaskHistory.project_id == self.id).one())

        if total_validation_tasks > 0:
            total_validation_time = total_validation_time.total_seconds()
            project_stats.total_validation_time = total_validation_time
            project_stats.average_validation_time = (total_validation_time /
                                                     total_validation_tasks)
            project_stats.total_time_spent += total_validation_time

        actions = []
        if project_stats.average_mapping_time <= 0:
            actions.append(TaskStatus.LOCKED_FOR_MAPPING.name)
        if project_stats.average_validation_time <= 0:
            actions.append(TaskStatus.LOCKED_FOR_VALIDATION.name)

        zoom_levels = []
        # Only fetch zoom levels when an average still needs computing.
        if len(actions) != 0:
            zoom_levels = (Task.query.with_entities(
                Task.zoom.distinct()).filter(Task.project_id == self.id).all())
            zoom_levels = [z[0] for z in zoom_levels]

        # A None zoom level means the project contains arbitrary
        # (non-square) tasks rather than gridded squares.
        is_square = True
        if None in zoom_levels:
            is_square = False
        sq = (TaskHistory.query.with_entities(
            Task.zoom,
            TaskHistory.action,
            (cast(func.to_timestamp(TaskHistory.action_text, "HH24:MI:SS"),
                  Time)).label("ts"),
        ).filter(Task.is_square == is_square).filter(
            TaskHistory.project_id == Task.project_id).filter(
                TaskHistory.task_id == Task.id).filter(
                    TaskHistory.action.in_(actions)))
        if is_square is True:
            sq = sq.filter(Task.zoom.in_(zoom_levels))

        sq = sq.subquery()

        nz = (db.session.query(sq.c.zoom, sq.c.action, sq.c.ts).filter(
            sq.c.ts > datetime.time(0)).limit(10000).subquery())

        if project_stats.average_mapping_time <= 0:
            mapped_avg = (db.session.query(nz.c.zoom, (func.avg(
                nz.c.ts)).label("avg")).filter(
                    nz.c.action ==
                    TaskStatus.LOCKED_FOR_MAPPING.name).group_by(
                        nz.c.zoom).all())
            if mapped_avg:  # avoid ZeroDivisionError when nothing matched
                mapping_time = sum(t.avg.total_seconds()
                                   for t in mapped_avg) / len(mapped_avg)
                project_stats.average_mapping_time = mapping_time

        if project_stats.average_validation_time <= 0:
            val_avg = (db.session.query(nz.c.zoom, (func.avg(
                nz.c.ts)).label("avg")).filter(
                    nz.c.action ==
                    TaskStatus.LOCKED_FOR_VALIDATION.name).group_by(
                        nz.c.zoom).all())
            if val_avg:  # avoid ZeroDivisionError when nothing matched
                validation_time = sum(t.avg.total_seconds()
                                      for t in val_avg) / len(val_avg)
                project_stats.average_validation_time = validation_time

        time_to_finish_mapping = (
            self.total_tasks -
            (self.tasks_mapped + self.tasks_bad_imagery +
             self.tasks_validated)) * project_stats.average_mapping_time
        project_stats.time_to_finish_mapping = time_to_finish_mapping
        project_stats.time_to_finish_validating = (
            self.total_tasks - (self.tasks_validated + self.tasks_bad_imagery)
        ) * project_stats.average_validation_time

        return project_stats
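
The recurring trick in this method is treating an 'HH24:MI:SS' duration string as a time of day: to_timestamp parses it, the Time cast keeps only the clock part, and SUM yields an interval. Isolated, with the same column names as above:

from sqlalchemy import Time, cast, func

# Sum text durations such as '00:05:30' into a single interval.
total = db.session.query(
    func.sum(cast(func.to_timestamp(TaskHistory.action_text, 'HH24:MI:SS'), Time))
).scalar()
total_seconds = total.total_seconds() if total is not None else 0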
Example n. 46
 def distance(self, location):
     loc1 = cast(self.location_wkt, Geography)
     loc2 = func.ST_GeographyFromText(location.to_wkt())
     return db.session.scalar(func.ST_Distance(loc1, loc2))
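
A short usage sketch for `distance` (the `venue` object and `user_location` argument are assumptions; on PostGIS, ST_Distance over Geography returns meters):

# Geography distances come back in meters, so thresholds read naturally.
meters = venue.distance(user_location)
if meters is not None and meters < 5000:
    print('within 5 km')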
Example n. 47
 def score(self):
     """Make score queryable."""
     return cast(self.property3, Integer)
Example n. 48
    siren = db.Column(
        db.String(9), nullable=True, unique=True
    )  # FIXME: should not be nullable, is until we have all SIRENs filled in the DB

    def make_admin(self, admin):
        if admin:
            user_offerer = app.model.UserOfferer()
            user_offerer.offerer = self
            user_offerer.user = admin
            user_offerer.rights = app.model.RightsType.admin
            return user_offerer

    def errors(self):
        errors = super(Offerer, self).errors()
        if self.siren is not None\
           and not len(self.siren) == 9:
            errors.addError('siren', 'Ce code SIREN est invalide')
        return errors


Offerer.__ts_vector__ = create_tsvector(
    cast(coalesce(Offerer.name, ''), TEXT),
    cast(coalesce(Offerer.address, ''), TEXT),
    cast(coalesce(Offerer.siren, ''), TEXT))

Offerer.__table_args__ = (Index('idx_offerer_fts',
                                Offerer.__ts_vector__,
                                postgresql_using='gin'), )

app.model.Offerer = Offerer
Example n. 49
 def proportion(self):
     """Make proportion queryable."""
     return cast(self.property4, Float)
Example n. 50
def score_per_meaning_query(type_, filter_=None):  # pragma: no cover
    """
select
    a.id, a.label, a.semantic_category, a.semantic_field_id,
    sum(a.borrowed_score)/sum(a.representation) as borrowed_score,
    count(distinct a.word_id)
from
(
    -- tabulate (word_id, meaning_id) pairs against a word's discounted score

    select
        x.word_id, x.meaning_id as id, x.label as label,
        x.semantic_field_id as semantic_field_id,
        x.semantic_category as semantic_category, y.borrowed_score, y.representation
    from
    (
        select wm.word_id as word_id, m.id as meaning_id, m.label as label,
        m.semantic_field_id as semantic_field_id, m.semantic_category as semantic_category
        from word_meaning as wm, meaning as m
        where wm.meaning_pk = m.pk
    ) as x,
    --
    -- tabulate word ids against score discounted by number of meanings
    --
    (
        select
            w.pk as word_pk, w.id as word_id,
            cast(w.borrowed_score as float)/count(*) as borrowed_score,
            cast(1 as float)/count(*) as representation
        from
            word as w, counterpart as wm
        where
            w.pk = wm.word_pk
        group by
            w.id, w.borrowed_score
    ) as y
    -- ---------------------------------------------------------------------------
    where x.word_id = y.word_id
) as a --,
-- ---------------------------------------------------------------------------
-- select words we are interested in
--

group by
    a.label, a.id, a.semantic_category, a.semantic_field_id
order by
    a.id
    """
    assert type_ in ['borrowed', 'age', 'simplicity']
    attr = '%s_score' % type_

    word, counterpart, parameter, meaning, valueset, value = [
        m.__table__
        for m in [Word, Counterpart, Parameter, Meaning, ValueSet, Value]
    ]

    x = alias(select([
        counterpart.c.word_pk.label('word_pk'),
        parameter.c.pk.label('meaning_pk'),
        meaning.c.semantic_field_pk.label('semantic_field_pk'),
    ],
                     from_obj=value,
                     whereclause=and_(
                         value.c.valueset_pk == valueset.c.pk,
                         valueset.c.parameter_pk == parameter.c.pk,
                         parameter.c.pk == meaning.c.pk,
                         value.c.pk == counterpart.c.pk)),
              name='x')

    y = alias(select(
        [
            word.c.pk.label('word_pk'),
            (cast(getattr(word.c, attr), Float) / func.count('*')).label(attr),
            (cast(1, Float) / func.count('*')).label('representation'),
        ],
        from_obj=counterpart,
        whereclause=word.c.pk == counterpart.c.word_pk,
        group_by=[word.c.pk, getattr(word.c, attr)],
    ),
              name='y')

    a = alias(select([
        x.c.meaning_pk, x.c.semantic_field_pk,
        getattr(y.c, attr), y.c.representation
    ],
                     whereclause=x.c.word_pk == y.c.word_pk),
              name='a')

    query = select([
        a.c.meaning_pk,
        a.c.semantic_field_pk,
        (func.sum(getattr(a.c, attr)) /
         func.sum(a.c.representation)).label(attr),
        func.count(distinct(a.c.meaning_pk)),
    ],
                   group_by=[a.c.meaning_pk, a.c.semantic_field_pk],
                   order_by=a.c.meaning_pk)

    if isinstance(filter_, Meaning):
        query = query.where(a.c.meaning_pk == filter_.pk)

    if isinstance(filter_, SemanticField):
        query = query.where(a.c.semantic_field_pk == filter_.pk)

    return query
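
Executing the constructed select could look like the following (hedged: `DBSession` is the usual clld scoped session, but the surrounding setup is an assumption here):

from clld.db.meta import DBSession  # assumption: standard clld session

query = score_per_meaning_query('borrowed')
for meaning_pk, semantic_field_pk, borrowed_score, meaning_count in DBSession.execute(query):
    print(meaning_pk, borrowed_score)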
Example n. 51
 def fitness(self):
     """Retrieve fitness via property1."""
     return cast(self.property1, Integer)
Example n. 52
    def get(self):
        """
            .. http:get:: /api/1/items

            Get a list of items matching the given criteria.

            **Example Request**:

            .. sourcecode:: http

                GET /api/1/items HTTP/1.1
                Host: example.com
                Accept: application/json

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json

                {
                    "items": [
                        {
                            "account": "example_account",
                            "account_type": "AWS",
                            "region": AWS_DEFAULT_REGION,
                            "technology": "sqs",
                            "id": 14414,
                            "name": "example_name",
                            "num_issues": 3,
                            "issue_score": 9,
                            "unjustified_issue_score": 3,
                            "active" true,
                            "first_seen": "2014-06-17 19:47:07.299760",
                            "last_seen": "2014-06-18 11:53:16.467709"
                        }
                    ],
                    "total": 144,
                    "page": 1,
                    "auth": {
                        "authenticated": true,
                        "user": "******"
                    }
                }

            :statuscode 200: no error
            :statuscode 401: Authentication Error. Please Login.
        """

        self.reqparse.add_argument('count',
                                   type=int,
                                   default=30,
                                   location='args')
        self.reqparse.add_argument('page',
                                   type=int,
                                   default=1,
                                   location='args')
        self.reqparse.add_argument('regions',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('accounts',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('accounttypes',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('active',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('names',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('arns',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('technologies',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('searchconfig',
                                   type=str,
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('ids',
                                   type=str,  # comma-separated list; split below
                                   default=None,
                                   location='args')
        self.reqparse.add_argument('summary',
                                   type=bool,
                                   default=False,
                                   location='args')
        self.reqparse.add_argument('min_score',
                                   type=int,
                                   default=False,
                                   location='args')
        self.reqparse.add_argument('min_unjustified_score',
                                   type=int,
                                   default=False,
                                   location='args')
        args = self.reqparse.parse_args()

        page = args.pop('page', None)
        count = args.pop('count', None)
        for k, v in list(args.items()):
            if not v:
                del args[k]

        # Read more about filtering:
        # https://docs.sqlalchemy.org/en/latest/orm/query.html
        query = Item.query.join(
            (ItemRevision, Item.latest_revision_id == ItemRevision.id))

        # Fix for issue https://github.com/Netflix/security_monkey/issues/1150
        # PR https://github.com/Netflix/security_monkey/pull/1153
        join_account = False

        if 'regions' in args:
            regions = args['regions'].split(',')
            query = query.filter(Item.region.in_(regions))
        if 'accounts' in args:
            accounts = args['accounts'].split(',')
            query = query.filter(Account.name.in_(accounts))
            join_account = True
        if 'accounttypes' in args:
            accounttypes = args['accounttypes'].split(',')
            query = query.join(
                (AccountType, AccountType.id == Account.account_type_id))
            query = query.filter(AccountType.name.in_(accounttypes))
            join_account = True
        if 'technologies' in args:
            technologies = args['technologies'].split(',')
            query = query.join((Technology, Technology.id == Item.tech_id))
            query = query.filter(Technology.name.in_(technologies))
        if 'names' in args:
            names = args['names'].split(',')
            query = query.filter(Item.name.in_(names))
        if 'arns' in args:
            arns = args['arns'].split(',')
            query = query.filter(Item.arn.in_(arns))
        if 'ids' in args:
            ids = args['ids'].split(',')
            query = query.filter(Item.id.in_(ids))
        if 'active' in args:
            active = args['active'].lower() == "true"
            query = query.filter(ItemRevision.active == active)
            query = query.filter(Account.active == True)
            join_account = True
        if 'searchconfig' in args:
            searchconfig = args['searchconfig']
            query = query.filter(
                cast(ItemRevision.config,
                     String).ilike('%{}%'.format(searchconfig)))
        if 'min_score' in args:
            min_score = args['min_score']
            query = query.filter(Item.score >= min_score)
        if 'min_unjustified_score' in args:
            min_unjustified_score = args['min_unjustified_score']
            query = query.filter(
                Item.unjustified_score >= min_unjustified_score)
        if join_account:
            query = query.join((Account, Account.id == Item.account_id))

        # Eager load the joins except for the revisions because of the dynamic lazy relationship
        query = query.options(joinedload('issues'))
        query = query.options(joinedload('account'))
        query = query.options(joinedload('technology'))

        query = query.order_by(ItemRevision.date_created.desc())

        items = query.paginate(page, count)

        marshaled_dict = {
            'page': items.page,
            'total': items.total,
            'auth': self.auth_dict
        }

        marshaled_items = []
        for item in items.items:
            item_marshaled = marshal(item.__dict__, ITEM_FIELDS)

            if 'summary' in args and args['summary']:
                item_marshaled = dict(
                    list(item_marshaled.items()) +
                    list({
                        'account': item.account.name,
                        'account_type': item.account.account_type.name,
                        'technology': item.technology.name,
                        'num_issues': item.issue_count,
                        'issue_score': item.score,
                        'unjustified_issue_score': item.unjustified_score,
                        'active': active,
                        #'last_rev': item.revisions[0].config,
                    }.items()))
            else:
                first_seen_query = ItemRevision.query.filter(
                    ItemRevision.item_id == item.id).order_by(
                        ItemRevision.date_created.asc())
                first_seen = str(first_seen_query.first().date_created)
                last_seen = str(item.revisions.first().date_created)
                active = item.revisions.first().active
                item_marshaled = dict(
                    list(item_marshaled.items()) +
                    list({
                        'account': item.account.name,
                        'account_type': item.account.account_type.name,
                        'technology': item.technology.name,
                        'num_issues': item.issue_count,
                        'issue_score': item.score,
                        'unjustified_issue_score': item.unjustified_score,
                        'active': active,
                        'first_seen': first_seen,
                        'last_seen': last_seen
                        # 'last_rev': item.revisions[0].config,
                    }.items()))

            marshaled_items.append(item_marshaled)

        marshaled_dict['items'] = marshaled_items
        marshaled_dict['count'] = len(marshaled_items)

        return marshaled_dict, 200
Example n. 53
def rename_vo(old_vo,
              new_vo,
              insert_new_vo=False,
              description=None,
              email=None,
              commit_changes=False,
              skip_history=False,
              echo=True):
    """
    Updates rows so that entries associated with `old_vo` are now associated with `new_vo` as part of multi-VO migration.

    :param old_vo:         The 3 character string for the current VO (for a single-VO instance this will be 'def').
    :param new_vo:         The 3 character string for the new VO.
    :param insert_new_vo:  If True then an entry for `new_vo` is created in the database.
    :param description:    Full description of the new VO, unused if `insert_new_vo` is False.
    :param email:          Admin email for the new VO, unused if `insert_new_vo` is False.
    :param commit_changes: If True then changes are made against the database directly.
                           If False, then nothing is committed and the commands needed are dumped to be run later.
    :param skip_history:   If True then tables without FKC containing historical data will not be converted to save time.
    """
    success = True
    engine = session.get_engine(echo=echo)
    conn = engine.connect()
    trans = conn.begin()
    inspector = reflection.Inspector.from_engine(engine)
    metadata = MetaData(bind=conn, reflect=True)
    dialect = engine.dialect.name

    # Gather all the columns that need updating and all relevant foreign key constraints
    all_fks = []
    tables_and_columns = []
    for table_name in inspector.get_table_names():
        if skip_history and ('_history' in table_name
                             or '_hist_recent' in table_name):
            continue
        fks = []
        table = Table(table_name, metadata)
        for column in table.c:
            if 'scope' in column.name or column.name == 'account':
                tables_and_columns.append((table, column))
        for fk in inspector.get_foreign_keys(table_name):
            if not fk['name']:
                continue
            if 'scope' in fk['referred_columns'] or 'account' in fk[
                    'referred_columns']:
                fks.append(
                    ForeignKeyConstraint(fk['constrained_columns'], [
                        fk['referred_table'] + '.' + r
                        for r in fk['referred_columns']
                    ],
                                         name=fk['name'],
                                         table=table,
                                         **fk['options']))
        all_fks.extend(fks)

    try:
        bound_params = {
            'old_vo': old_vo,
            'new_vo': new_vo,
            'old_vo_suffix': '' if old_vo == 'def' else old_vo,
            'new_vo_suffix': '' if new_vo == 'def' else '@%s' % new_vo,
            'split_character': '@',
            'int_1': 1,
            'int_2': 2,
            'new_description': description,
            'new_email': email,
            'datetime': datetime.utcnow()
        }

        bound_params_text = {}
        for key in bound_params:
            if isinstance(bound_params[key], int):
                bound_params_text[key] = bound_params[key]
            else:
                bound_params_text[key] = "'%s'" % bound_params[key]

        if insert_new_vo:
            table = Table('vos', metadata)
            insert_command = table.insert().values(
                vo=bindparam('new_vo'),
                description=bindparam('new_description'),
                email=bindparam('new_email'),
                updated_at=bindparam('datetime'),
                created_at=bindparam('datetime'))
            print(str(insert_command) % bound_params_text + ';')
            if commit_changes:
                conn.execute(insert_command, bound_params)

        # Drop all FKCs affecting InternalAccounts/Scopes
        for fk in all_fks:
            print(str(DropConstraint(fk)) + ';')
            if commit_changes:
                conn.execute(DropConstraint(fk))

        # Update columns
        for table, column in tables_and_columns:
            update_command = table.update().where(
                split_vo(dialect, column, return_vo=True) == bindparam(
                    'old_vo_suffix'))

            if new_vo == 'def':
                update_command = update_command.values(
                    {column.name: split_vo(dialect, column)})
            else:
                update_command = update_command.values({
                    column.name:
                    split_vo(dialect, column) +
                    cast(bindparam('new_vo_suffix'), CHAR(4))
                })

            print(str(update_command) % bound_params_text + ';')
            if commit_changes:
                conn.execute(update_command, bound_params)

        table = Table('rses', metadata)
        update_command = table.update().where(
            table.c.vo == bindparam('old_vo')).values(vo=bindparam('new_vo'))
        print(str(update_command) % bound_params_text + ';')
        if commit_changes:
            conn.execute(update_command, bound_params)

        # Re-add the FKCs we dropped
        for fkc in all_fks:
            print(str(AddConstraint(fkc)) + ';')
            if commit_changes:
                conn.execute(AddConstraint(fkc))
    except Exception:
        success = False
        print(format_exc())
        print('Exception occurred, changes not committed to DB.')

    if commit_changes and success:
        trans.commit()
    trans.close()
    return success
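
A dry-run invocation matching the docstring above: with commit_changes left False, the generated SQL is only printed for review, not executed.

# Print (but do not execute) the statements needed to migrate
# everything from the single-VO 'def' to a new VO 'abc'.
rename_vo('def', 'abc',
          insert_new_vo=True,
          description='New VO for tenant ABC',
          email='admin@example.com',
          commit_changes=False)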
Example n. 54
    def get_project_stats(self) -> ProjectStatsDTO:
        """ Create Project Stats model for postgis project object"""
        project_stats = ProjectStatsDTO()
        project_stats.project_id = self.id
        project_area_sql = "select ST_Area(geometry, true)/1000000 as area from public.projects where id = :id"
        project_area_result = db.engine.execute(text(project_area_sql), id=self.id)

        project_stats.area = project_area_result.fetchone()["area"]
        project_stats.total_mappers = (
            db.session.query(User).filter(User.projects_mapped.any(self.id)).count()
        )
        project_stats.total_tasks = self.total_tasks
        project_stats.total_comments = (
            db.session.query(ProjectChat)
            .filter(ProjectChat.project_id == self.id)
            .count()
        )
        project_stats.percent_mapped = Project.calculate_tasks_percent(
            "mapped",
            self.total_tasks,
            self.tasks_mapped,
            self.tasks_validated,
            self.tasks_bad_imagery,
        )
        project_stats.percent_validated = Project.calculate_tasks_percent(
            "validated",
            self.total_tasks,
            self.tasks_mapped,
            self.tasks_validated,
            self.tasks_bad_imagery,
        )
        project_stats.percent_bad_imagery = Project.calculate_tasks_percent(
            "bad_imagery",
            self.total_tasks,
            self.tasks_mapped,
            self.tasks_validated,
            self.tasks_bad_imagery,
        )
        centroid_geojson = db.session.scalar(self.centroid.ST_AsGeoJSON())
        project_stats.aoi_centroid = geojson.loads(centroid_geojson)
        unique_mappers = (
            TaskHistory.query.filter(
                TaskHistory.action == "LOCKED_FOR_MAPPING",
                TaskHistory.project_id == self.id,
            )
            .distinct(TaskHistory.user_id)
            .count()
        )
        unique_validators = (
            TaskHistory.query.filter(
                TaskHistory.action == "LOCKED_FOR_VALIDATION",
                TaskHistory.project_id == self.id,
            )
            .distinct(TaskHistory.user_id)
            .count()
        )
        project_stats.total_time_spent = 0
        project_stats.total_mapping_time = 0
        project_stats.total_validation_time = 0
        project_stats.average_mapping_time = 0
        project_stats.average_validation_time = 0

        total_mapping_time = (
            db.session.query(
                func.sum(
                    cast(func.to_timestamp(TaskHistory.action_text, "HH24:MI:SS"), Time)
                )
            )
            .filter(
                or_(
                    TaskHistory.action == "LOCKED_FOR_MAPPING",
                    TaskHistory.action == "AUTO_UNLOCKED_FOR_MAPPING",
                )
            )
            .filter(TaskHistory.project_id == self.id)
        )
        total_mapping_time = total_mapping_time.scalar()  # run the aggregate
        if total_mapping_time:
            total_mapping_seconds = total_mapping_time.total_seconds()
            project_stats.total_mapping_time = total_mapping_seconds
            project_stats.total_time_spent += project_stats.total_mapping_time
            if unique_mappers:
                average_mapping_time = total_mapping_seconds / unique_mappers
                project_stats.average_mapping_time = average_mapping_time

        query = (
            TaskHistory.query.with_entities(
                func.date_trunc("minute", TaskHistory.action_date).label("trn"),
                func.max(TaskHistory.action_text).label("tm"),
            )
            .filter(TaskHistory.project_id == self.id)
            .filter(TaskHistory.action == "LOCKED_FOR_VALIDATION")
            .group_by("trn")
            .subquery()
        )
        total_validation_time = db.session.query(
            func.sum(cast(func.to_timestamp(query.c.tm, "HH24:MI:SS"), Time))
        ).scalar()
        if total_validation_time:
            total_validation_seconds = total_validation_time.total_seconds()
            project_stats.total_validation_time = total_validation_seconds
            project_stats.total_time_spent += project_stats.total_validation_time
            if unique_validators:
                average_validation_time = (
                    total_validation_seconds / unique_validators
                )
                project_stats.average_validation_time = average_validation_time

        return project_stats
Example n. 55
 def ancestor_of(self, other):
     if isinstance(other, list):
         return self.op('@>')(expression.cast(other, ARRAY(LtreeType)))
     else:
         return self.op('@>')(other)
Example n. 56
 def __ne__(self, other):
     return self._case() != cast(other, String)
Example n. 57
 def descendant_of(self, other):
     if isinstance(other, list):
         return self.op('<@')(expression.cast(other, ARRAY(LtreeType)))
     else:
         return self.op('<@')(other)
Example n. 58
 def lquery(self, other):
     if isinstance(other, list):
         return self.op('?')(expression.cast(other, ARRAY(LQUERY)))
     else:
         return self.op('~')(other)
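
The ltree-style comparators above (ancestor_of, descendant_of, lquery) wrap PostgreSQL's '@>', '<@' and '~'/'?' operators so plain Python values or lists can be passed in. A usage sketch with a hypothetical Node model whose path column uses this type:

# Find every node under 'top.science'; a list argument would be cast
# to an ltree[] and use the array form of the operator instead.
descendants = session.query(Node).filter(
    Node.path.descendant_of('top.science')
).all()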
Example n. 59
    def _summarize_issues_sql(self, include_filename=False):

        diff_issue_occurrence_table = self.backend.get_table(DiffIssueOccurrence)
        issue_occurrence_table = self.backend.get_table(IssueOccurrence)
        issue_table = self.backend.get_table(Issue)
        file_revision_table = self.backend.get_table(FileRevision)
        project_issue_class_table = self.backend.get_table(ProjectIssueClass)
        issue_class_table = self.backend.get_table(self.project.IssueClass)

        #we group by file revision path, issue code and analyzer
        group_columns = [file_revision_table.c.language,
                         file_revision_table.c.path,
                         diff_issue_occurrence_table.c['key'],
                         #we should not group by pk
#                         diff_issue_occurrence_table.c['pk'],
                         issue_table.c.code,
                         issue_table.c.analyzer]

        project_pk_type = self.backend.get_field_type(self.project.fields['pk'])

        #here we make sure that the given issue class is enabled for the project
        subselect = select([issue_class_table.c.pk])\
            .select_from(issue_class_table.join(project_issue_class_table))\
            .where(and_(
            issue_table.c.analyzer == issue_class_table.c.analyzer,
            issue_table.c.code == issue_class_table.c.code,
            project_issue_class_table.c.project == expression.cast(self.project.pk, project_pk_type),
            project_issue_class_table.c.enabled == True))

        #we perform a JOIN of the file revision table to the issue tables
        table = diff_issue_occurrence_table\
        .join(issue_occurrence_table,
              issue_occurrence_table.c.pk == diff_issue_occurrence_table.c.issue_occurrence)\
        .join(issue_table)\
        .join(file_revision_table)

        #we select the aggregated issues for all file revisions in this snapshot
        s = select(group_columns+[func.count().label('count')])\
        .select_from(table)\
        .where(and_(exists(subselect),diff_issue_occurrence_table.c.diff == self.pk))\
        .group_by(*group_columns)\
        .order_by(file_revision_table.c.path)

        #we fetch the result
        with self.backend.transaction():
            result  = self.backend.connection.execute(s).fetchall()

        #we aggregate the issues by path fragments
        aggregator = lambda f: directory_splitter(f['path'],include_filename = include_filename)

        added_issues = []
        fixed_issues = []

        for row in result:
            if row['key'] == 'added':
                added_issues.append(row)
            else:
                fixed_issues.append(row)

        #we perform a map/reduce on the result
        map_reducer = IssuesMapReducer(aggregators = [aggregator],
                                       group_by = ['language','analyzer','code'])

        return {'added': map_reducer.mapreduce(added_issues),
                'fixed': map_reducer.mapreduce(fixed_issues)}
Example n. 60
def get_periodic_stats_quarter_hour(session, start, end):
    quarter_hour_step = func.date_trunc(literal('hour'), StatCallOnQueue.time) + \
        (cast(extract('minute', StatCallOnQueue.time), Integer) / 15) * timedelta(minutes=15)
    return _get_periodic_stat_by_step(session, start, end, quarter_hour_step)
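
The quarter-hour step truncates to the hour and adds back whole 15-minute blocks of the minute component (integer division by 15 picks the quarter). The same bucketing on its own, with a hypothetical Event.time column:

from datetime import timedelta
from sqlalchemy import Integer, cast, extract, func, literal

# Floor timestamps into 15-minute bins: date_trunc gives the hour,
# minutes // 15 selects the quarter within it.
quarter_hour = func.date_trunc(literal('hour'), Event.time) + \
    (cast(extract('minute', Event.time), Integer) / 15) * timedelta(minutes=15)
histogram = (session.query(quarter_hour.label('bucket'), func.count())
             .group_by('bucket').all())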