Exemple #1
0
    def load_in_extent_from_db(cls, session: Session, min_easting: float, max_easting: float, min_northing: float,
                               max_northing: float) -> List["AbstractGeoObject"]:
        """
        Fetch every object of this class whose east/north coordinates fall inside the given extent.

        :param session: SQLAlchemy session representing the database connection
        :param min_easting: lower easting bound of the extent
        :param max_easting: upper easting bound of the extent
        :param min_northing: lower northing bound of the extent
        :param max_northing: upper northing bound of the extent
        :return: list of matching objects, ordered by id
        :raises ValueError: if an extent value cannot be converted to float
        :raises TypeError: if session is not a SQLAlchemy Session
        """
        # Coerce the extent values first so a ValueError surfaces before the
        # session type check, matching the documented exception behaviour.
        min_easting, max_easting = float(min_easting), float(max_easting)
        min_northing, max_northing = float(min_northing), float(max_northing)

        if not isinstance(session, Session):
            raise TypeError("'session' is not of type SQLAlchemy Session!")

        query = session.query(cls) \
            .filter(sq.between(cls.east, min_easting, max_easting)) \
            .filter(sq.between(cls.north, min_northing, max_northing)) \
            .order_by(cls.id)
        objects = query.all()
        # Re-attach the session so the loaded objects can lazily talk to the DB.
        for geo_object in objects:
            geo_object.session = session
        return objects
Exemple #2
0
    def load_in_extent_without_lines_from_db(cls, session: Session, min_easting: float, max_easting: float,
                                             min_northing: float, max_northing: float) -> List["GeoPoint"]:
        """
        Fetch all GeoPoints inside the given extent which are not part of a line.

        Similar to the
        :meth:`AbstractGeoObject.load_in_extent_from_db()<geological_toolbox.geo_object.AbstractGeoObject.load_in_extent_from_db>`
        function.

        :param session: SQLAlchemy session representing the database connection
        :param min_easting: lower easting bound of the extent
        :param max_easting: upper easting bound of the extent
        :param min_northing: lower northing bound of the extent
        :param max_northing: upper northing bound of the extent
        :return: list of matching GeoPoints, ordered by id
        :raises ValueError: if an extent value cannot be converted to float
        :raises TypeError: if session is not a SQLAlchemy Session
        """
        # Convert the bounds first so ValueError is raised before the session check.
        min_easting, max_easting = float(min_easting), float(max_easting)
        min_northing, max_northing = float(min_northing), float(max_northing)

        if not isinstance(session, Session):
            raise TypeError("'session' is not of type SQLAlchemy Session!")

        # NOTE: "== None" is intentional -- SQLAlchemy translates it to IS NULL.
        query = session.query(GeoPoint) \
            .filter(sq.between(GeoPoint.east, min_easting, max_easting)) \
            .filter(sq.between(GeoPoint.north, min_northing, max_northing)) \
            .filter(sq.or_(GeoPoint.line_id == None, GeoPoint.line_id == -1)) \
            .order_by(cls.id)
        points = query.all()
        # Re-attach the session so the loaded points can lazily talk to the DB.
        for geo_point in points:
            geo_point.session = session
        return points
Exemple #3
0
def get_data():
    """
    Flask endpoint: return all measurements whose start or end time falls
    inside the requested [s_time, e_time] window.

    The JSON body carries 's_time' and 'e_time' as epoch milliseconds.
    On a query error a {'result': 'failure'} payload is returned instead;
    the DB session is always closed.
    """
    json_data = request.json
    print("/data", json_data, "\n")

    # Incoming timestamps are epoch milliseconds; convert to datetime for the query.
    q_stime = datetime.datetime.fromtimestamp(json_data['s_time'] / 1e3)
    q_etime = datetime.datetime.fromtimestamp(json_data['e_time'] / 1e3)

    try:
        # Match rows whose start OR end time lies inside the window.
        statement = select([Measurements]).where(
            between(Measurements.s_time, q_stime, q_etime)
            | between(Measurements.e_time, q_stime, q_etime))
        results = db_session.execute(statement).fetchall()
        print(results)
        # Format the rows into a JSON-serializable list.
        result = [Measurements(id=row['id'],
                               s_time=row['s_time'],
                               e_time=row['e_time'],
                               data=row['data']).jsonize()
                  for row in results]
    except Exception as e:
        # Typo fixed in the log message: "occured" -> "occurred".
        print("Error occurred trying to upload data:", e)
        result = {'result': 'failure'}
    finally:
        db_session.close()

    return jsonify(result)
Exemple #4
0
def create_doc_annotations():
    """
    Fetch per-document feature data for an experiment from the server and
    annotate each document's fragment/loss features with matching fragments
    from the local database.

    Reads module-level globals: server_address, args (experiment_id, binsize),
    scan (doc name -> scan id), db_session, loss2smiles; appends one annotated
    dict per document to the module-level ``doc_annotations`` list.
    """
    url = server_address + 'basicviz/get_all_doc_data/{}'.format(
        args.experiment_id)
    response = requests.get(url)
    doc_features = response.json()
    # json.dump(doc_features, open('tmp.json', 'w'))
    # doc_features = list(json.load(open('tmp.json', 'r')))

    for doc, feature_list, motifs in doc_features:
        # Skip documents we have no scan id for.
        if doc not in scan:
            continue
        # Parent molecule for this document's scan; expect exactly one match.
        results = db_session.query(Fragment.mz, Molecule.mol).\
                filter(Fragment.scanid == scan[doc]).\
                join(Molecule, Fragment.molid==Molecule.molid).all()
        sys.stderr.write('doc: ' + doc + '; scan: ' + str(scan[doc]) + '\n')
        if len(results) != 1:
            sys.stderr.write('---> No molecule !\n')
            continue
        parent_mz, parent_mol = results[0]
        ddoc = {
            'name': doc,
            'scan': scan[doc],
            'mol': parent_mol,
            'features': []
        }
        # read fragments and losses
        frags = db_session.query(Fragment).\
                filter(Fragment.scanid == scan[doc] + 1)
        for f, i in feature_list:
            # Feature names look like "<type>_<massbin>", e.g. "fragment_123.45".
            feature_type, massbin = f.split('_')
            feature = {
                'name': f,
                'intensity': i,
                'type': feature_type,
                'matches': []
            }
            massbin = float(massbin)
            if feature_type == 'fragment':
                # Fragment feature: match on the fragment m/z within the bin.
                matching_frags = frags.filter(
                    between(Fragment.mz, massbin - args.binsize / 2,
                            massbin + args.binsize / 2)).all()
            else:
                # Loss feature: match on (parent m/z - fragment m/z) within the bin.
                matching_frags = frags.filter(
                    between(parent_mz - Fragment.mz,
                            massbin - args.binsize / 2,
                            massbin + args.binsize / 2)).all()
            for frag in matching_frags:
                if frag.atoms in [m['fragatoms'] for m in feature['matches']]:
                    continue  # avoid redundant matches
                smiles = frag.smiles if feature_type == 'fragment' else loss2smiles(
                    parent_mol, frag.atoms)
                match = {
                    'smiles': smiles,
                    'mz': frag.mz,
                    'fragatoms': frag.atoms
                }
                feature['matches'].append(match)
            ddoc['features'].append(feature)
        doc_annotations.append(ddoc)
Exemple #5
0
def select_data(begin, end, person=None):
    """
    Load Log rows with timestamp in [begin, end] into a pandas DataFrame,
    optionally restricted to a single user id.
    """
    time_window = between(Log.timestamp, begin, end)
    if person:
        stmt = sqlselect([Log]).where(and_(time_window, Log.userid == person))
    else:
        stmt = sqlselect([Log]).where(time_window)
    return pd.read_sql(stmt, engine)
    def create_thickness_point(
        sorted_dict: dict,
        well_id: int,
        marker_1: int,
        marker_2: int,
        session: Session,
        use_faulted: bool = False,
        fault_name: str = "",
        add_properties: Tuple = tuple()) -> GeoPoint:
        """
        Generate a new GeoPoint with a thickness property from 2 well markers.

        :param sorted_dict: dictionary containing well_id / WellMarker data
        :param well_id: current well_id
        :param marker_1: id of marker 1 (index into sorted_dict[well_id])
        :param marker_2: id of marker 2 (index into sorted_dict[well_id])
        :param session: current SQLAlchemy session
        :param use_faulted: should faulted sequences be included? If True, a
                            "faulted" property holding the fault-marker count
                            is added instead of raising FaultException
        :param fault_name: name of fault stratigraphic unit (default: "")
        :param add_properties: Adds the properties to the GeoPoint. Format for each property: (value, type, name, unit)
        :return: new GeoPoint Object with a "thickness" property (unit "m")
        :raises FaultException: if a fault is inside the section and use_faulted is False
        :raises ValueError: if a property in the add_properties tuple has less than 4 entries
        """

        min_depth = sorted_dict[well_id][marker_1].depth
        max_depth = sorted_dict[well_id][marker_2].depth

        # Fault markers of this well whose unit name matches fault_name.
        faults = session.query(WellMarker).join(StratigraphicObject). \
            filter(WellMarker.horizon_id == StratigraphicObject.id). \
            filter(StratigraphicObject.unit_name == fault_name). \
            filter(WellMarker.well_id == well_id)
        # The two markers may come in either depth order; normalise the bounds
        # before restricting faults to the section between them.
        if min_depth > max_depth:
            faults = faults.filter(
                sq.between(WellMarker.drill_depth, max_depth, min_depth))
        else:
            faults = faults.filter(
                sq.between(WellMarker.drill_depth, min_depth, max_depth))
        if (faults.count() > 0) and (use_faulted is False):
            raise FaultException("Fault inside section")

        # The new point inherits marker_1's location.
        point = sorted_dict[well_id][marker_1].to_geopoint()
        # NOTE(review): thickness is max_depth - min_depth and may be negative
        # when the markers are passed in reversed depth order -- confirm intended.
        thickness = Property(max_depth - min_depth, PropertyTypes.FLOAT,
                             "thickness", "m", session)
        point.add_property(thickness)
        if use_faulted:
            faulted = Property(faults.count(), PropertyTypes.INT, "faulted",
                               "count", session)
            point.add_property(faulted)
        for prop in add_properties:
            if len(prop) < 4:
                raise ValueError("property tuple has less than 4 entries!")
            new_property = Property(prop[0], PropertyTypes[prop[1]], prop[2],
                                    prop[3], session)
            point.add_property(new_property)
        return point
Exemple #7
0
def orders_from_to_query(is_history, from_, to, business_id):
    """
    Build an orders query restricted to a date window.

    Orders with is_history None are filtered on their order date; all
    others on their completion date.

    :param is_history: None selects the order-date filter, anything else
                       the completion-date filter
    :param from_: start of the date window (inclusive)
    :param to: end of the date window (inclusive)
    :param business_id: business whose orders are queried
    :return: the filtered query
    """
    # Fix: removed leftover debug statement print('here').
    if is_history is None:
        # Filter on when the order was placed.
        query = get_orders_query(is_history, business_id).filter(
            between(Orders.order_date, from_, to))
    else:
        # Filter on when the order was completed.
        query = get_orders_query(is_history, business_id).filter(
            between(Orders.completion_date, from_, to))

    return query
Exemple #8
0
    def immediate_children(self):
        """
        Get immediate children of the organization
        Reference:
        http://mikehillyer.com/articles/managing-hierarchical-data-in-mysql/
        http://www.sitepoint.com/hierarchical-data-database/
        Generated SQL Sample:
        SELECT node.name, (COUNT(parent.name) - (sub_tree.depth + 1)) AS depth
        FROM nested_category AS node,
             nested_category AS parent,
             nested_category AS sub_parent,
             (
              SELECT node.name, (COUNT(parent.name) - 1) AS depth
              FROM nested_category AS node,
              nested_category AS parent
              WHERE node.lft BETWEEN parent.lft AND parent.rgt
              AND node.name = 'PORTABLE ELECTRONICS'
              GROUP BY node.name
              ORDER BY node.lft
             )AS sub_tree
             WHERE node.lft BETWEEN parent.lft AND parent.rgt
             AND node.lft BETWEEN sub_parent.lft AND sub_parent.rgt
             AND sub_parent.name = sub_tree.name
             GROUP BY node.name
             HAVING depth <= 1
             ORDER BY node.lft;
        """
        # Subquery: absolute nested-set depth of the current node (self),
        # computed as the number of its ancestors minus one.
        s_node = aliased(Organization, name='s_node')
        s_parent = aliased(Organization, name='s_parent')
        sub_tree = db.session.query(s_node.id, (func.count(s_parent.name) - 1).label('depth')). \
            filter(and_(between(s_node.lft, s_parent.lft, s_parent.rgt), s_node.id == self.id)) \
            .group_by(s_node.id, s_node.lft).order_by(s_node.lft).subquery()

        # Main query: organizations under self whose depth relative to self is <= 1
        # (self itself is excluded below), i.e. the immediate children.
        t_node = aliased(Organization, name='t_node')
        t_parent = aliased(Organization, name='t_parent')
        t_sub_parent = aliased(Organization, name='t_sub_parent')
        # Postgres does not support label as (func.count(t_parent.name) - (sub_tree.c.depth + 1)).label('xxx')
        # And have the field in having clause will cause issue.
        query = (
            db.session.query(
                t_node.id, t_node.name,
                (func.count(t_parent.name) - (sub_tree.c.depth + 1))).filter(
                    and_(
                        between(t_node.lft, t_parent.lft, t_parent.rgt),
                        between(t_node.lft, t_sub_parent.lft,
                                t_sub_parent.rgt),
                        t_node.id !=
                        self.id,  # Exclude current node --> itself
                        t_sub_parent.id == sub_tree.c.id)).group_by(
                            t_node.id, t_node.name, t_node.lft,
                            'depth').having(
                                (func.count(t_parent.name) -
                                 (sub_tree.c.depth + 1)) <= 1).order_by(
                                     t_node.lft))
        return id_query_to_obj(Organization, query)
Exemple #9
0
    def listIdentifiers(self, metadataPrefix, set=None, cursor=None,
                        from_=None, until=None, batch_size=None):
        '''List all identifiers for this repository.

        Headers are built from active, public datasets; optionally restricted
        to a group (set) and/or a revision-timestamp window (from_/until).
        cursor skips the first ``cursor`` results.
        '''
        data = []
        packages = []
        group = None
        if not set:
            if not from_ and not until:
                # No time window: all active public datasets.
                packages = Session.query(Package).filter(Package.type=='dataset').\
                    filter(Package.private!=True).filter(Package.state=='active').all()
            else:
                # Open-ended or closed revision-timestamp windows.
                if from_ and not until:
                    packages = Session.query(Package).filter(Package.type=='dataset').filter(Package.private!=True).\
                        filter(PackageRevision.revision_timestamp > from_).\
                        filter(Package.name==PackageRevision.name).filter(Package.state=='active').all()
                if until and not from_:
                    packages = Session.query(Package).filter(Package.type=='dataset').filter(Package.private!=True).\
                        filter(PackageRevision.revision_timestamp < until).\
                        filter(Package.name==PackageRevision.name).filter(Package.state=='active').all()
                if from_ and until:
                    packages = Session.query(Package).filter(Package.type=='dataset').filter(Package.private!=True).\
                        filter(between(PackageRevision.revision_timestamp, from_, until)).\
                        filter(Package.name==PackageRevision.name).filter(Package.state=='active').all()
        else:
            # A set was given: restrict to the matching group's packages.
            group = Group.get(set)
            if group:
                packages = group.packages(return_query=True).filter(Package.type=='dataset').\
                    filter(Package.private!=True).filter(Package.state=='active')
                if from_ and not until:
                    packages = packages.filter(PackageRevision.revision_timestamp > from_).\
                        filter(Package.name==PackageRevision.name).filter(Package.state=='active')
                if until and not from_:
                    packages = packages.filter(PackageRevision.revision_timestamp < until).\
                        filter(Package.name==PackageRevision.name).filter(Package.state=='active')
                if from_ and until:
                    packages = packages.filter(between(PackageRevision.revision_timestamp, from_, until)).\
                        filter(Package.name==PackageRevision.name).filter(Package.state=='active')
                packages = packages.all()
        if cursor:
            # Pagination: skip the first `cursor` packages.
            packages = packages[cursor:]
        for package in packages:
            # The header's setSpec is the group name when available, else the package name.
            spec = package.name
            if group:
                spec = group.name
            else:
                if package.owner_org:
                    group = Group.get(package.owner_org)
                    if group and group.name:
                        spec = group.name
                    group = None
            data.append(common.Header('', package.id, package.metadata_created, [spec], False))

        return data
Exemple #10
0
def nodes():
    """
    Flask endpoint: return intersection polygons (WGS84) of the requested
    node types within the current map viewport.

    Query parameters:
      features -- node type ids to look for (repeatable)
      radius   -- "type_id,radius" integer pairs (repeatable)
      bounds   -- comma-separated "lon,lat,lon,lat" viewport corners
    Responds 400 if 'bounds' is missing/malformed, and an error payload
    when the viewport is larger than 15km x 15km.
    """
    max_viewport_area = 15000 * 15000 # 15km * 15km
    node_types = request.args.getlist('features')
    radius = dict([map(int, value.split(',')) for value in request.args.getlist('radius')])

    if not node_types:
        # nothing to look for...
        return jsonify({})

    try:
        bounds = request.args.get('bounds').split(',')
    except (AttributeError, TypeError):
        # Fix: request.args.get returns None when 'bounds' is absent, and
        # None.split raises AttributeError (not TypeError) -- catch both so a
        # missing parameter yields a clean 400 instead of a 500.
        abort(400)

    # Convert bounds to utm
    bounds = (
        # upper left bound lat/lon
        utm.from_latlon(float(bounds[1]), float(bounds[0])),
        # lower right bound lat/lon
        utm.from_latlon(float(bounds[3]), float(bounds[2]))
    )

    if nodetools.bounds_area(bounds) > max_viewport_area:
        return jsonify({'error': 'Zoom in to view the results'})

    zone_number, zone_letter = bounds[0][2:4]

    # Select nodes. I'm going to assume all nodes within these bounds belong to
    # the same utm zone number/letter for now.
    nodes = db.session.query(Node.x, Node.y, Node.node_type_id) \
        .filter(
            Node.node_type_id.in_(node_types),
            between(Node.x, bounds[0][0], bounds[1][0]),
            between(Node.y, bounds[0][1], bounds[1][1]),
            Node.zone_number == zone_number,
            Node.zone_letter == zone_letter
        ) \
        .order_by(Node.node_type_id) \
        .all()

    try:
        intersections = node_intersections(nodes, min_layers=len(node_types), type_radius=radius)
    except TooFewNodeTypesException:
        intersections = []

    # Convert back to WGS84.
    latlon_intersections = [list(to_latlon(polygon.exterior.coords, zone_number, zone_letter)) for polygon in intersections]

    data = {
        'polygons': latlon_intersections
    }

    return jsonify(data)
Exemple #11
0
 def listRecords(self, metadataPrefix, set=None, cursor=None, from_=None,
                 until=None, batch_size=None):
     '''Show a selection of records, basically lists all datasets.

     Active public datasets, optionally restricted to a group (set) and/or
     a revision-timestamp window (from_/until). cursor skips the first
     ``cursor`` results.
     '''
     data = []
     packages = []
     group = None
     if not set:
         if not from_ and not until:
             # No time window: all active public datasets.
             packages = Session.query(Package).filter(Package.type=='dataset').filter(Package.private!=True).\
                 filter(Package.state=='active').all()
         if from_ and not until:
             packages = Session.query(Package).filter(Package.type=='dataset').filter(Package.private!=True).\
                 filter(PackageRevision.revision_timestamp > from_).filter(Package.name==PackageRevision.name).\
                 filter(Package.state=='active').all()
         if until and not from_:
             packages = Session.query(Package).filter(Package.type=='dataset').filter(Package.private!=True).\
                 filter(PackageRevision.revision_timestamp < until).filter(Package.name==PackageRevision.name).\
                 filter(Package.state=='active').all()
         if from_ and until:
             packages = Session.query(Package).filter(Package.type=='dataset').filter(Package.private!=True).\
                 filter(between(PackageRevision.revision_timestamp, from_, until)).\
                 filter(Package.name==PackageRevision.name).filter(Package.state=='active').all()
     else:
         # A set was given: restrict to the matching group's packages.
         group = Group.get(set)
         if group:
             packages = group.packages(return_query=True)
             if from_ and not until:
                 packages = packages.filter(PackageRevision.revision_timestamp > from_).\
                     filter(Package.type=='dataset').filter(Package.private!=True).\
                     filter(Package.name==PackageRevision.name).filter(Package.state=='active').all()
             if until and not from_:
                 packages = packages.filter(PackageRevision.revision_timestamp < until).\
                     filter(Package.type=='dataset').filter(Package.private!=True).\
                     filter(Package.name==PackageRevision.name).filter(Package.state=='active').all()
             if from_ and until:
                 packages = packages.filter(between(PackageRevision.revision_timestamp, from_, until)).\
                     filter(Package.type=='dataset').filter(Package.private!=True).\
                     filter(Package.name==PackageRevision.name).filter(Package.state=='active').all()
     if cursor:
         # Pagination: skip the first `cursor` packages.
         packages = packages[cursor:]
     for res in packages:
         # The record's setSpec is the group name when available, else the dataset name.
         spec = res.name
         if group:
             spec = group.name
         else:
             if res.owner_org:
                 group = Group.get(res.owner_org)
                 if group and group.name:
                     spec = group.name
                 group = None
         data.append(self._record_for_dataset(res, spec))
     return data
Exemple #12
0
	def SelectByDateRange(self, query, date_type, from_date_string, to_date_string):
		"""
		Filter *query* by a date range on the column selected by *date_type*.

		Date strings are expected in MM/DD/YYYY format. date_type selects
		which FileIndex column is compared ('CreatedDate' -> ctime,
		'ModifiedDate' -> mtime, 'AddedDate' -> added_time); any other value
		returns the query unchanged.
		"""
		# Fix: removed leftover Python 2 debug print statement (also a syntax
		# error under Python 3).
		# Parse MM/DD/YYYY (slices: year [6:10], month [0:2], day [3:5]).
		from_date = datetime.date(int(from_date_string[6:10]), int(from_date_string[0:2]), int(from_date_string[3:5]))
		to_date = datetime.date(int(to_date_string[6:10]), int(to_date_string[0:2]), int(to_date_string[3:5]))

		if date_type == 'CreatedDate':
			return query.filter(between(FileIndex.ctime, from_date, to_date))
		elif date_type == 'ModifiedDate':
			return query.filter(between(FileIndex.mtime, from_date, to_date))
		elif date_type == 'AddedDate':
			return query.filter(between(FileIndex.added_time, from_date, to_date))
		return query
Exemple #13
0
 def _filter_packages(set, cursor, from_, until, batch_size):
     '''Get a part of datasets for "listNN" verbs.

     Active public datasets, optionally restricted to a group or to the
     special 'openaire_data' tag (set), and/or to a revision-timestamp
     window (from_/until). batch_size limits and cursor offsets the query.
     Returns (packages, setspc) where setspc is only filled for the
     'openaire_data' set.
     '''
     packages = []
     setspc = None
     if not set:
         packages = Session.query(Package).filter(Package.type=='dataset'). \
             filter(Package.state == 'active').filter(Package.private!=True)
         # Open-ended or closed revision-timestamp windows.
         if from_ and not until:
             packages = packages.filter(PackageRevision.revision_timestamp > from_).\
                 filter(Package.name==PackageRevision.name)
         if until and not from_:
             packages = packages.filter(PackageRevision.revision_timestamp < until).\
                 filter(Package.name==PackageRevision.name)
         if from_ and until:
             packages = packages.filter(between(PackageRevision.revision_timestamp, from_, until)).\
                 filter(Package.name==PackageRevision.name)
         if batch_size:
             packages = packages.limit(batch_size)
         if cursor:
             packages = packages.offset(cursor)
         packages = packages.all()
     elif set == 'openaire_data':
         # Special set: all packages carrying the 'openaire_data' tag
         # (no time filtering or pagination applied here).
         oa_tag = Session.query(Tag).filter(
             Tag.name == 'openaire_data').first()
         if oa_tag:
             packages = oa_tag.packages
         setspc = set
     else:
         group = Group.get(set)
         if group:
             # Note that group.packages never returns private datasets regardless of 'with_private' parameter.
             packages = group.packages(return_query=True, with_private=False).filter(Package.type=='dataset'). \
                 filter(Package.state == 'active')
             if from_ and not until:
                 packages = packages.filter(PackageRevision.revision_timestamp > from_).\
                     filter(Package.name==PackageRevision.name)
             if until and not from_:
                 packages = packages.filter(PackageRevision.revision_timestamp < until).\
                     filter(Package.name==PackageRevision.name)
             if from_ and until:
                 packages = packages.filter(between(PackageRevision.revision_timestamp, from_, until)).\
                     filter(Package.name==PackageRevision.name)
             if batch_size:
                 packages = packages.limit(batch_size)
             if cursor:
                 packages = packages.offset(cursor)
             packages = packages.all()
     # if cursor is not None:
     #     cursor_end = cursor + batch_size if cursor + batch_size < len(packages) else len(packages)
     #     packages = packages[cursor:cursor_end]
     return packages, setspc
Exemple #14
0
    def __init__(
        self,
        hid=None,
        hdiameter=None,
        hheight=None
    ):
        """
        Instantiate the Calibration Chamber class.

        Parameters
        ----------
        hid:  str | calibration chamber ID (a sequence of IDs also accepted)
        hdiameter:  (float, float) | chamber diameter range (min, max) in cm
        hheight:  (float, float) | chamber height range (min, max) in cm

        """
        # Get the connection and soils table objects.
        connection, htable = get_table()

        # Filter the Soils Database.
        stmt = select([htable])

        ## Filter by chamber ID.
        if hid:
            # Fix: the original used in_((hid)) -- "(hid)" is NOT a tuple, so
            # a plain-string ID was iterated character by character. Wrap
            # scalar values in a list; sequences pass through unchanged.
            hids = hid if isinstance(hid, (list, tuple, set)) else [hid]
            stmt = stmt.where(htable.columns.hid.in_(hids))

        ## Filter by chamber diameter.
        if hdiameter:
            stmt = stmt.where(
                between(
                    htable.columns.hdiameter,
                    hdiameter[0],
                    hdiameter[1]
                )
            )

        ## Filter by chamber height.
        if hheight:
            stmt = stmt.where(
                between(
                    htable.columns.hheight,
                    hheight[0],
                    hheight[1]
                )
            )

        ## Make the dataframe.
        self.__results = pd.read_sql(
            stmt,
            connection
        ).set_index('hid')
Exemple #15
0
def search_matching_transaction(res):
    """
    Find open transactions of other users that could match the transaction
    given by the 'transaction_id' request argument: complementary legs in
    the matching currency whose travel dates fall 3-15 hours around this
    transaction's arrival/departure.

    :param res: response helper; the match list is sent via res.send
    :raises FailedRequest: 404 when the transaction id is unknown
    """
    def format_result(data):
        # Shape each joined (UserTransaction, name, email) row into a plain dict.
        for row in data:
            item = row.UserTransaction
            yield {
                'user_transaction_id': item.user_transaction_id,
                'amount': float(str(item.amount)),
                'currency': item.currency,
                'exchange_currency': item.exchange_currency,
                'depart_date': item.depart_date,
                'arrive_date': item.arrive_date,
                'depart_from': item.depart_from,
                'arrive_to': item.arrive_to,
                'name': row.name,
                'email': row.email,
                'user_id': item.user_id
            }

    params = utils.get_data(['transaction_id'], [], request.args)

    base = db.session.query(UserTransaction)\
        .filter(UserTransaction.user_transaction_id == params['transaction_id']).first()

    # Fix: check for a missing transaction BEFORE dereferencing it --
    # previously base.arrive_to raised AttributeError instead of a clean 404.
    if not base:
        raise FailedRequest('Cannot find transaction!', status_code=404)

    base_arrive = base.arrive_to
    base_depart = base.depart_from
    # Set 12 hours as grace period/waiting time
    base_arrive_date_start = base.arrive_date + datetime.timedelta(hours=3)
    base_arrive_date_end = base.arrive_date + datetime.timedelta(hours=15)

    base_depart_date_start = base.depart_date - datetime.timedelta(hours=15)
    base_depart_date_end = base.depart_date - datetime.timedelta(hours=3)

    transactions = db.session.query(UserTransaction, User.name, User.email)\
        .join(User, User.user_id == UserTransaction.user_id)\
        .filter(UserTransaction.closed == False)\
        .filter(UserTransaction.user_id != g.current_user['user_id'])\
        .filter(UserTransaction.exchange_currency == base.currency)\
        .filter(
            or_(
                and_(UserTransaction.depart_from == base_arrive,
                     between(UserTransaction.depart_date, base_arrive_date_start, base_arrive_date_end)),
                and_(UserTransaction.arrive_to == base_depart,
                     between(UserTransaction.arrive_date, base_depart_date_start, base_depart_date_end))
            )
        ).all()

    return res.send(list(format_result(transactions)))
Exemple #16
0
def date_ap(item: QueryableAttribute, expected: datetime):
    """
    Date is approximately equal. Approximately means at discretion of the implementation. Here for dates we set
    approximation between one day before and one day after the required value
    :param item: column attribute to compare
    :param expected: the required value
    :return: a BETWEEN predicate over the +/- one-day window
    """
    delta = timedelta(days=1)
    lower, upper = expected - delta, expected + delta
    column_type = item.property.columns[0].type
    # Pure DATE columns are compared at day granularity; everything else
    # is compared as a full DATETIME.
    if isinstance(column_type, sqltypes.Date):
        return between(func.DATE(item), func.DATE(lower), func.DATE(upper))
    return between(func.DATETIME(item), func.DATETIME(lower),
                   func.DATETIME(upper))
Exemple #17
0
def list_all(country_code=None):
    """Show a list of all airports."""
    # Styles 2..5 select the airport categories shown in the listing.
    style_filter = between(Airport.style, 2, 5)
    if country_code is None:
        or_args = [style_filter]
    else:
        or_args = [and_(style_filter, Airport.country_code == country_code)]

    query = session.query(Airport) \
        .order_by(Airport.name) \
        .filter(*or_args)

    print('--- Airports ---')
    for airport in query.all():
        print(airport.name)
Exemple #18
0
 def listIdentifiers(self, metadataPrefix, set=None, cursor=None,
                     from_=None, until=None, batch_size=None):
     '''List all identifiers for this repository.

     Optionally restricted to a group (set) and/or a revision-timestamp
     window (from_/until). cursor is treated as a pagination offset,
     consistent with the repository's other list verbs.
     '''
     data = []
     packages = []
     if not set:
         if not from_ and not until:
             packages = Session.query(Package).all()
         else:
             # Open-ended or closed revision-timestamp windows.
             if from_:
                 packages = Session.query(Package).\
                     filter(PackageRevision.revision_timestamp > from_).\
                     all()
             if until:
                 packages = Session.query(Package).\
                     filter(PackageRevision.revision_timestamp < until).\
                     all()
             if from_ and until:
                 packages = Session.query(Package).\
                     filter(between(PackageRevision.revision_timestamp,
                                    from_,
                                    until)\
                            ).all()
     else:
         group = Group.get(set)
         if group:
             packages = group.active_packages()
             if from_ and not until:
                 packages = packages.\
                     filter(PackageRevision.revision_timestamp > from_)
             if until and not from_:
                 packages = packages.\
                     filter(PackageRevision.revision_timestamp < until)
             if from_ and until:
                 packages = packages.filter(
                     between(PackageRevision.revision_timestamp,
                             from_,
                             until))
             packages = packages.all()
     if cursor:
         # Fix: cursor is a pagination offset, so SKIP the first `cursor`
         # entries ([cursor:]) -- the original kept them ([:cursor]), unlike
         # the sibling list verbs in this repository.
         packages = packages[cursor:]
     for package in packages:
         data.append(common.Header(package.id,
                                   package.metadata_created,
                                   [package.name],
                                   False))
     return data
Exemple #19
0
    def index(self):
        """
        Render cohort-retention stats: users are grouped into cohorts by the
        week/month of their join date, and for each subsequent interval the
        table shows the share (and count) of the cohort that was active.

        Query args: activity ('activity' selects UserActivity, otherwise
        UserAccountEvent), interval_count (default 10), interval
        ('week'/'month', default 'week'), plus optional gender, locale and
        age-range filters.
        """
        from gviz_data_table import Table
        from rockpack.mainsite.services.user.models import User, UserActivity, UserAccountEvent
        if request.args.get('activity') == 'activity':
            activity_model, activity_date = UserActivity, UserActivity.date_actioned
        else:
            activity_model, activity_date = UserAccountEvent, UserAccountEvent.event_date

        try:
            interval_count = int(request.args['interval_count'])
        except Exception:
            interval_count = 10

        interval = request.args.get('interval')
        if interval not in ('week', 'month'):
            interval = 'week'

        # Cohort key = week/month part of the join date; label = earliest join
        # date within the cohort; active_interval = how many intervals after
        # joining the activity happened.
        cohort = func.date_part(interval, User.date_joined)
        cohort_label = func.min(func.date(User.date_joined))
        active_interval = (func.date_part(interval, activity_date) - cohort).label('active_interval')

        q = readonly_session.query(User).filter(
            User.date_joined > LAUNCHDATE, User.refresh_token != '')
        if request.args.get('gender') in ('m', 'f'):
            q = q.filter(User.gender == request.args['gender'])
        if request.args.get('locale') in app.config['ENABLED_LOCALES']:
            q = q.filter(User.locale == request.args['locale'])
        if request.args.get('age') in ('13-18', '18-25', '25-35', '35-45', '45-55'):
            age1, age2 = map(int, request.args['age'].split('-'))
            q = q.filter(between(
                func.age(User.date_of_birth),
                text("interval '%d years'" % age1),
                text("interval '%d years'" % age2)
            ))

        # (cohort, interval offset) -> distinct active user count.
        active_users = dict(
            ((c, int(w)), u) for c, w, u in
            q.join(
                activity_model,
                (activity_model.user == User.id) &
                (activity_date >= User.date_joined)
            ).group_by(cohort, active_interval).values(
                cohort, active_interval, func.count(func.distinct(activity_model.user))
            )
        )

        table = Table(
            [dict(id='cohort', type=date)] +
            [dict(id='%s%d' % (interval, i), type=str) for i in range(interval_count)]
        )

        # One table row per cohort: label plus "percent% (count)" per interval.
        totals = q.group_by(cohort).order_by(cohort)
        for c, l, t in totals.values(cohort, cohort_label, func.count()):
            data = []
            for i in range(interval_count):
                a = active_users.get((c, i), '')
                data.append(a and '%d%% (%d)' % (ceil(a * 100.0 / t), a))
            table.append([l] + data)

        return self.render('admin/retention_stats.html', data=table.encode())
Exemple #20
0
 def post_list(self, fromDate, toDate):
     """Return all posts published within [fromDate, toDate], newest first."""
     date_window = between(PostId.publish_date, fromDate, toDate)
     query = PostId.query.filter(date_window)
     query = query.order_by(desc(PostId.publish_date), PostId.publish_id)
     return query.all()
Exemple #21
0
 def calls_for_pricing(self, area_code, phone_number, start_date, end_date):
     """
     Return calls placed from (area_code, phone_number) whose end record
     falls within [start_date, end_date], each with its applied pricing
     configs attached under 'configs'.

     Calls are stored as two rows sharing a call_id (a start record and an
     end record); a self-join pairs them up.
     """
     call_start_t = aliased(self.t)
     call_end_t = aliased(self.t)
     # Pair start and end records of the same call via their shared call_id.
     query_join = join(call_start_t, call_end_t,
                       call_start_t.c.call_id == call_end_t.c.call_id)
     columns = [
         call_start_t.c.id.label('call_start_id'), call_start_t.c.call_id,
         call_start_t.c.destination_area_code.label('area_code'),
         call_start_t.c.destination.label('phone'),
         call_start_t.c.timestamp.label('start_at'),
         call_end_t.c.timestamp.label('end_at')
     ]
     query = select(columns).select_from(query_join)
     # Exclude rows joined with themselves.
     query = query.where(call_start_t.c.id != call_end_t.c.id)
     query = query.where(call_start_t.c.source_area_code == area_code)
     query = query.where(call_start_t.c.source == phone_number)
     query = query.where(
         between(call_end_t.c.timestamp, start_date, end_date))
     result = self.connection.execute(query).fetchall()
     calls = self.to_dict(result)
     # Attach all pricing configs recorded against each call's start row.
     for call in calls:
         configs = []
         where = [self.db.applied_config.call_id == call['call_start_id']]
         for config in self.db.applied_config.select(where=where):
             configs.append(config)
         call['configs'] = configs
     return calls
Exemple #22
0
def search_around(timestamp: T.number, ):
    """Search for entries temporally close to *timestamp* (a UTC epoch
    value): from 3 hours before it up to 2 minutes after it."""
    get_logger().info('/search_around %s', timestamp)
    utc_timestamp = timestamp  # old 'timestamp' name is legacy

    # TODO meh. use count/pagination instead?
    delta_back = timedelta(hours=3).total_seconds()
    delta_front = timedelta(minutes=2).total_seconds()
    # TODO not sure about delta_front.. but it also serves as quick hack to accomodate for all the truncations etc

    return search_common(
        url=
        'http://dummy.org',  # NOTE: not used in the where query (below).. perhaps need to get rid of this
        # The where clause extracts the ISO datetime prefix from the stored
        # 'dt' string, converts it to epoch seconds, and keeps rows whose
        # offset from utc_timestamp lies in [-delta_back, delta_front].
        where=lambda table, url: between(
            func.strftime(
                '%s',  # NOTE: it's tz aware, e.g. would distinguish +05:00 vs -03:00
                # this is a bit fragile, relies on cachew internal timestamp format, e.g.
                # 2020-11-10T06:13:03.196376+00:00 Europe/London
                func.substr(
                    table.c.dt,
                    1,  # substr is 1-indexed
                    # instr finds the first match, but if not found it defaults to 0.. which we hack by concatting with ' '
                    func.instr(
                        func.cast(table.c.dt, types.Unicode).op('||')
                        (' '), ' ') - 1,
                    # NOTE: the cast is required -- without it the bare ' '
                    # operand appears to be treated as a datetime.
                )) - literal(utc_timestamp),
            literal(-delta_back),
            literal(delta_front),
        ),
    )
def run_test(args):
    """Back-test the spread model over games between args.start_date and
    args.end_date, printing per-game details and success/failure totals.

    A game is scored only when the computed spread disagrees with the
    betting line by more than 3 points; it counts as a success when the
    computed spread and the actual result fall on the same side of the
    line.
    """
    start_date = parser.parse(args.start_date)
    end_date = parser.parse(args.end_date)
    all_games = (db.session.query(Game)
                 .filter(between(Game.game_day, start_date, end_date))).all()

    numb_success = 0
    numb_failed = 0

    for game in all_games:
        computed_spread = compute_spread(
            host=game.home_team,
            visitor=game.visitor,
            date=str(game.game_day),
        )
        # Skip games where the model essentially agrees with the line.
        if abs(computed_spread - game.spread) <= 3:
            continue
        actual_spread = game.actual_spread
        success = False
        if computed_spread < game.spread and actual_spread < game.spread:
            success = True
        elif computed_spread > game.spread and actual_spread > game.spread:
            success = True
        if success:
            numb_success += 1
        else:
            numb_failed += 1
        # BUG FIX: the original used Python 2 print statements, which are
        # syntax errors under Python 3.
        print(game)
        print(computed_spread, game.spread, actual_spread, success)

    print('total games ', len(all_games))
    print(numb_success, numb_failed)
Exemple #24
0
def prune_keep_record(job_id, keep, storage):
    """Delete a job's oldest events, keeping its `keep` most recent ones.

    Determines the job's oldest event id (min_id) and the smallest id
    among the `keep` newest events (max_id), then deletes the job's
    events in the half-open range [min_id, max_id).
    """
    event = storage.session.query(Event).filter_by(job_id=job_id).\
        order_by(asc(Event.id)).limit(1).first()

    min_id = None
    if event is not None:
        min_id = event.id

    # The `keep` newest events for this job (descending id order).
    events = storage.session.query(Event).filter_by(job_id=job_id).\
        order_by(desc(Event.id)).limit(keep)
    event_ids = [e.id for e in events]
    if len(event_ids) == 0:
        logger.info('No events for {0}'.format(job_id))
        return

    # Smallest id among the events that must survive.
    max_id = min(event_ids)
    if min_id == max_id:
        logger.info('Min and max event ids for {0} are the same: {1} - {2}'.format(  # noqa
            job_id, min_id, max_id))
        return

    if min_id > max_id:
        logger.info('Min event id for {0} is larger than max event id: {1} - {2}'.format(  # noqa
            job_id, min_id, max_id))
        return

    logger.info('Job ID {0}, Pruning events {1} - {2}'.format(
        job_id, min_id, max_id))

    # BUG FIX: the original used between(Event.id, min_id, max_id), which
    # is inclusive on both ends and therefore also deleted the event with
    # id == max_id -- one of the `keep` events that should survive.  Use a
    # half-open range instead.
    stmt = Event.__table__.delete()\
                          .where(Event.id >= min_id)\
                          .where(Event.id < max_id)\
                          .where(Event.job_id == job_id)
    storage._engine.execute(stmt)
    storage.session.commit()
Exemple #25
0
 def get_current_configs(self, source_area_code, dest_area_code, timestamp):
     """Return the tariff configs active at *timestamp* for calls from
     *source_area_code* to *dest_area_code*, as a list of dicts."""
     # Join config rows with their associated tariff_condition rows.
     cfg_con_join = join(self.t, self.db.tariff_condition.t,
                         self.conditions_id == self.db.tariff_condition.id)
     columns = [
         self.id, self.standard_charge, self.call_time_charge,
         self.db.tariff_condition.id, self.db.tariff_condition.start_at,
         self.db.tariff_condition.end_at
     ]
     query = select(columns).select_from(cfg_con_join)
     # A config applies when the timestamp lies inside its validity window.
     query = query.where(
         between(timestamp, self.config_start_date, self.config_end_date))
     query = query.where(
         self.db.tariff_condition.source_area_code == source_area_code)
     query = query.where(
         self.db.tariff_condition.destination_area_code == dest_area_code)
     configs = self.connection.execute(query).fetchall()
     # Re-shape each positional result row into a dict with named keys.
     configs = [{
         'config_id': config_id,
         'standard_charge': standard_charge,
         'call_time_charge': call_time_charge,
         'condition_id': condition_id,
         'start_at': start_at,
         'end_at': end_at
     } for config_id, standard_charge, call_time_charge, condition_id,
                start_at, end_at in configs]
     return configs
    def active_on(cls, date, transient=None):
        """Constructs a check to filter the table 'transient' (with columns
        'effective_from' and 'effective_to') down to only those rows active on
        'date'.

        Args:
            date: The datetime on which the transient must be active.
            transient: If given, the table, column set or model whose transient
                columns will be checked; if None, the class from which this
                method was called will be the transient.  (Default: None.)

        Returns:
            A SQLAlchemy expression implementing the transient activity check.
        """
        null = None  # stop static analysis checkers from moaning about == None

        if transient is None:
            transient = cls

        # 'date' must fall between effective_from and the effective end.
        # NOTE: SQLAlchemy compiles '== None' as IS NULL and '!= None' as
        # IS NOT NULL, so the case() distinguishes open-ended rows.
        return sqlalchemy.between(
            date,
            transient.effective_from,
            sqlalchemy.case(
                [
                    # NULL effective_to => always on past effective_from
                    # (the row's upper bound collapses to 'date' itself).
                    (transient.effective_to == null, date),
                    (transient.effective_to != null, transient.effective_to)
                ]
            )
        )
Exemple #27
0
def weight_query(bin_id: int, start_timestamp: datetime,
                 end_timestamp: datetime, db: Session):
    """Fetch every BinWeight row for *bin_id* whose datetimestamp lies
    within [start_timestamp, end_timestamp] (inclusive)."""
    bin_weight = weight_model.BinWeight
    in_window = between(bin_weight.datetimestamp, start_timestamp,
                        end_timestamp)
    query = db.query(bin_weight).filter(
        and_(bin_weight.bin_id == bin_id, in_window))
    return query.all()
Exemple #28
0
    def getMaxCount(self, session=None, numerator_table=None, cur_year=None):
        """
        Determine the maximum value of the numbering counter.
        The maximum value of the counter must be produced
        taking into account the current program year.

        :param session: SQLAlchemy session.
        :param numerator_table: Numerator table.
        :param cur_year: Current year.
            If not defined then get actual year.
        :return: Maximum numerator counter value.
        """
        if numerator_table is None:
            numerator_table = self.createTable()
        if session is None:
            # NOTE(review): sessionmaker() returns a session *factory*, not
            # a Session; scoped_session() below accepts a factory, so a
            # live Session passed in by the caller may not work here --
            # confirm the intended contract with callers.
            session = sqlalchemy.orm.session.sessionmaker(
                bind=self._connection)
        if cur_year is None:
            cur_year = self.getActualYear()

        # Counters are scoped to the calendar year.
        min_date = datetime.date(cur_year, 1, 1)
        max_date = datetime.date(cur_year, 12, 31)

        session_query = sqlalchemy.orm.scoping.scoped_session(session)
        # MAX(cur_count) over rows dated inside the current year.
        max_count = session_query.query(
            sqlalchemy.sql.functions.max(
                numerator_table.c.cur_count).label('max_count')).filter(
                    sqlalchemy.between(numerator_table.c.dt_num, min_date,
                                       max_date)).one()[0]
        return max_count
Exemple #29
0
    def filter_ancestors(self, and_self=False):
        "The same as :meth:`filter_descendants` but filters ancestor nodes."
        options = self._tree_options
        obj = self._get_obj()

        #self._get_session_and_assert_flushed(obj)

        # Restrict ourselves to just those nodes within the same tree:
        tree_id = getattr(obj, self.tree_id_field.name)
        filter_ = self.tree_id_field == tree_id

        # Nested-set ancestor test: compare obj's own row (pinned via the
        # alias + primary-key equality below) against each candidate's
        # left/right interval.
        alias = sqlalchemy.alias(options.table)
        left_field = self.left_field
        filter_ &= sqlalchemy.between(getattr(alias.c, self.left_field.name),
                                      self.left_field, self.right_field)
        filter_ &= getattr(alias.c, self.pk_field.name) == \
                   getattr(obj,     self.pk_field.name)

        # Exclude obj itself unless the caller asked to include it.
        if not and_self:
            filter_ &= self.pk_field != getattr(obj, self.pk_field.name)

        # WHERE tree_id = <node.tree_id> AND <node.path> LIKE path || '%'
        #filter_ = (self.tree_id_field == tree_id) \
        #          & sqlalchemy.sql.expression.literal(
        #                path, sqlalchemy.String
        #            ).like(options.path_field + '%')
        #if and_self:
        #  filter_ &= self.depth_field  <= depth
        #else:
        #  filter_ &= self.depth_field < depth
        return filter_
  def filter_ancestors(self, and_self=False):
    "The same as :meth:`filter_descendants` but filters ancestor nodes."
    options = self._tree_options
    obj     = self._get_obj()

    #self._get_session_and_assert_flushed(obj)

    # Restrict ourselves to just those nodes within the same tree:
    tree_id = getattr(obj, self.tree_id_field.name)
    filter_ = self.tree_id_field == tree_id

    # Ancestor check using the nested-set left/right interval; the alias
    # together with the primary-key equality pins the comparison to obj's
    # own database row.
    alias = sqlalchemy.alias(options.table)
    left_field = self.left_field
    filter_ &= sqlalchemy.between(
      getattr(alias.c, self.left_field.name),
      self.left_field, self.right_field)
    filter_ &= getattr(alias.c, self.pk_field.name) == \
               getattr(obj,     self.pk_field.name)

    # Drop obj itself from the result unless explicitly requested.
    if not and_self:
      filter_ &= self.pk_field != getattr(obj, self.pk_field.name)

    # WHERE tree_id = <node.tree_id> AND <node.path> LIKE path || '%'
    #filter_ = (self.tree_id_field == tree_id) \
    #          & sqlalchemy.sql.expression.literal(
    #                path, sqlalchemy.String
    #            ).like(options.path_field + '%')
    #if and_self:
    #  filter_ &= self.depth_field  <= depth
    #else:
    #  filter_ &= self.depth_field < depth
    return filter_
Exemple #31
0
    def _filter_by_column_value(cls, query, column, value):
        """Translate a query-string filter *value* into a SQLAlchemy
        expression on *column* and return *query* filtered by it.

        Supported forms: IN lists and BETWEEN ranges (syntax defined by
        FILTERING_IN_OPERATOR_REGEX / FILTERING_BETWEEN_OPERATOR_REGEX,
        both negatable with a leading '!'), 'null' / '!null' checks,
        comparison prefixes (!, >=, >, <=, <), LIKE/ILIKE patterns
        (values containing '%'), and plain equality.

        :raises HttpBadRequest: for non-string or malformed values.
        """

        def return_(e):
            return query.filter(e)

        import_value = getattr(cls, 'import_value')
        if not isinstance(value, str):
            raise HttpBadRequest()

        # IN-list operator, optionally negated with a leading '!'.
        in_operator_match = FILTERING_IN_OPERATOR_REGEX.match(value)
        if in_operator_match:
            not_ = value.startswith('!')
            items = in_operator_match.groupdict()['items'].split(',')
            items = [i for i in items if i.strip() != '']
            if not len(items):
                raise HttpBadRequest('Invalid query string: %s' % value)
            expression = column.in_([import_value(column, j) for j in items])
            if not_:
                expression = ~expression

            return return_(expression)

        # BETWEEN operator with min/max bounds, optionally negated.
        between_operator_match = FILTERING_BETWEEN_OPERATOR_REGEX.match(value)
        if between_operator_match:
            not_ = value.startswith('!')
            groups = between_operator_match.groupdict()
            start, end = groups['min'].strip(), groups['max'].strip()
            if not (start or end):
                raise HttpBadRequest('Invalid query string: %s' % value)
            expression = between(column, start, end)
            if not_:
                expression = ~expression

            return return_(expression)

        # Null checks and comparisons.  Order matters: '>=' and '<=' must
        # be tested before '>' and '<'.
        if value == 'null':
            expression = column.is_(None)
        elif value == '!null':
            expression = column.isnot(None)
        elif value.startswith('!'):
            expression = column != import_value(column, value[1:])
        elif value.startswith('>='):
            expression = column >= import_value(column, value[2:])
        elif value.startswith('>'):
            expression = column > import_value(column, value[1:])
        elif value.startswith('<='):
            expression = column <= import_value(column, value[2:])
        elif value.startswith('<'):
            expression = column < import_value(column, value[1:])

        # LIKE
        elif '%' in value:
            # A '~' prefix selects case-insensitive matching (ILIKE).
            func, actual_value = (column.ilike, value[1:]) if value.startswith('~') else (column.like, value)
            expression = func(import_value(column, actual_value))

        # EQUAL
        else:
            expression = column == import_value(column, value)

        return return_(expression)
Exemple #32
0
    def get_event(self, statistic):
        """Return statistics events within the period described by
        *statistic*, as a list of dicts with formatted local datetimes.

        :raises IncorrectData: when no period information is present.
        """
        if statistic.start_date is None and statistic.finish_date is None and statistic.ready_date is None:
            raise IncorrectData()

        # Period lengths in seconds.
        HOURS = 3600
        DAY = 86400

        if statistic.ready_date == 'this_day':
            # Last 24 hours up to now.
            start_date = int(time.time()) - DAY
            finish_date = int(time.time())
        elif statistic.ready_date == 'this_hours':
            # Last hour up to now.
            start_date = int(time.time()) - HOURS
            finish_date = int(time.time())
        else:
            # Explicit YYYY-MM-DD bounds converted to epoch seconds.
            start_date = int(
                time.mktime(time.strptime(statistic.start_date, '%Y-%m-%d')))
            finish_date = int(
                time.mktime(time.strptime(statistic.finish_date, '%Y-%m-%d')))

        query = (self.session.query(StatisticsModel).filter(
            between(StatisticsModel.datetime, start_date, finish_date)).all())
        return_event = []
        for event in query:
            return_event.append(event.as_dict())

        # Replace the stored epoch value with a formatted local-time string.
        for item in return_event:
            item.update({
                'datetime':
                time.strftime("%d-%m-%Y %H:%M:%S",
                              time.localtime(item.get('datetime')))
            })

        return return_event
Exemple #33
0
def create_country_stats(session, date, logger=None):
    """Recompute per-country aircraft-beacon statistics for *date*:
    existing CountryStats rows for the date are deleted, then rebuilt
    via INSERT ... FROM SELECT, and the transaction is committed."""
    if logger is None:
        logger = app.logger

    (start, end) = date_to_timestamps(date)

    # First kill the stats for the selected date
    deleted_counter = session.query(CountryStats).filter(
        CountryStats.date == date).delete()

    # Aggregate beacons per country within the day's timestamp window.
    # NOTE(review): the 'device_count' label counts distinct receiver_id
    # values -- confirm whether receivers or devices are intended.
    country_stats = (session.query(
        literal(date), Country.gid,
        func.count(AircraftBeacon.timestamp).label("aircraft_beacon_count"),
        func.count(func.distinct(
            AircraftBeacon.receiver_id)).label("device_count")).filter(
                between(AircraftBeacon.timestamp, start, end)).filter(
                    func.st_contains(Country.geom,
                                     AircraftBeacon.location)).group_by(
                                         Country.gid).subquery())

    # And insert them
    ins = insert(CountryStats).from_select([
        CountryStats.date, CountryStats.country_id,
        CountryStats.aircraft_beacon_count, CountryStats.device_count
    ], country_stats)
    res = session.execute(ins)
    insert_counter = res.rowcount
    session.commit()
Exemple #34
0
def _get_event_with_enterqueue(session, start, end, match, event):
    """Yield one stats dict (labelled with *event*) per *match* queue-log
    row whose call entered a queue between *start* and *end*."""
    start = start.strftime(_STR_TIME_FMT)
    end = end.strftime(_STR_TIME_FMT)

    # Calls that entered a queue inside the window, keyed by call id.
    entered = (session
               .query(QueueLog.callid,
                      cast(QueueLog.time, TIMESTAMP).label('time'))
               .filter(and_(QueueLog.event == 'ENTERQUEUE',
                            between(QueueLog.time, start, end))))
    enter_map = {row.callid: row.time for row in entered.all()}

    if not enter_map:
        return

    # Matching events restricted to those calls.
    matches = (session
               .query(QueueLog.event,
                      QueueLog.queuename,
                      cast(QueueLog.time, TIMESTAMP).label('time'),
                      QueueLog.callid,
                      QueueLog.data3)
               .filter(and_(QueueLog.event == match,
                            QueueLog.callid.in_(enter_map))))

    for row in matches.all():
        yield {
            'callid': row.callid,
            'queue_name': row.queuename,
            'time': enter_map[row.callid],
            'event': event,
            'talktime': 0,
            'waittime': int(row.data3) if row.data3 else 0
        }
Exemple #35
0
def listener_peak():
    """Return the global listener peak

    Keyword arguments:
        - None
    """

    # Highest recorded value of the 'lst-total' statistic.
    peak = StatsistcsData.query.join(Statistic).filter(Statistic.identifier == 'lst-total').order_by(StatsistcsData.value.desc()).first()
    if peak:
        data = {'listener_peak': {'peak_value': peak.value, 'peak_time': peak.timestamp.isoformat()}}

        # Find the show (if any) that was on air at the peak moment.
        show = Show.query.join(UserShow).filter(between(peak.timestamp, Show.begin, Show.end)).first()
        if show:
            # Collect the DJs attached to that show.
            dj = []
            for usershow in show.users:
                dj.append({'dj_name': usershow.user.username, 'dj_id': usershow.user.user, 'status': usershow.status})

            data['listener_peak']['peak_show'] = {
                'show_id': show.show,
                'show_name': show.name,
                'show_description': show.description,
                'show_flags': show.flags,
                'show_begin': show.begin.isoformat(),
                'show_end': show.end.isoformat(),
                'dj': dj
            }
        else:
            data['listener_peak']['peak_show'] = None
    else:
        # No statistics recorded yet.
        data = {'listener_peak': None}
    return jsonify(wrapper(data))
Exemple #36
0
 def traffic_report_filter(cls, contributor_id):
     """Return every published record of *contributor_id* whose
     publication window contains the current time."""
     active_now = between(get_current_time(),
                          cls.start_publication_date,
                          cls.end_publication_date)
     return (cls.query
             .filter(cls.status == 'published')
             .filter(cls.contributor_id == contributor_id)
             .filter(active_now)
             .all())
Exemple #37
0
    def __iter__(self, *, chunksize: int = 100):
        """Yield pairs of ``(Entry.id, Entry.hash)`` and unmerged field values."""
        # Sanity checks on the data before iterating.
        with self.connect() as conn:
            assert Entry.allid(conn=conn)
            assert Entry.onetoone(conn=conn)

        # Values for a window of entry ids, ordered so the groupby calls
        # below can group first by (id, hash) and then by field.
        select_values = (sa.select(
            Entry.id, Entry.hash, Value.field, Value.value,
            File.name.label('filename'),
            Entry.bibkey).join_from(Entry, File).join(Value).where(
                sa.between(Entry.id, sa.bindparam('first'),
                           sa.bindparam('last'))).order_by(
                               'id', 'field', File.priority.desc(), 'filename',
                               'bibkey'))

        groupby_id_hash = functools.partial(itertools.groupby,
                                            key=operator.attrgetter(
                                                'id', 'hash'))

        groupby_field = functools.partial(itertools.groupby,
                                          key=operator.attrgetter('field'))

        # Process entries in windows of `chunksize` ids to bound memory use.
        with self.connect() as conn:
            for first, last in Entry.windowed(conn,
                                              key_column='id',
                                              size=chunksize):
                result = conn.execute(select_values, {
                    'first': first,
                    'last': last
                })
                for id_hash, grp in groupby_id_hash(result):
                    fields = [(field, [(r.value, r.filename, r.bibkey)
                                       for r in g])
                              for field, g in groupby_field(grp)]
                    yield id_hash, fields
Exemple #38
0
def get_grade_timings(problem_id, grader_id):
    """Return a numpy array of [problem_id, graded_at-timestamp] rows for
    the solutions the grader worked on around this problem, or None when
    no usable timing data exists.
    """
    query_per_problem = Solution.query.filter(Solution.grader_id == grader_id, Solution.problem_id == problem_id)\
        .order_by(Solution.graded_at)
    # First/last grading moments for this (grader, problem) pair.
    first_grade, last_grade = query_per_problem[0].graded_at, query_per_problem[-1].graded_at

    if query_per_problem.count() == 1:
        # only one solution graded, then first and last grade are the same
        # look for some other graded solution before first_grade
        previous_grade = Solution.query.filter(Solution.grader_id == grader_id, Solution.graded_at < first_grade)\
            .order_by(desc(Solution.graded_at)).first()
        if previous_grade is None:
            return None

        return np.array([[previous_grade.problem_id, previous_grade.graded_at.timestamp()],
                        [problem_id, first_grade.timestamp()]])

    # get the datetime data for all Solutions graded by the same grader ordered in ascending order
    graded_timings = np.array([
        [it.problem_id, it.graded_at.timestamp()]
        for it in Solution.query
        .filter(Solution.grader_id == grader_id, between(Solution.graded_at, first_grade, last_grade))
        .order_by(Solution.graded_at)
        if it.graded_at
    ])

    return graded_timings
def query_criteria(query, table, criteria):
    """Apply column filters described by *criteria* to *query*.

    *criteria* maps column names to filter strings:
      * ``'IN,a|b|c'``    -> column IN (a, b, c)
      * ``'LIKE,text'``   -> column LIKE '%text%'
      * ``'<,x'`` / ``'>,x'`` / ``'<>,x'`` / ``'=,x'`` -> comparisons
      * ``'IS NULL'`` / ``'IS NOT NULL'``              -> null checks
    The pseudo-column ``'bbox'`` ('min_lon,min_lat,max_lon,max_lat')
    adds a bounding-box filter on the LAT/LON columns.

    Unrecognised columns/values are ignored; returns the filtered query.
    """
    for column, value in criteria.items():
        if column in table.c:
            if ',' in value:
                sp = value.split(',')
                if sp[0] == 'IN':
                    # BUG FIX: the original tested re.search('|', sp[1]),
                    # which ALWAYS matches ('|' is an empty alternation in
                    # regex), leaving a dead else-branch that would have
                    # passed a bare string to in_().  Split on '|' when
                    # present and always pass a list.
                    data = sp[1].split('|') if '|' in sp[1] else [sp[1]]
                    query = query.where(table.c[column].in_(data))
                elif sp[0] == 'LIKE':
                    query = query.where(table.c[column].like('%' + sp[1] + '%'))
                elif sp[0] == '<':
                    query = query.where(table.c[column] < sp[1])
                elif sp[0] == '>':
                    query = query.where(table.c[column] > sp[1])
                elif sp[0] == '<>':
                    query = query.where(table.c[column] != sp[1])
                elif sp[0] == '=':
                    query = query.where(table.c[column] == sp[1])
            else:
                if value == 'IS NULL':
                    query = query.where(table.c[column].is_(None))
                elif value == 'IS NOT NULL':
                    query = query.where(table.c[column].isnot(None))
        else:
            if column == 'bbox':
                # bbox value order is lon_min, lat_min, lon_max, lat_max.
                sp = value.split(',')
                query = query.where(and_(
                    between(table.c['LAT'], float(sp[1]), float(sp[3])),
                    between(table.c['LON'], float(sp[0]), float(sp[2]))))
    return query
 def get_acquirers_count(self, start_date: datetime, end_date: datetime):
     """Count successfully processed (statusCode '200') queue details per
     acquirer within [start_date, end_date] and shape them for a pie
     chart: a list of {'quantity': ..., 'name': 'Acquirer NN%'} records,
     plus an (always empty) second list."""
     df = []
     # const data = [
     # { name: 'Adyen 25%', value: 25 },
     # { name: 'Group B', value: 25 },
     # { name: 'Group C', value: 25 },
     # { name: 'Group D', value: 25 },
     # ];
     try:
         queue_files = self.session.query(func.count(QueueDetail.detailId).label("quantity"),
                                          QueueProcesses.acquirer)\
                                   .join(QueueProcesses, QueueDetail.processId==QueueProcesses.process_id)\
                                   .group_by(QueueProcesses.acquirer)\
                                   .filter(between(QueueDetail.createdAt, start_date, end_date),
                                           QueueDetail.statusCode == '200')
         df = pd.read_sql(queue_files.statement, self.session.bind)
         # pivot table
         df['quantity'] = df['quantity'].fillna(0).astype(float)
         total = df['quantity'].sum()
         # Label each slice with its percentage share of the total.
         df['name'] = [f"{acquirer} {int((quantity/total*100))}%" for quantity, acquirer in df.itertuples(index=False)]
         df = df.drop('acquirer', axis=1).to_dict(orient="records")
     except Exception as error:
         # NOTE(review): errors are only printed; the caller still receives
         # whatever df holds at the failure point (possibly the empty list).
         print(str(error))
     finally:
         # NOTE(review): returning from finally also swallows any in-flight
         # exception -- consider moving the return after the block.
         self.session.close()
         return df, []     
Exemple #41
0
def run(long, lat):
    """Return (as a Res payload) the users whose stored geoposition lies
    within SEARCH_R degrees of (long, lat)."""
    lat_lo, lat_hi = lat - SEARCH_R, lat + SEARCH_R
    long_lo, long_hi = long - SEARCH_R, long + SEARCH_R
    # Geokey bounds pre-filter candidates cheaply before the exact checks.
    key_lo = CombineGeo(long_lo, lat_lo)
    key_hi = CombineGeo(long_hi, lat_hi)
    with dbconfig.Session() as session:
        in_box = and_(between(UserGeoPosition.geokey, key_lo, key_hi),
                      between(UserGeoPosition.lat, lat_lo, lat_hi),
                      between(UserGeoPosition.long, long_lo, long_hi))
        matches = session.query(UserGeoPosition).filter(in_box).all()
        return Res({"users": [u.toJson() for u in matches]})
Exemple #42
0
 def printf(cls):
     """SQL expression for the display name: ``name [lang]`` when
     ``lang`` is 2-3 characters long, otherwise plain ``name`` (an
     empty ``lang`` also falls back to the bare name)."""
     return sa.case([
         (cls.lang == '', cls.name),
         (sa.between(sa.func.length(cls.lang), 2,
                     3), sa.func.printf('%s [%s]', cls.name, cls.lang)),
     ],
                    else_=cls.name)
def get_log_entries():
    """Flask view: return log entries as JSON.

    Honours the query parameters 'limit' (capped at 100, default 10),
    'offset' (default 0), 'organization_id', 'search' (token matching on
    titles, descriptions, comments and author names) and 'daterange'
    ('YYYY-MM-DD_YYYY-MM-DD').  Responds 204 when nothing matches.
    """
    # Limits and offsets
    if 'limit' in request.args:
        limit = int(request.args['limit'])
        if limit > 100:
            limit = 100
    else:
        limit = 10

    if 'offset' in request.args:
        offset = int(request.args['offset'])
    else:
        offset = 0

    # Base query: non-retired entries, optionally scoped to an organization.
    if 'organization_id' in request.args:
        query = LogEntry.query\
            .filter(LogEntry.organization_id == request.args['organization_id'])\
                .filter(sa.not_(LogEntry.retired))

    else:
        query = LogEntry.query.filter(sa.not_(LogEntry.retired))

    if 'search' in request.args:
        if request.args["search"]:
            # One OR-term per search token over entry titles/descriptions
            # (slist) and comments (clist), matched case-insensitively.
            slist = []
            clist=[]
            for s in request.args["search"].split():
                slist.append(sa.or_(sa.func.upper(LogEntry.entry_title).like("%" + s.upper() + "%")))
                slist.append(sa.or_(sa.func.upper(LogEntry.entry_description).like("%" + s.upper() + "%")))
                clist.append(sa.or_(sa.func.upper(LogEntryComment.comment).like("%" + s.upper() + "%")))

            # NOTE(review): this branch reads request.args['organization_id']
            # unconditionally -- a search without organization_id raises
            # KeyError; confirm the intended contract.
            query=LogEntry.query\
                .filter(sa.or_(*slist) | LogEntry.id.in_(LogEntryComment.query.with_entities("log_entry_id")\
                                                         .filter(sa.or_(*clist))\
                                                         .filter(LogEntryComment.retired == False)))\
                    .filter (LogEntry.user_id.in_(User.query.with_entities("id")\
                        .filter(sa.func.upper(User.first_name).in_(request.args["search"].upper().split()) | sa.func.upper(User.last_name).in_(request.args["search"].upper().split()))))\
                            .filter(LogEntry.organization_id == request.args['organization_id'])\
                                .filter(sa.not_(LogEntry.retired))

            # Fall back to text/comment matching alone when restricting by
            # author name produced no rows.
            if query.count()==0:
                query=LogEntry.query\
                    .filter(sa.or_(*slist) | LogEntry.id.in_(LogEntryComment.query.with_entities("log_entry_id")\
                                                             .filter(sa.or_(*clist))\
                                                             .filter(LogEntryComment.retired == False)))\
                        .filter(LogEntry.organization_id == request.args['organization_id'])\
                            .filter(LogEntry.retired == False)

    if 'daterange' in request.args and request.args["daterange"]:
        rdates = request.args["daterange"].split('_')
        # BUG FIX: the original appended to query.whereclause in place,
        # which a SQLAlchemy Query does not honour (filters must be added
        # via .filter(), which returns a new query), so the date filter was
        # silently dropped.  Also widen the end-of-day bound to 23:59:59 --
        # the previous "11:59:59" cut the range off at noon.
        query = query.filter(sa.between(LogEntry.entry_time,
                                        rdates[0] + " 00:00:00.000",
                                        rdates[1] + " 23:59:59.000"))

    log_entries = query.order_by(sa.desc(LogEntry.entry_time)).limit(limit).offset(offset).all()

    if not log_entries:
        return jsonify({}), 204

    log_entries = [l.to_json() for l in log_entries]
    return jsonify(log_entries=log_entries)
Exemple #44
0
 def traffic_report_filter(cls, contributor_id):
     """Return the published records whose parent Disruption belongs to
     *contributor_id* and whose publication window contains now."""
     active_now = between(get_current_time(),
                          Disruption.start_publication_date,
                          Disruption.end_publication_date)
     return (cls.query
             .filter(cls.status == "published")
             .join(Disruption)
             .filter(Disruption.contributor_id == contributor_id)
             .filter(active_now)
             .all())
Exemple #45
0
 def get_last_24_hour_post_count(self):
     """Number of posts made in this forum's threads during the past
     24 hours."""
     thread_ids = Threads.query.with_entities(Threads.id).filter_by(fk_forum=self.id)
     recent = between(Posts.timestamp,
                      datetime.utcnow() - timedelta(days=1),
                      datetime.utcnow())
     query = Posts.query.filter(and_(Posts.thread.in_(thread_ids), recent))
     return query.count()
    def processXML(self): # records represents whatever element you're tacking more onto, like entry_exits or clients
        """Build the export XML document (Python 2 code).

        Creates the root element and <clientRecords> container, selects
        Person rows according to the reported/unreported options (and,
        unless alldates is set, the configured date range), then appends
        a <Client> element with assessment and entry/exit children for
        each person, finally passing each person to updateReported().
        """
        if settings.DEBUG:
            print "processXML: Appending XML to Base Record"
        self.root_element = self.createDoc() #makes root element with XML header attributes
        #print '==== root created'
        clients = self.createClients(self.root_element) # JCS - tag is <clientRecords> Only node under clients is <Client>
        print '==== clientRecords created'
        
        # Select persons by reported state (reported / unreported / all).
        if self.options.reported == True:
            Persons = self.session.query(dbobjects.Person).filter(dbobjects.Person.reported == True)
        elif self.options.unreported == True:
            Persons = self.session.query(dbobjects.Person).filter(or_(dbobjects.Person.reported == False, dbobjects.Person.reported == None))
        elif self.options.reported == None:
            Persons = self.session.query(dbobjects.Person)
        # Now apply the dates to the result set.
        if self.options.alldates == None:
            Persons = Persons.filter(between(dbobjects.Person.person_id_date_collected, self.options.startDate, self.options.endDate))
        
        pulledConfigID = 0    # JCS Only pull it if it has changed
        for self.person in Persons:
            #print "person is: ", self.person

            export = self.person.fk_person_to_export    # this is a single record because:
            # person has: export_index_id = Column(Integer, ForeignKey('export.id'))
            # export has: fk_export_to_person = relationship('Person', backref='fk_person_to_export')
            # Therefore there are multiple persons to one export - but only one export to a person

            #print "==== export before pullconfig:", export.id, export  # JCS
            # Re-pull configuration only when the export record changes.
            if pulledConfigID != export.id:
                self.pullConfiguration(export.export_id)
                pulledConfigID = export.id
            
            self.ph = self.person.fk_person_to_person_historical    # JCS This is a list of records
            self.race = self.person.fk_person_to_races
            self.site_service_part = self.person.site_service_participations    # JCS

            #information_releases = self.person.fk_person_to_release_of_information    # JCS a set
            #self.service_event = self.person.fk_person_to_service_event
            # Instead of generating a number (above), use the client number that is already provided in the legacy system
            # or
            # self.iDG.initializeSystemID(self.person.id)
            self.sysID = self.person.id        # JCS beware set self.sysID
            #if settings.DEBUG:
                #print "self.person is:", self.person 
            if self.person: # and not self.person.person_legal_first_name_unhashed+self.person.person_legal_last_name_unhashed == None:
                self.client = self.createClient(clients) # JCS - no clients in svc5? yes as clientRecords
                # Sub can be: active, anonymous, firstName, suffix, unnamedClient, alias, middleName, childEntryExit,
                # childReleaseOfInfo, childGoal
                self.customizeClient(self.client)    
                self.customizeClientPersonalIdentifiers(self.client, self.person)
                self.assessment_data = self.createAssessmentData(self.client) # JCS New - self?
                self.customizeAssessmentData(self.assessment_data)
                if self.site_service_part:      # JCS 21 Dec 2012
                    self.child_entry_exit = self.createChildEntryExit(self.client)
                    for ssp in self.site_service_part:
                        self.createEntryExit(self.child_entry_exit, ssp)
            # update the reported flag for person (This needs to be applied to all objects that we are getting data from)
            self.updateReported(self.person)
Exemple #47
0
def prune_record(event_id, max_event_id, event_range, storage):
    """Delete Event rows with ids in [event_id, upper] and return the upper id.

    The upper bound is ``event_id + event_range`` clamped to ``max_event_id``,
    so callers can loop, feeding the returned value back in as the next start.
    """
    upper = min(event_id + event_range, max_event_id)
    delete_stmt = Event.__table__.delete().where(between(Event.id, event_id, upper))
    storage._engine.execute(delete_stmt)
    logger.info('Pruning {0} - {1}'.format(event_id, upper))
    storage.session.commit()
    return upper
Exemple #48
0
def run(long, lat):
    """Return users whose stored geo position lies within SEARCH_R of (long, lat).

    Builds a square search window of half-width SEARCH_R around the point and
    filters on the geokey range plus the raw lat/long ranges.
    """
    lat_lo, lat_hi = lat - SEARCH_R, lat + SEARCH_R
    long_lo, long_hi = long - SEARCH_R, long + SEARCH_R
    key_lo = CombineGeo(long_lo, lat_lo)
    key_hi = CombineGeo(long_hi, lat_hi)
    with dbconfig.Session() as session:
        matches = (
            session.query(UserGeoPosition)
            .filter(
                and_(
                    between(UserGeoPosition.geokey, key_lo, key_hi),
                    between(UserGeoPosition.lat, lat_lo, lat_hi),
                    between(UserGeoPosition.long, long_lo, long_hi),
                )
            )
            .all()
        )
        return Res({"users": [user.toJson() for user in matches]})
Exemple #49
0
def hours_with_calls(session, start, end):
    """Yield each distinct hour (as a truncated timestamp) that has queue-log
    entries between ``start`` and ``end``.

    Both bounds are formatted with _STR_TIME_FMT before being compared
    against the stored QueueLog.time values.
    """
    lower = start.strftime(_STR_TIME_FMT)
    upper = end.strftime(_STR_TIME_FMT)
    hour_expr = func.date_trunc('hour', cast(QueueLog.time, TIMESTAMP))
    query = (session
             .query(distinct(hour_expr).label('time'))
             .filter(between(QueueLog.time, lower, upper)))
    for row in query.all():
        yield row.time
Exemple #50
0
 def listRecords(self, metadataPrefix, set=None, cursor=None, from_=None,
                 until=None, batch_size=None):
     '''Show a selection of records, basically lists all datasets.

     :param metadataPrefix: requested metadata format (not used here)
     :param set: optional group name restricting the selection
     :param cursor: if given, only the first ``cursor`` packages are used
     :param from_: optional lower bound on the revision timestamp
     :param until: optional upper bound on the revision timestamp
     :param batch_size: accepted for interface compatibility (not used here)
     '''
     data = []
     packages = []
     if not set:
         # The branches are mutually exclusive; the previous version ran the
         # single-bound queries and discarded their results whenever both
         # bounds were supplied, costing two extra database round-trips.
         if from_ and until:
             packages = Session.query(Package).filter(
                 between(PackageRevision.revision_timestamp, from_, until)).\
                 all()
         elif from_:
             packages = Session.query(Package).\
                 filter(PackageRevision.revision_timestamp > from_).all()
         elif until:
             packages = Session.query(Package).\
                 filter(PackageRevision.revision_timestamp < until).all()
         else:
             packages = Session.query(Package).all()
     else:
         group = Group.get(set)
         if group:
             # When no time bounds are given the query object itself is
             # iterated below, matching the original behaviour.
             packages = group.active_packages()
             if from_ and until:
                 packages = packages.filter(
                         between(PackageRevision.revision_timestamp,
                                 from_,
                                 until))\
                                 .all()
             elif from_:
                 packages = packages.\
                     filter(PackageRevision.revision_timestamp > from_).\
                     all()
             elif until:
                 packages = packages.\
                     filter(PackageRevision.revision_timestamp < until).\
                     all()
     if cursor:
         packages = packages[:cursor]
     for res in packages:
         data.append(self._record_for_dataset(res))
     return data
Exemple #51
0
    def _build(self):
        """Assemble the timesheet dict for ``self.client`` over ``self.date_range``.

        Queries every (Project, Task, TimeEntry) row for the client within the
        date range, then aggregates durations per project per day, plus daily
        grand totals across all projects.

        :return: dict with keys 'client', 'date_range', 'projects', 'totals'
        """
        query = db.session.\
            query(Project, Task, TimeEntry).\
            join(Project.tasks).\
            join(Task.time_entries).\
            filter(Project.client_id == self.client.id).\
            filter(between(TimeEntry.added_at, *self.date_range)).\
            order_by(Project.name.asc())

        # One calendar date per day of the range (inclusive); this fixed list
        # is the column set for each project row and for the grand totals.
        dates = [day.date() for day in
                 arrow.Arrow.range('day', *self.date_range)]
        daily_totals = OrderedDict()
        for date in dates:
            daily_totals[date] = datetime.timedelta()
        projects = []

        # groupby yields one run of consecutive rows per project; this relies
        # on the order_by(Project.name) clause above keeping rows grouped.
        for project, group in itertools.groupby(query, lambda row: row[0]):

            project_total = datetime.timedelta()
            daily_data = OrderedDict()
            for date in dates:
                daily_data[date] = {
                    'total': datetime.timedelta(),
                    'tasks': set(),
                }

            for row in group:
                # row is (Project, Task, TimeEntry)
                task_title = row[1].title
                date = localize(row[2].added_at).date()
                duration = row[2].duration
                daily_data[date]['tasks'].add(task_title)
                daily_data[date]['total'] += duration
                project_total += duration
                daily_totals[date] += duration

            projects.append({
                'project': project,
                'time': list(daily_data.values()),
                'total': project_total,
            })

        timesheet = {
            'client': self.client,
            'date_range': {
                'beg': self.date_range[0],
                'end': self.date_range[1],
            },
            'projects': projects,
            'totals': {
                'time': list(daily_totals.values()),
                'total': sum(daily_totals.values(), datetime.timedelta()),
            },
        }
        return timesheet
 def get_q_unique(date):
     """Build the query for user-unique member titles active on ``date``.

     A title counts as active when ``date`` lies between its start and end
     dates, or when its end date is NULL (open-ended).
     """
     titles = schema.member_titles.c
     is_active = rdb.or_(
         rdb.between(date, titles.start_date, titles.end_date),
         titles.end_date == None,
     )
     criteria = rdb.and_(
         schema.user_group_memberships.c.group_id == group_id,
         schema.title_types.c.user_unique == True,
         titles.title_type_id == title_type_id,
         is_active,
     )
     return session.query(GroupMemberTitle).filter(criteria)
Exemple #53
0
def get_semester_for_date(target_date):
    """Return the single semester whose date interval contains ``target_date``.

    :param date target_date: the date to locate inside a semester
    :rtype: Semester
    :raises sqlalchemy.orm.exc.NoResultFound: if no semester matches
    :raises sqlalchemy.orm.exc.MultipleResultsFound: if several semesters match
    """
    containing = between(target_date, Semester.begin_date, Semester.end_date)
    return Semester.q.filter(containing).one()
 def get_q_user(date):
     """Build the query for this membership's titles that are active on ``date``.

     Active means ``date`` falls between the title's start and end dates, or
     the end date is NULL (open-ended).
     """
     title_cols = schema.member_title.c
     is_active = rdb.or_(
         rdb.between(date, title_cols.start_date, title_cols.end_date),
         title_cols.end_date == None,
     )
     return session.query(GroupMemberTitle).filter(
         rdb.and_(
             GroupMemberTitle.group_id == group_id,
             GroupMemberTitle.membership_id == membership_id,
             GroupMemberTitle.title_type_id == title_type_id,
             is_active,
         ))
Exemple #55
0
def create_clause(min_val, max_val, column):
    """Return the tightest filter clause for ``column`` given optional bounds.

    Produces ``==`` when the bounds coincide, ``between`` when both are given,
    ``>=`` / ``<=`` when only one is given, and ``None`` when neither bound is
    supplied.  Bounds are coerced with ``int()`` before use.

    Fix: bounds are now compared against ``None`` explicitly — the previous
    truthiness checks silently dropped a legitimate bound of 0.
    """
    if min_val is not None:
        if min_val == max_val:
            return column == int(min_val)
        elif max_val is not None:
            # symmetric=True emits BETWEEN SYMMETRIC, accepting bounds in
            # either order (preserved from the original call).
            return between(column, int(min_val), int(max_val), True)
        else:
            return column >= int(min_val)
    elif max_val is not None:
        return column <= int(max_val)
    else:
        return None
Exemple #56
0
 def get_q_unique(date):
     """Build the query for user-unique titles of this type active on ``date``.

     Active means ``date`` falls between the title's start and end dates, or
     the end date is NULL (open-ended).
     """
     title_cols = schema.member_title.c
     is_active = sa.or_(
         sa.between(date, title_cols.start_date, title_cols.end_date),
         title_cols.end_date == None,
     )
     return session.query(GroupMemberTitle).filter(
         sa.and_(
             GroupMemberTitle.group_id == group_id,
             schema.title_type.c.user_unique == True,
             GroupMemberTitle.title_type_id == title_type_id,
             is_active,
         ))
 def _filter_packages(set, cursor, from_, until, batch_size):
     '''Get a part of datasets for "listNN" verbs.
     '''
     def add_revision_window(query):
         # Restrict a package query by revision timestamp, joining
         # PackageRevision rows by name. The three cases are mutually
         # exclusive; with no bounds the query is returned untouched.
         if from_ and until:
             query = query.filter(
                 between(PackageRevision.revision_timestamp, from_, until))
         elif from_:
             query = query.filter(PackageRevision.revision_timestamp > from_)
         elif until:
             query = query.filter(PackageRevision.revision_timestamp < until)
         else:
             return query
         return query.filter(Package.name == PackageRevision.name)

     packages = []
     group = None
     if not set:
         base = Session.query(Package).filter(Package.type == 'dataset'). \
             filter(Package.state == 'active').filter(Package.private != True)
         packages = add_revision_window(base).all()
     else:
         group = Group.get(set)
         if group:
             # Note that group.packages never returns private datasets
             # regardless of 'with_private' parameter.
             base = group.packages(return_query=True, with_private=False). \
                 filter(Package.type == 'dataset'). \
                 filter(Package.state == 'active')
             packages = add_revision_window(base).all()
     if cursor is not None:
         # Clamp the batch window to the end of the result list.
         cursor_end = min(cursor + batch_size, len(packages))
         packages = packages[cursor:cursor_end]
     return packages, group
Exemple #58
0
 def target_get_entity(self):
     """Fetch the related target entity as seen at this object's changeset view.

     Selects the ``target_entity`` row whose key matches our local column and
     whose changeset interval (changeset_id .. changeset_invalid_id) brackets
     ``self._changeset_view``, then stamps that view onto the result.

     NOTE(review): assumes a matching row exists — ``first()`` returning None
     makes the attribute assignment below raise AttributeError; confirm
     callers guarantee a match.
     """
     session = sqlalchemy.orm.object_session(self)
     assert session is not None, "Instance has no session: Must have session to read relations"
     view = self._changeset_view
     criteria = and_(
         target_column == getattr(self, local_column_name),
         target_table.c.changeset_id >= view,
         between(view, target_table.c.changeset_id, target_table.c.changeset_invalid_id),
     )
     entity = session.query(target_entity).filter(criteria).first()
     entity._changeset_view = view
     return entity
Exemple #59
0
 def get_at_changeset(self, changeset):
     """Return this head's revision valid at ``changeset``, or None.

     The matching revision's changeset interval
     (changeset_id .. changeset_invalid_id) must bracket ``changeset``;
     the view marker is stamped onto the result before returning.
     """
     rev = self.Revision
     session = sqlalchemy.orm.object_session(self)
     criteria = and_(
         rev.head == self,
         rev.changeset_id >= changeset,
         between(changeset, rev.changeset_id, rev.changeset_invalid_id),
     )
     match = session.query(rev).filter(criteria).limit(1).first()
     if match:
         match._changeset_view = changeset
     return match
Exemple #60
-1
def argoQuery(dbcon, geoWKT=None, tspan=None, withinDmeter=None, tsort=None):
    """Query Argo float profiles from the 'argo2' table ('oceanobs' schema).

    :param dbcon: database connection wrapper exposing getTable() and dbeng
    :param geoWKT: optional WKT geometry restricting the search area
    :param tspan: optional (start, end) pair restricting the time span
    :param withinDmeter: optional distance in meters; when set together with
        geoWKT, selects profiles within that distance of the geometry instead
        of strict containment/intersection
    :param tsort: when truthy, order results by profile location time
    :return: the executed query's result (dbcon.dbeng.execute)
    """
    tbl = dbcon.getTable('argo2', 'oceanobs')

    # first create a subquery to quickly discard argo profiles

    subqry = select([tbl])

    if tspan:
        subqry = subqry.where(
            func.overlaps(tbl.c.tstart, tbl.c.tend, tspan[0], tspan[1]))

    # Apply initial geospatial constraints
    if geoWKT:
        if withinDmeter:
            # only base initial constraints on the bounding box
            subqry = subqry.where(
                func.ST_DWithin(
                    literal_column('ST_Envelope(geom::geometry)::geography'),
                    func.ST_GeogFromText(geoWKT), withinDmeter))
        else:
            subqry = subqry.where(
                func.ST_Intersects(literal_column('geom::geometry'),
                                   func.ST_GeomFromText(geoWKT, 4326)))

    # we need to assign an alias to this subquery in order to work with it
    subqry = subqry.alias("ar")
    # expand the arrays and points in the subquery (unnest presumably yields
    # one row per profile entry — confirm array columns are aligned)
    qry = select([
        subqry.c.wmoid, subqry.c.uri, subqry.c.datacenter,
        func.unnest(subqry.c.mode).label('mode'),
        func.unnest(subqry.c.ascend).label('ascend'),
        func.unnest(subqry.c.tlocation).label('tlocation'),
        func.unnest(subqry.c.cycle).label('cycle'),
        func.unnest(subqry.c.iprof).label('iprof'),
        ST_Dump(literal_column("ar.geom::geometry")).geom.label('geom')
    ])

    # additional spatial/temporal constraints on the expanded rows
    finalqry = qry
    qry = qry.alias("arex")

    if tspan:
        finalqry = select([qry]).where(
            between(qry.c.tlocation, tspan[0], tspan[1]))

    if geoWKT:
        if withinDmeter:
            # per-profile distance check against the actual geometry
            finalqry = finalqry.where(
                func.ST_DWithin(qry.c.geom, func.ST_GeogFromText(geoWKT),
                                withinDmeter))
        else:
            finalqry = finalqry.where(
                func.ST_Within(literal_column("arex.geom"),
                               func.ST_GeomFromText(geoWKT, 4326)))

    if tsort:
        finalqry = finalqry.order_by(qry.c.tlocation)

    return dbcon.dbeng.execute(finalqry)