Example #1
    def from_fits_object(cls, fits_object, ccd_no):
        hdu = fits_object.fits_data[ccd_no]

        header = hdu.header
        session = object_session(fits_object)

        x_binning, y_binning = map(int, header['CCDSUM'].split())

        readout_direction = header['ampname'].split(',')[1].strip()

        instrument_id = Instrument.from_fits_object(fits_object).id

        detector_object = session.query(cls).filter(
            cls.naxis1 == header['NAXIS1'], cls.naxis2 == header['NAXIS2'],
            cls.ccd_name == header['CCDNAME'],
            cls.readout_direction == readout_direction,
            (func.abs(cls.gain - header['GAIN']) / header['GAIN']) < 0.0001,
            (func.abs(cls.readout_noise - header['RDNOISE']) /
             header['RDNOISE']) < 0.0001, cls.x_binning == x_binning,
            cls.y_binning == y_binning, cls.frame_id == int(header['FRAMEID']),
            cls.instrument_id == instrument_id).all()
        if detector_object == []:
            detector_object = cls(header['NAXIS1'], header['NAXIS2'],
                                  header['CCDNAME'], readout_direction,
                                  header['GAIN'], header['RDNOISE'], x_binning,
                                  y_binning, header['FRAMEID'], instrument_id)
            session.add(detector_object)
            session.commit()
            return detector_object
        elif len(detector_object) == 1:
            return detector_object[0]
        else:
            raise ValueError('Found more than one detector')
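A note on the pattern above: the GAIN and RDNOISE filters compare floats by relative tolerance rather than equality, since values read back from the database rarely match a FITS header bit-for-bit. Below is a minimal, self-contained sketch of that filter against in-memory SQLite; the Detector model and the sample gains are made up for illustration.

from sqlalchemy import Column, Float, Integer, create_engine, func
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Detector(Base):
    __tablename__ = 'detector'
    id = Column(Integer, primary_key=True)
    gain = Column(Float)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Detector(gain=2.5), Detector(gain=2.500001),
                     Detector(gain=3.0)])
    session.commit()
    target = 2.5
    # A relative difference below 0.01% counts as the same detector.
    matches = session.query(Detector).filter(
        (func.abs(Detector.gain - target) / target) < 0.0001).all()
    print([d.gain for d in matches])  # [2.5, 2.500001]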
Example #2
def get_parkings_custom():
    try:
        data = json.loads(connexion.request.data,
                          object_pairs_hook=collections.OrderedDict)
        longitude = data["longitude"]
        latitude = data["latitude"]
        radius = data["radius"]

        kmInLongitudeDegree = 111.320 * math.cos(latitude / 180.0 * math.pi)

        delta_lat = radius / 111.1
        delta_long = radius / kmInLongitudeDegree

        qs = current_app.context.rospark_db_session()
        rows = (qs.query(ParkingsRecordsTbl).filter(
            and_(
                func.abs(ParkingsRecordsTbl.latitude - latitude) < delta_lat,
                func.abs(ParkingsRecordsTbl.longitude - longitude) <
                delta_long)))
        result = []
        for row in rows.all():
            result.append(row.serialize())

    except Exception as e:
        LOG.exception("Error: %s" % str(e))
        return jsonify(dict(status="error")), 500

    return jsonify(dict(status="ok", result=result)), 200
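The bounding-box arithmetic above converts a radius in kilometres into degree offsets: a degree of latitude is roughly constant (~111.1 km), while a degree of longitude shrinks with the cosine of the latitude. A quick stand-alone check with made-up coordinates:

import math

latitude, radius = 52.0, 5.0  # degrees north, km
km_per_lon_degree = 111.320 * math.cos(math.radians(latitude))

delta_lat = radius / 111.1               # ~0.0450 degrees
delta_long = radius / km_per_lon_degree  # ~0.0729 degrees at 52 N
print(delta_lat, delta_long)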
Example #3
def top_spenders(limit=10, start_date=None, end_date=None):
    total = func.sum(func.abs(MoneyLog.amount)).label('total')
    query = DBSession.query(
                MoneyLog.username,
                total
            ).filter(
                MoneyLog.reason == 'drop',
                MoneyLog.username != 'openhouse'
            )
    if start_date and end_date and start_date <= end_date:
        query = query.filter(
            MoneyLog.time >= start_date,
            MoneyLog.time <= end_date
        )
    return query.group_by(
                MoneyLog.username
            ).order_by(
                total.desc()
            ).limit(
                limit
            ).all()
Example #4
    def from_fits_object(cls, fits_object, ccd_no):
        hdu = fits_object.fits_data[ccd_no]

        header = hdu.header
        session = object_session(fits_object)

        x_binning, y_binning = map(int, header['CCDSUM'].split())

        readout_direction = header['ampname'].split(',')[1].strip()

        instrument_id = Instrument.from_fits_object(fits_object).id

        detector_object = session.query(cls).filter(cls.naxis1==header['NAXIS1'], cls.naxis2==header['NAXIS2'],
                                  cls.ccd_name==header['CCDNAME'], cls.readout_direction==readout_direction,
                                  (func.abs(cls.gain - header['GAIN']) / header['GAIN']) < 0.0001,
                                  (func.abs(cls.readout_noise - header['RDNOISE']) / header['RDNOISE']) < 0.0001,
                                  cls.x_binning==x_binning, cls.y_binning==y_binning,
                                  cls.frame_id==int(header['FRAMEID']),
                                  cls.instrument_id==instrument_id).all()
        if detector_object == []:
            detector_object = cls(header['NAXIS1'], header['NAXIS2'], header['CCDNAME'], readout_direction, header['GAIN'],
                       header['RDNOISE'], x_binning, y_binning, header['FRAMEID'], instrument_id)
            session.add(detector_object)
            session.commit()
            return detector_object
        elif len(detector_object) == 1:
            return detector_object[0]
        else:
            raise ValueError('Found more than one detector')
Example #5
    def nearest_neighbors(self, limit=10):
        '''
        Returns a list of (user, score) tuples with the closest matching
        skills.  If they haven't answered the equivalent skill question, we
        consider that a very big difference (12).

        Order is closest to least close, which is an ascending score.
        '''
        my_skills = aliased(UserSkill, name='my_skills', adapt_on_names=True)
        their_skills = aliased(UserSkill, name='their_skills', adapt_on_names=True)

        # difference we assume for user that has not answered question
        unanswered_difference = (LEVELS['LEVEL_I_CAN_DO_IT']['score'] -
                                 LEVELS['LEVEL_I_WANT_TO_LEARN']['score']) * 2

        return User.query_in_deployment().\
                add_column(((len(self.skills) - func.count(func.distinct(their_skills.id))) *
                            unanswered_difference) + \
                       func.sum(func.abs(their_skills.level - my_skills.level))).\
                filter(their_skills.user_id != my_skills.user_id).\
                filter(User.id == their_skills.user_id).\
                filter(their_skills.name == my_skills.name).\
                filter(my_skills.user_id == self.id).\
                group_by(User).\
                order_by(((len(self.skills) - func.count(func.distinct(their_skills.id)))
                          * unanswered_difference) + \
                     func.sum(func.abs(their_skills.level - my_skills.level))).\
                limit(limit)
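The docstring's "very big difference (12)" falls out of the unanswered_difference arithmetic. A sketch with hypothetical level scores (the real LEVELS mapping lives in the project's config, so these numbers are only illustrative):

LEVELS = {'LEVEL_I_CAN_DO_IT': {'score': 8},      # hypothetical value
          'LEVEL_I_WANT_TO_LEARN': {'score': 2}}  # hypothetical value
unanswered_difference = (LEVELS['LEVEL_I_CAN_DO_IT']['score'] -
                         LEVELS['LEVEL_I_WANT_TO_LEARN']['score']) * 2
print(unanswered_difference)  # 12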
Example #6
File: models.py Project: tekd/noi2
    def nearest_neighbors(self, limit=10):
        '''
        Returns a list of (user, score) tuples with the closest matching
        skills.  If they haven't answered the equivalent skill question, we
        consider that a very big difference (12).

        Order is closest to least close, which is an ascending score.
        '''
        my_skills = aliased(UserSkill, name='my_skills', adapt_on_names=True)
        their_skills = aliased(UserSkill,
                               name='their_skills',
                               adapt_on_names=True)

        # difference we assume for user that has not answered question
        unanswered_difference = (LEVELS['LEVEL_I_CAN_DO_IT']['score'] -
                                 LEVELS['LEVEL_I_WANT_TO_LEARN']['score']) * 2

        return User.query_in_deployment().\
                add_column(((len(self.skills) - func.count(func.distinct(their_skills.id))) *
                            unanswered_difference) + \
                       func.sum(func.abs(their_skills.level - my_skills.level))).\
                filter(their_skills.user_id != my_skills.user_id).\
                filter(User.id == their_skills.user_id).\
                filter(their_skills.name == my_skills.name).\
                filter(my_skills.user_id == self.id).\
                group_by(User).\
                order_by(((len(self.skills) - func.count(func.distinct(their_skills.id)))
                          * unanswered_difference) + \
                     func.sum(func.abs(their_skills.level - my_skills.level))).\
                limit(limit)
Example #7
    def visit_binary_op(self, expr):
        if isinstance(expr, Power):
            op = func.pow
        elif isinstance(expr, FloorDivide):
            op = operator.div if six.PY2 else operator.truediv
        elif isinstance(expr, (Add, Substract)) and expr.dtype == df_types.datetime:
            if isinstance(expr, Add) and \
                    all(child.dtype == df_types.datetime for child in (expr.lhs, expr.rhs)):
                raise CompileError('Cannot add two datetimes')
            if isinstance(expr.rhs, DTScalar) or \
                    (isinstance(expr, Add) and isinstance(expr.lhs, DTScalar)):
                if isinstance(expr.rhs, DTScalar):
                    dt, scalar = expr.lhs, expr.rhs
                else:
                    dt, scalar = expr.rhs, expr.lhs
                val = scalar.value
                if isinstance(expr, Substract):
                    val = -val

                dt_type = type(scalar).__name__[:-6]
                sa_dt = self._expr_to_sqlalchemy[dt]
                try:
                    key = DATE_KEY_DIC[dt_type]
                except KeyError:
                    raise NotImplementedError
                if self._sa_engine and self._sa_engine.name == 'mysql':
                    if dt_type == 'MilliSecond':
                        val, dt_type = val * 1000, 'MicroSecond'
                    sa_expr = func.date_add(sa_dt, text('interval %d %s' % (val, dt_type.lower())))
                else:
                    sa_expr = sa_dt + timedelta(**{key: val})
                self._add(expr, sa_expr)
                return
            else:
                raise NotImplementedError
        elif isinstance(expr, Substract) and expr._lhs.dtype == df_types.datetime and \
                expr._rhs.dtype == df_types.datetime:
            sa_expr = self._expr_to_sqlalchemy[expr._lhs] - self._expr_to_sqlalchemy[expr._rhs]
            if self._sa_engine and self._sa_engine.name == 'mysql':
                sa_expr = func.abs(func.microsecond(sa_expr)
                                   .cast(types.df_type_to_sqlalchemy_type(expr.dtype))) / 1000
            else:
                sa_expr = func.abs(extract('MICROSECONDS', sa_expr)
                                   .cast(types.df_type_to_sqlalchemy_type(expr.dtype))) / 1000
            self._add(expr, sa_expr)
            return
        elif isinstance(expr, Mod):
            lhs, rhs = self._expr_to_sqlalchemy[expr._lhs], self._expr_to_sqlalchemy[expr._rhs]
            sa_expr = BINARY_OP[expr.node_name](lhs, rhs)
            if not is_constant_scalar(expr._rhs):
                sa_expr = case([(rhs > 0, func.abs(sa_expr))], else_=sa_expr)
            elif expr._rhs.value > 0:
                sa_expr = func.abs(sa_expr)
            self._add(expr, sa_expr)
            return
        else:
            op = BINARY_OP[expr.node_name]
        lhs, rhs = self._expr_to_sqlalchemy[expr._lhs], self._expr_to_sqlalchemy[expr._rhs]
        sa_expr = op(lhs, rhs)
        self._add(expr, sa_expr)
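One detail worth calling out in the Mod branch: Python's % takes the sign of the divisor, while most SQL engines give the remainder the sign of the dividend, so the compiler wraps the SQL expression in func.abs when the divisor is positive to keep the result non-negative. The two conventions, for reference:

import math

print(-7 % 3)            # 2: Python's remainder follows the divisor
print(math.fmod(-7, 3))  # -1.0: the C/SQL-style remainder follows the dividend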
Example #8
 def match_one(self, m):
     for row in self.session.query(Mtab).\
         filter(Mtab.id != m.id).\
         filter(Mtab.withMS2 >= withms2_min(self.config)).\
         filter(func.abs(Mtab.rt - m.rt) <= self.config[RT_DIFF]).\
         filter(func.abs(1e6 * (Mtab.mz - m.mz) / m.mz) <= self.config[PPM_DIFF]).\
         all():
         yield row
Example #9
 def match_one(self,m):
     for row in self.session.query(Mtab).\
         filter(Mtab.id != m.id).\
         filter(Mtab.withMS2 >= withms2_min(self.config)).\
         filter(func.abs(Mtab.rt - m.rt) <= self.config[RT_DIFF]).\
         filter(func.abs(1e6 * (Mtab.mz - m.mz) / m.mz) <= self.config[PPM_DIFF]).\
         all():
         yield row
Example #10
def eat_princess(session, szerokosc, wysokosc):
    princesses_to_eat = (session.query(Ksiezniczki).filter(
        Ksiezniczki.czy_dziewica == True).filter(
            func.abs(Ksiezniczki.szerokosc - szerokosc) < 25).filter(
                func.abs(Ksiezniczki.wysokosc - wysokosc) < 25))
    for princess in princesses_to_eat:
        session.delete(princess)
    session.commit()
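Since all the filtering happens in SQL, the per-row delete loop above could plausibly be collapsed into a bulk delete; a hedged alternative (note that bulk deletes bypass ORM cascade handling, which may or may not matter here):

princesses_to_eat.delete(synchronize_session=False)
session.commit()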
Example #11
    def link_science_sets(self):
        """
        Link individual science observations to their calibration data

        Parameters
        ----------

        science_instrument2longslit_instrument: ~np.recarray
            a dictionary mapping science_frame instrument_setup_id to
            longslit_arc_instrument_setup_id
        """

        science_frames = (self.session.query(GMOSMOSRawFITS)
                          .join(ObservationType).join(ObservationClass)
                          .filter(ObservationClass.name == 'science',
                                  ObservationType.name == 'object')
                          .all())
        for science_frame in science_frames:
            flat = (self.session.query(GMOSMOSRawFITS)
                    .join(ObservationType)
                    .filter(ObservationType.name == 'flat',
                            GMOSMOSRawFITS.mask_id == science_frame.mask_id,
                            GMOSMOSRawFITS.observation_block_id ==
                            science_frame.observation_block_id)
                    .order_by(func.abs(GMOSMOSRawFITS.mjd -
                                       science_frame.mjd))
                    .all())

            #### WRONG - just picking one flat for now ###
            flat = flat[0]

            mask_arc = (self.session.query(GMOSMOSRawFITS)
                        .join(ObservationType).join(ObservationClass)
                        .filter(
                            ObservationType.name == 'arc',
                            GMOSMOSRawFITS.mask_id == science_frame.mask_id,
                            GMOSMOSRawFITS.instrument_setup_id ==
                            science_frame.instrument_setup_id)
                        .order_by(func.abs(GMOSMOSRawFITS.mjd -
                                           science_frame.mjd))
                        .all())

            if len(mask_arc) > 1:
                logger.warn('Science Frame {0} has more than one mask arc:\n'
                            '{1} - selecting closest arc'
                            .format(science_frame,
                                    '\n'.join(map(str, mask_arc))))
            mask_arc = mask_arc[0]

            self.session.add(GMOSMOSScienceSet(id=science_frame.id,
                                               flat_id=flat.id,
                                               mask_arc_id=mask_arc.id,))
            logger.info('Link Science Frame {0} with:\nFlat: {1}\n'
                        'Mask Arc: {2}\n'
                        .format(science_frame, flat, mask_arc))
        self.session.commit()
Example #12
 def match_all(self):
     m_alias = aliased(Mtab)
     for row in self.session.query(Mtab, m_alias).\
         filter(Mtab.withMS2 >= withms2_min(self.config)).\
         join((m_alias, Mtab.id != m_alias.id)).\
         filter(m_alias.withMS2 >= withms2_min(self.config)).\
         filter(func.abs(Mtab.rt - m_alias.rt) <= self.config[RT_DIFF]).\
         filter(func.abs(1e6 * (Mtab.mz - m_alias.mz) / m_alias.mz) <= self.config[PPM_DIFF]).\
         all():
         yield row
Example #13
 def match_all(self):
     m_alias = aliased(Mtab)
     for row in self.session.query(Mtab, m_alias).\
         filter(Mtab.withMS2 >= withms2_min(self.config)).\
         join((m_alias, Mtab.id != m_alias.id)).\
         filter(m_alias.withMS2 >= withms2_min(self.config)).\
         filter(func.abs(Mtab.rt - m_alias.rt) <= self.config[RT_DIFF]).\
         filter(func.abs(1e6 * (Mtab.mz - m_alias.mz) / m_alias.mz) <= self.config[PPM_DIFF]).\
         all():
         yield row
Example #14
    def fetch_transactions(self, start_time, end_time):

        session = self.__session()

        try:

            transactions = session.query(
                Transaction.tx_site,
                Transaction.tx_code,
                functions.count(Transaction.tx_time).label('tx_size'),
                functions.min(Transaction.tx_time).label('tx_time_min'),
                functions.max(Transaction.tx_time).label('tx_time_max'),
                functions.sum(Transaction.tx_inst).label('tx_net_inst'),
                functions.sum(Transaction.tx_fund).label('tx_net_fund'),
                functions.sum(func.abs(
                    Transaction.tx_inst)).label('tx_grs_inst'),
                functions.sum(func.abs(
                    Transaction.tx_fund)).label('tx_grs_fund'),
            ).filter(Transaction.tx_time >= start_time,
                     Transaction.tx_time < end_time).group_by(
                         Transaction.tx_site,
                         Transaction.tx_code,
                     ).subquery()

            inst = aliased(Evaluation, name='ev_inst')
            fund = aliased(Evaluation, name='ev_fund')

            results = session.query(transactions, Product, inst, fund).join(
                Product,
                and_(
                    Product.pr_site == transactions.c.tx_site,
                    Product.pr_code == transactions.c.tx_code,
                )).outerjoin(
                    inst,
                    and_(
                        inst.ev_site == Product.pr_site,
                        inst.ev_unit == Product.pr_inst,
                    )).outerjoin(
                        fund,
                        and_(
                            fund.ev_site == Product.pr_site,
                            fund.ev_unit == Product.pr_fund,
                        )).all()

        finally:

            session.close()

        dto = namedtuple(
            'TransactionDto',
            ('tx_site', 'tx_code', 'tx_size', 'tx_time_min', 'tx_time_max',
             'tx_net_inst', 'tx_net_fund', 'tx_grs_inst', 'tx_grs_fund',
             'product', 'ev_inst', 'ev_fund'))

        return [dto(*r) for r in results]
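The aggregation distinguishes net exposure from gross turnover: functions.sum(tx_inst) lets opposing fills cancel, while functions.sum(func.abs(tx_inst)) adds their magnitudes. In plain numbers:

fills = [+2.0, -1.5, +0.5]
print(sum(fills))                  # 1.0 -> tx_net_inst (net position change)
print(sum(abs(f) for f in fills))  # 4.0 -> tx_grs_inst (gross turnover)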
Example #15
    def link_science_sets(self):
        """
        Link individual science observations to their calibration data

        Parameters
        ----------

        science_instrument2longslit_instrument: ~np.recarray
            a dictionary mapping science_frame instrument_setup_id to
            longslit_arc_instrument_setup_id
        """

        science_frames = (self.session.query(GMOSMOSRawFITS).join(
            ObservationType).join(ObservationClass).filter(
                ObservationClass.name == 'science',
                ObservationType.name == 'object').all())
        for science_frame in science_frames:
            flat = (self.session.query(GMOSMOSRawFITS).join(
                ObservationType).filter(
                    ObservationType.name == 'flat',
                    GMOSMOSRawFITS.mask_id == science_frame.mask_id,
                    GMOSMOSRawFITS.observation_block_id ==
                    science_frame.observation_block_id).order_by(
                        func.abs(GMOSMOSRawFITS.mjd -
                                 science_frame.mjd)).all())

            #### WRONG - just picking one flat for now ###
            flat = flat[0]

            mask_arc = (self.session.query(GMOSMOSRawFITS).join(
                ObservationType).join(ObservationClass).filter(
                    ObservationType.name == 'arc',
                    GMOSMOSRawFITS.mask_id == science_frame.mask_id,
                    GMOSMOSRawFITS.instrument_setup_id ==
                    science_frame.instrument_setup_id).order_by(
                        func.abs(GMOSMOSRawFITS.mjd -
                                 science_frame.mjd)).all())

            if len(mask_arc) > 1:
                logger.warn('Science Frame {0} has more than one mask arc:\n'
                            '{1} - selecting closest arc'.format(
                                science_frame, '\n'.join(map(str, mask_arc))))
            mask_arc = mask_arc[0]

            self.session.add(
                GMOSMOSScienceSet(
                    id=science_frame.id,
                    flat_id=flat.id,
                    mask_arc_id=mask_arc.id,
                ))
            logger.info('Link Science Frame {0} with:\nFlat: {1}\n'
                        'Mask Arc: {2}\n'.format(science_frame, flat,
                                                 mask_arc))
        self.session.commit()
Example #16
    def test_bound_to_query_reference(self):
        class A(self.base_cls):
            name = Column(String)
            b_list = relationship('B')

        class B(self.base_cls):
            name = Column(String)
            a_id = Column(Integer, ForeignKey('a.id'))
            ref = Column(Integer)

        class C(self.base_cls):
            name = Column(String)
            ref = Column(Integer)

        session = self.init()
        session.add_all([
            A(name='a1', b_list=[B(name='b1', ref=-1),
                                 B(name='b2', ref=2)]),
            A(name='a2', b_list=[B(name='b3', ref=-3),
                                 B(name='b4', ref=4)]),
            C(name='c1', ref=1),
            C(name='c2', ref=-1),
            C(name='c3', ref=2),
            C(name='c4', ref=-2),
            C(name='c5', ref=3),
            C(name='c6', ref=-3),
            C(name='c7', ref=4),
            C(name='c8', ref=-4),
        ])
        session.commit()

        sq1 = (RelativeCollectionSubQuery.from_relation(A.b_list).order_by(
            B.name.asc()))
        sq2 = (RelativeCollectionSubQuery(bind(func.abs(B.ref), sq1),
                                          func.abs(C.ref)).order_by(
                                              C.name.asc()))
        query = (ConstructQuery({
            'a_name': A.name,
            'c_names': map_(map_(C.name, sq2), sq1),
        }).order_by(A.name.asc()).with_session(session.registry()))
        self.assertEqual(
            [dict(obj) for obj in query.all()],
            [
                {
                    'a_name': 'a1',
                    'c_names': [['c1', 'c2'], ['c3', 'c4']]
                },
                {
                    'a_name': 'a2',
                    'c_names': [['c5', 'c6'], ['c7', 'c8']]
                },
            ],
        )
Example #17
    def from_fits_object(cls, fits_object, equivalency_threshold=0.0001):
        session = object_session(fits_object)
        header = fits_object.fits_data[0].header
        filter1 = GMOSFilter.from_keyword(header['filter1'], session)
        filter2 = GMOSFilter.from_keyword(header['filter2'], session)

        grating = GMOSGrating.from_keyword(header['grating'], session)


        grating_central_wavelength = header['centwave']
        grating_slit_wavelength = header['grwlen']

        grating_tilt = header['grtilt']
        grating_order = header['grorder']

        instrument = Instrument.from_fits_object(fits_object)

        instrument_setup_object = session.query(cls).filter(
            cls.filter1_id==filter1.id, cls.filter2_id==filter2.id,
            cls.grating_id==grating.id, cls.instrument_id==instrument.id,
            (func.abs(cls.grating_central_wavelength_value - grating_central_wavelength)
                                                    / grating_central_wavelength) < equivalency_threshold,
            (func.abs(cls.grating_slit_wavelength_value - grating_slit_wavelength)
                                                    / grating_slit_wavelength) < equivalency_threshold,
            (func.abs(cls.grating_tilt_value - grating_tilt)
                                                    / grating_tilt) < equivalency_threshold).all()


        if instrument_setup_object == []:

            instrument_setup_object = cls(filter1.id, filter2.id, grating.id, grating_central_wavelength,
                                          grating_slit_wavelength, grating_tilt, grating_order, instrument.id)

            session.add(instrument_setup_object)
            session.commit()

            for fits_extension_id in xrange(1, len(fits_object.fits_data)):
                current_detector_id = GMOSDetector.from_fits_object(fits_object, fits_extension_id).id
                session.add(GMOSMOSInstrumentSetup2Detector(instrument_setup_id=instrument_setup_object.id,
                                                            fits_extension_id=fits_extension_id,
                                                            detector_id=current_detector_id))
            session.commit()



            return instrument_setup_object

        elif len(instrument_setup_object) == 1:
            return instrument_setup_object[0]

        else:
            raise ValueError('More than one instrument setup with the same configuration found: %s' % instrument_setup_object)
Example #18
 def match_all_from(self, exp):
     m_alias = aliased(Mtab)
     for row in self.session.query(Mtab, m_alias).\
         filter(Mtab.exp.has(name=exp)).\
         filter(Mtab.withMS2 >= withms2_min(self.config)).\
         join((m_alias, and_(Mtab.id != m_alias.id, Mtab.exp_id != m_alias.exp_id))).\
         filter(m_alias.withMS2 >= withms2_min(self.config)).\
         filter(func.abs(Mtab.rt - m_alias.rt) <= self.config[RT_DIFF]).\
         filter(func.abs(1e6 * (Mtab.mz - m_alias.mz) / m_alias.mz) <= self.config[PPM_DIFF]).\
         join((Exp, Exp.id==m_alias.exp_id)).\
         order_by(Mtab.mz, Exp.name).\
         all():
         yield row
Example #19
 def match_all_from(self,exp):
     m_alias = aliased(Mtab)
     for row in self.session.query(Mtab, m_alias).\
         filter(Mtab.exp.has(name=exp)).\
         filter(Mtab.withMS2 >= withms2_min(self.config)).\
         join((m_alias, and_(Mtab.id != m_alias.id, Mtab.exp_id != m_alias.exp_id))).\
         filter(m_alias.withMS2 >= withms2_min(self.config)).\
         filter(func.abs(Mtab.rt - m_alias.rt) <= self.config[RT_DIFF]).\
         filter(func.abs(1e6 * (Mtab.mz - m_alias.mz) / m_alias.mz) <= self.config[PPM_DIFF]).\
         join((Exp, Exp.id==m_alias.exp_id)).\
         order_by(Mtab.mz, Exp.name).\
         all():
         yield row
Example #20
    def band(cls):

        return case([
            (func.abs(func.timestampdiff(text('SECOND'), jcmt.COMMON.wvmdatst, jcmt.COMMON.date_obs)) > _TIMESTAMP_OFFSET_ALLOWANCE,
                 'unknown'),
            (func.abs(func.timestampdiff(text('SECOND'), jcmt.COMMON.wvmdaten, jcmt.COMMON.date_end)) > _TIMESTAMP_OFFSET_ALLOWANCE,
                 'unknown'),
            ((jcmt.COMMON.wvmtauen + jcmt.COMMON.wvmtaust)/2.0 < 0.05, 1),
            ((jcmt.COMMON.wvmtauen + jcmt.COMMON.wvmtaust)/2.0 < 0.08, 2),
            ((jcmt.COMMON.wvmtauen + jcmt.COMMON.wvmtaust)/2.0 < 0.12, 3),
            ((jcmt.COMMON.wvmtauen + jcmt.COMMON.wvmtaust)/2.0 < 0.20, 4),
            ((jcmt.COMMON.wvmtauen + jcmt.COMMON.wvmtaust)/2.0 < 10, 5),
            ],
            else_='unknown')
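The case() ladder above grades the mean WVM opacity into JCMT weather bands, falling back to 'unknown' when the WVM timestamps drift too far from the observation window. A plain-Python rendering of the tau ladder (thresholds copied from the expression; the final 10 effectively means "any remaining finite tau"):

def band(wvmtaust, wvmtauen):
    tau = (wvmtaust + wvmtauen) / 2.0
    for limit, band_no in [(0.05, 1), (0.08, 2), (0.12, 3), (0.20, 4), (10, 5)]:
        if tau < limit:
            return band_no
    return 'unknown'

print(band(0.04, 0.06))  # 2 (mean tau 0.05 falls in the 0.05-0.08 band)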
Example #21
    def test_bound_to_query_reference(self):

        class A(self.base_cls):
            name = Column(String)
            b_list = relationship('B')

        class B(self.base_cls):
            name = Column(String)
            a_id = Column(Integer, ForeignKey('a.id'))
            ref = Column(Integer)

        class C(self.base_cls):
            name = Column(String)
            ref = Column(Integer)

        session = self.init()
        session.add_all([
            A(name='a1', b_list=[B(name='b1', ref=-1), B(name='b2', ref=2)]),
            A(name='a2', b_list=[B(name='b3', ref=-3), B(name='b4', ref=4)]),
            C(name='c1', ref=1), C(name='c2', ref=-1),
            C(name='c3', ref=2), C(name='c4', ref=-2),
            C(name='c5', ref=3), C(name='c6', ref=-3),
            C(name='c7', ref=4), C(name='c8', ref=-4),
        ])
        session.commit()

        sq1 = (
            RelativeCollectionSubQuery.from_relation(A.b_list)
            .order_by(B.name.asc())
        )
        sq2 = (
            RelativeCollectionSubQuery(bind(func.abs(B.ref), sq1), func.abs(C.ref))
            .order_by(C.name.asc())
        )
        query = (
            ConstructQuery({
                'a_name': A.name,
                'c_names': map_(map_(C.name, sq2), sq1),
            })
            .order_by(A.name.asc())
            .with_session(session.registry())
        )
        self.assertEqual(
            [dict(obj) for obj in query.all()],
            [
                {'a_name': 'a1', 'c_names': [['c1', 'c2'], ['c3', 'c4']]},
                {'a_name': 'a2', 'c_names': [['c5', 'c6'], ['c7', 'c8']]},
            ],
        )
Example #22
    def from_fits_file(cls, fits_file, session,
                       tilt_equivalency_threshold=0.001,
                       wavelength_equivalency_threshold=0.0001):
        header = fits.getheader(fits_file)

        filter1 = GMOSFilter.from_keyword(header['filter1'], session)
        filter2 = GMOSFilter.from_keyword(header['filter2'], session)

        grating = GMOSGrating.from_keyword(header['grating'], session)
        instrument = Instrument.from_keyword(header['instrume'], session)

        grating_central_wavelength = header['centwave']
        grating_slit_wavelength = header['grwlen']

        grating_tilt = header['grtilt']
        grating_order = header['grorder']


        #Checking if the same instrument setup already exists
        instrument_setup_query = session.query(cls).filter(
            cls.filter1_id==filter1.id, cls.filter2_id==filter2.id,
            cls.grating_id==grating.id, cls.instrument_id==instrument.id,
            func.abs(cls.grating_central_wavelength_value -
                     grating_central_wavelength)
            < wavelength_equivalency_threshold,
            func.abs(cls.grating_slit_wavelength_value -
                      grating_slit_wavelength) <
            wavelength_equivalency_threshold,
            func.abs(cls.grating_tilt_value -
                      grating_tilt) < tilt_equivalency_threshold)

        instrument_setup_object = instrument_setup_query.first()

        if instrument_setup_object is None:
            instrument_setup_object = cls(
                filter1_id=filter1.id,
                filter2_id=filter2.id,
                grating_id=grating.id,
                grating_central_wavelength_value=grating_central_wavelength,
                grating_slit_wavelength_value=grating_slit_wavelength,
                grating_tilt_value=grating_tilt,
                grating_order=grating_order,
                instrument_id=instrument.id)

            session.add(instrument_setup_object)
            session.commit()

        return instrument_setup_object
Example #23
    def estimate_trends(self):
        '''
        After assigning peak group features, impute the global
        trend for peak and scan shapes
        '''
        session = self.manager.session()
        hypothesis_sample_match_id = self.hypothesis_sample_match_id
        logger.info("Estimating peak trends")

        conn = session.connection()

        cen_alpha, cen_beta = centroid_scan_error_regression(
            session, source_model=JointPeakGroupMatch, filter_fn=lambda q: q.filter(
                JointPeakGroupMatch.hypothesis_sample_match_id == hypothesis_sample_match_id))

        expected_a_alpha, expected_a_beta = expected_a_peak_regression(
            session, source_model=JointPeakGroupMatch, filter_fn=lambda q: q.filter(
                JointPeakGroupMatch.hypothesis_sample_match_id == hypothesis_sample_match_id))

        update_expr = T_JointPeakGroupMatch.update().values(
            centroid_scan_error=func.abs(
                T_JointPeakGroupMatch.c.centroid_scan_estimate - (
                    cen_alpha + cen_beta * T_JointPeakGroupMatch.c.weighted_monoisotopic_mass)),
            a_peak_intensity_error=func.abs(
                T_JointPeakGroupMatch.c.average_a_to_a_plus_2_ratio - (
                    expected_a_alpha + expected_a_beta * T_JointPeakGroupMatch.c.weighted_monoisotopic_mass))
                ).where(
            T_JointPeakGroupMatch.c.hypothesis_sample_match_id == hypothesis_sample_match_id)

        max_weight = conn.execute(select([func.max(T_JointPeakGroupMatch.c.weighted_monoisotopic_mass)])).scalar()
        slices = [0] + [max_weight * float(i)/10. for i in range(1, 11)]
        for i in range(1, len(slices)):
            lower = slices[i - 1]
            upper = slices[i]
            logger.info("Updating slice %f-%f", lower, upper)
            step = update_expr.where(
                T_JointPeakGroupMatch.c.weighted_monoisotopic_mass.between(
                    lower, upper))
            conn.execute(step)
            session.commit()
            conn = session.connection()

        conn = session.connection()
        lower = slices[len(slices) - 1]
        step = update_expr.where(
            T_JointPeakGroupMatch.c.centroid_scan_error == None)
        conn.execute(step)
        session.commit()
Example #24
    def great_circle_distance(cls, p, l):

        radius = 6371  # Radius of earth

        p1 = cls.latitude
        l1 = cls.longitude

        p2 = p

        l2 = l

        dl = func.radians(func.abs(l1 - l2))

        p1 = func.radians(p1)
        p2 = func.radians(p2)
        l1 = func.radians(l1)
        l2 = func.radians(l2)

        ds = func.acos((func.sin(p1) * func.sin(p2)) +
                       (func.cos(p1) * func.cos(p2) * func.cos(dl)))

        dist = radius * ds

        return dist
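A pure-Python cross-check of the spherical law of cosines used above: 90 degrees of longitude along the equator should come out to a quarter of the Earth's circumference, about 10,007 km for R = 6371 km.

import math

def great_circle_km(p1, l1, p2, l2, radius=6371):
    p1, p2, l1, l2 = map(math.radians, (p1, p2, l1, l2))
    dl = abs(l1 - l2)
    ds = math.acos(math.sin(p1) * math.sin(p2) +
                   math.cos(p1) * math.cos(p2) * math.cos(dl))
    return radius * ds

print(great_circle_km(0.0, 0.0, 0.0, 90.0))  # ~10007.5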
Example #25
    def get_intervals(ticker: Ticker, attribute_name: str, min_delta: float):
        """Получить список минимальных интервалов, когда цена на акцию изменилась более чем на min_delta.

        :param ticker: акция
        :param attribute_name: тип цены (open/high/low/close)
        :param min_delta: минимальное изменение цены акции
        """
        historical_price_from = aliased(HistoricalPrice,
                                        name='historical_price_from')
        historical_price_to = aliased(HistoricalPrice,
                                      name='historical_price_to')

        # subquery fetching every interval whose delta is greater than or equal to the given one
        intervals_with_delta = session.query(
            historical_price_from.date.label('date_from'),
            historical_price_to.date.label('date_to'),
            (getattr(historical_price_to, attribute_name) -
             getattr(historical_price_from, attribute_name)).label('delta'),
            (historical_price_to.date -
             historical_price_from.date).label('duration'),
            func.min(historical_price_to.date -
                     historical_price_from.date).over().label('min_duration'),
        ).filter(
            historical_price_from.ticker_id == ticker.id,
            historical_price_to.ticker_id == ticker.id,
            historical_price_from.ticker_id == historical_price_to.ticker_id,
            historical_price_from.date < historical_price_to.date,
            func.abs(
                getattr(historical_price_from, attribute_name) -
                getattr(historical_price_to, attribute_name)) > min_delta,
        ).order_by(historical_price_from.date)

        return session.query(
            intervals_with_delta.subquery('intervals_with_delta')).filter(
                text('duration = min_duration')).all()
Example #26
    def get_by_category(cls, value, location=None):
        """
        """

        query = cls.query.join('categories').filter(Category.value == value)

        # if we have a location, sort by nearest to the user
        if location is not None:
            query = query.order_by(
                asc(
                    func.abs(Place.latitude - location.latitude) +
                    func.abs(Place.longitude - location.longitude)))
        else:  # otherwise sort by title
            query = query.order_by(Place.title)

        return query.all()
Example #27
def index():
    today = datetime.date.today()

    # Latest talk query

    subquery = db.session.query(
        tables.Event.city_id,
        func.max(tables.Event.date).label('latest_date'),
    )
    subquery = subquery.group_by(tables.Event.city_id)
    subquery = subquery.subquery()

    query = db.session.query(tables.Event)
    query = query.join(
        subquery,
        and_(
            subquery.c.latest_date == tables.Event.date,
            subquery.c.city_id == tables.Event.city_id,
        ))
    # order: upcoming first, then by distance from today
    jd = func.julianday
    query = query.order_by(
        jd(subquery.c.latest_date) < jd(today),
        func.abs(jd(subquery.c.latest_date) - jd(today)))
    query = query.options(joinedload(tables.Event.city))
    query = query.options(joinedload(tables.Event.venue))
    latest_events = query.all()

    # Video query

    query = db.session.query(tables.TalkLink)
    query = query.filter(
        or_(
            tables.TalkLink.url.startswith('http://www.youtube.com'),
            tables.TalkLink.url.startswith('https://www.youtube.com'),
        ))
    query = query.join(tables.TalkLink.talk)
    query = query.join(tables.Talk.event)
    query = query.options(joinedload(tables.TalkLink.talk))
    query = query.options(joinedload(tables.TalkLink.talk, 'event'))
    query = query.options(joinedload(tables.TalkLink.talk, 'event', 'city'))
    query = query.options(subqueryload(tables.TalkLink.talk, 'talk_speakers'))
    query = query.options(
        joinedload(tables.TalkLink.talk, 'talk_speakers', 'speaker'))
    query = query.order_by(desc(tables.Event.date), tables.Talk.index)
    videos = []
    for link in query[:12]:
        if link.youtube_id:
            videos.append(link)

    calendar = get_calendar(db.session,
                            first_year=today.year,
                            first_month=today.month - 1,
                            num_months=3)

    return render_template('index.html',
                           latest_events=latest_events,
                           today=today,
                           videos=videos,
                           calendar=calendar)
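The two order_by terms sort upcoming events first and then by distance from today; boolean expressions sort False before True, so past events sink below upcoming ones. The same idea with plain Python sort keys:

events = [-3, 1, 10, -1]  # event dates in days relative to today
events.sort(key=lambda d: (d < 0, abs(d)))
print(events)  # [1, 10, -1, -3]: upcoming first, each group nearest-first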
Example #28
    def get_flow(botid,
                 target_type,
                 target_account,
                 date_from,
                 date_to,
                 member=None):
        if member is None:
            member_id = None
        elif not member.isdigit():
            record = ScoreRecord.find_first_by_member_name(member)
            member_id = record.member_id
        else:
            member_id = member

        accounts = ScoreAccount.find_by_target(botid, target_type,
                                               target_account)

        return ScoreRecord.query.session.query(
            func.sum(func.abs(ScoreRecord.amount)).label('total')).filter(
                ScoreRecord.account.in_(
                    (r.name
                     for r in accounts)) if len(accounts) > 0 else 1 == 1,
                ScoreRecord.date >= date_from, ScoreRecord.date <= date_to,
                ScoreRecord.member_id == member_id
                if member_id is not None else 1 == 1).first()
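The `1 == 1` placeholders above evaluate to plain Python True before SQLAlchemy ever sees them, which recent SQLAlchemy coerces to a no-op filter; sqlalchemy.true() says the same thing explicitly. A hedged alternative for the two conditional filters:

from sqlalchemy import true

account_filter = (ScoreRecord.account.in_([r.name for r in accounts])
                  if accounts else true())
member_filter = (ScoreRecord.member_id == member_id
                 if member_id is not None else true())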
Example #29
    def get_temperature_at(self, timestamp, unit='C'):
        query = self.session.query(Temperature)
        query = query.order_by(func.abs(Temperature.timestamp - timestamp))

        data = query.first().get_data(unit)
        data['unit'] = unit
        return data
Example #30
 def radius(cls):
     """之前的length, contains之所以能够直接使用filter进行筛选, 是因为计算
     其值的操作都仅仅用到了原生的加减乘除和逻辑运算。如果我们需要更复杂一点
     的运算, 例如求绝对值, 那么我们就需要使用 ``hybrid_property.expression()``
     modifier来对针对该项的查询语句进行定义。
     """
     return func.abs(cls.length) / 2
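A minimal self-contained sketch of the pattern that docstring describes, with a made-up Segment model: the plain accessor computes with Python's abs(), and the .expression variant substitutes func.abs so the same attribute works inside filter() and order_by():

from sqlalchemy import Column, Float, Integer, func
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Segment(Base):
    __tablename__ = 'segment'
    id = Column(Integer, primary_key=True)
    length = Column(Float)

    @hybrid_property
    def radius(self):
        return abs(self.length) / 2  # instance access, plain Python

    @radius.expression
    def radius(cls):
        return func.abs(cls.length) / 2  # query access, compiled to SQL

# session.query(Segment).filter(Segment.radius > 1.0) then emits
# WHERE abs(segment.length) / 2 > 1.0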
Example #31
    def _find_transactions(self,
                           account_types,
                           description="",
                           account_name=""):

        app.logger.debug(
            "_find_transaction - account_types_set: %s, description: %s, account_name: %s",
            account_types, description, account_name)

        transactions = self.book.session.query(Transaction.guid,
                                               Transaction.description,
                                               Transaction.enter_date.label("date"),
                                               func.abs(Split.value).label("amount"),
                                               Account.name.label("account_name"),
                                               Account.type.label("account_type")) \
            .join(Split, Split.transaction_guid == Transaction.guid) \
            .join(Account, Account.guid == Split.account_guid) \
            .filter(Account.type.in_(account_types))

        if account_name:
            transactions = transactions.filter(Account.name == account_name)

        if description:
            transactions = transactions.filter(
                Transaction.description.like('%{}%'.format(description)))

        transactions = transactions.order_by(Transaction.enter_date).all()

        app.logger.debug('Transactions: %s',
                         [t._asdict() for t in transactions])

        return transactions
Example #32
    def tags(self):
        dbFacade = self.dbFacade()
        model = dbFacade.model

        conditions = self._get_base_conditions()
        if conditions is not None:
            conditions.append(model.BalanceChange.uid==model.ChangeTag.change_uid)

            tags = dbFacade.db.execute(select([dbFacade.model.ChangeTag.tag, func.abs(func.coalesce(func.sum(dbFacade.model.BalanceChange.amount), 0)).label('sum')],
                and_(*conditions),
                group_by=[dbFacade.model.ChangeTag.tag], 
                order_by=[dbFacade.model.ChangeTag.tag])).fetchall()

            ignore = {}
            if isinstance(request.params.getall('tag'), list):
                for tag in request.params.getall('tag'):
                    ignore[tag] = True
       
            c.sets = [{
                    'name': tag.tag,
                    'value': tag.sum,
                    'link': 'javascript:YAHOO.balances.reports.filterTags(&apos;%(name)s&apos;);' % { 'name': tag.tag },
                } for tag in tags if tag.tag not in ignore]
        
        response.headers['Content-Type'] = 'text/xml; charset=utf-8'
        return render_jinja2('reports/pie-xml.jinja')
Example #33
def movie_detailed_info():
    movie = request.args.get('movie_name')
    if not movie:
        return jsonify({"msg": "Please provide movie_name"})

    movies_detailed_info = []
    today_date = datetime.datetime.now().date()
    no = get_records_count()

    try:
        query = db.session.query(
                    func.max(Cinemalevel.movie_name).label("movie"),
                    func.max(Cinemalevel.crawl_hour).label("crawl_hour"),
                    func.max(Cinemalevel.crawl_date).label("crawl_date"),
                    func.avg(Cinemalevel.percent_occupancy).label('percent_occupancy'),
                    func.sum(Cinemalevel.category_occupied_seats).label('tickets_sold'),
                    func.abs(func.sum((Cinemalevel.category_occupied_seats)*(Cinemalevel.category_price))/func.sum(Cinemalevel.category_occupied_seats)).label("avg_price"),
                    func.count(func.distinct(Cinemalevel.show_datetime)).label("shows"))\
                .filter_by(movie_name=movie, show_date=today_date)\
                .group_by(Cinemalevel.movie_name, Cinemalevel.crawl_date, Cinemalevel.crawl_hour)\
                .order_by(Cinemalevel.crawl_date.desc(), Cinemalevel.crawl_hour.desc())\
                .limit(no).all()
        movies_detailed_info = [each._asdict() for each in query]
        # remove the latest crawl as it may be still running, provide one hour ago data
        movies_detailed_info = movies_detailed_info[1:]

    except Exception as err_msg:
        print(err_msg)

    return jsonify({'data': movies_detailed_info, "movie_name": movie})
Example #34
 def get_closest(number, days_ago, numbers_count):
     date_from = (datetime.datetime.now() -
                  datetime.timedelta(days=days_ago)).strftime('%Y-%m-%d')
     rows = session.query(Result).filter(
         Result.ins_date > date_from).order_by(
             func.abs(Result.number - number)).limit(
                 (days_ago + 1) * numbers_count * 2).all()
     return rows
Example #35
 def get_cst_subquery_baked(self, main_query):
     subquery = main_query.session.query(Case) \
         .filter(Case.id.op('IN')(bindparam('case_id_array'))) \
         .subquery()
     return main_query.join(subquery, literal(True)) \
         .filter(func.ST_DWithin(Case.location, subquery.c.location, self.dycast_parameters.close_in_space),
                 func.abs(Case.report_date - subquery.c.report_date) <= self.dycast_parameters.close_in_time,
                 Case.id < subquery.c.id)
Example #36
    def get_close_space_and_time(self, cases_in_cluster_query):
        subquery = cases_in_cluster_query.subquery()
        query = cases_in_cluster_query.join(subquery, literal(True)) \
            .filter(func.ST_DWithin(Case.location, subquery.c.location, self.dycast_parameters.close_in_space),
                    func.abs(Case.report_date - subquery.c.report_date) <= self.dycast_parameters.close_in_time,
                    Case.id < subquery.c.id)

        return database_service.get_count_for_query(query)
Example #37
    def reddit_score(self):
        s = self.upvotes - self.downvotes
        order = func.log(10, func.greatest(func.abs(s), 1))
        sign = func.sign(s)
        seconds = func.date_part('epoch', self.timestamp) - 1134028003

        return func.round(func.cast(sign * order + seconds / 45000, Numeric),
                          7)
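For intuition, the same ranking in plain Python: the log term dampens the vote margin and the epoch offset steadily boosts newer posts, so a tenfold score difference is worth 45000 seconds (12.5 hours) of age:

import math

def reddit_score(upvotes, downvotes, epoch_seconds):
    s = upvotes - downvotes
    order = math.log10(max(abs(s), 1))
    sign = (s > 0) - (s < 0)
    seconds = epoch_seconds - 1134028003
    return round(sign * order + seconds / 45000, 7)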
Example #38
    def _get_bf_config_id(self, metadata):
        bf_params = dict(
            centre_frequency=metadata['centre_frequency'],
            bandwidth=metadata['bandwidth'],
            incoherent_nchans=metadata['incoherent_nchans'],
            incoherent_tsamp=metadata['incoherent_tsamp'],
            incoherent_antennas=metadata.get('incoherent_antennas', ''),
            coherent_nchans=metadata['coherent_nchans'],
            coherent_tsamp=metadata['coherent_tsamp'],
            coherent_antennas=metadata.get('coherent_antennas', ''),
            configuration_authority=metadata.get('configuration_authority',
                                                 ''),
            receiver=metadata.get('receiver', ''),
            metainfo=metadata.get('metadata', ''))
        with self.session() as session:
            bf_config_id = session.query(BeamformerConfiguration.id).filter(
                BeamformerConfiguration.centre_frequency ==
                bf_params['centre_frequency'],
                BeamformerConfiguration.bandwidth == bf_params['bandwidth'],
                BeamformerConfiguration.incoherent_nchans ==
                bf_params['incoherent_nchans'],
                func.abs(BeamformerConfiguration.incoherent_tsamp -
                         bf_params['incoherent_tsamp']) < FLOAT_DIFF,
                BeamformerConfiguration.incoherent_antennas.ilike(
                    bf_params['incoherent_antennas']),
                BeamformerConfiguration.coherent_nchans ==
                bf_params['coherent_nchans'],
                func.abs(BeamformerConfiguration.coherent_tsamp -
                         bf_params['coherent_tsamp']) < FLOAT_DIFF,
                BeamformerConfiguration.coherent_antennas.ilike(
                    bf_params['coherent_antennas']),
                BeamformerConfiguration.configuration_authority.ilike(
                    bf_params['configuration_authority']),
                BeamformerConfiguration.receiver.ilike(bf_params['receiver']),
                BeamformerConfiguration.metainfo.ilike(
                    bf_params['metainfo'])).scalar()

            if not bf_config_id:
                bf_config = BeamformerConfiguration(**bf_params)
                session.add(bf_config)
                session.flush()
                bf_config_id = bf_config.id
        with self.session() as session:
            bf_config = session.query(BeamformerConfiguration).get(
                bf_config_id)
        return bf_config_id, bf_config
Example #39
    def get_nearest_close_in_time_distribution_margin_query(self, session, cluster):

        return session.query(DistributionMargin.close_time.label('nearest_close_time')) \
            .filter(
            DistributionMargin.number_of_cases == cluster.case_count,
            DistributionMargin.close_in_space_and_time == cluster.close_space_and_time) \
            .order_by(func.abs(DistributionMargin.close_time - cluster.close_in_time)) \
            .limit(1) \
            .as_scalar()
Example #40
 def get_by_category(cls, value, location=None):
     """
     """
     
     query = cls.query.join('categories').filter(Category.value==value)
     
     # if we have a location, sort by nearest to the user
     if location is not None:
         query = query.order_by(
             asc(
                 func.abs(Place.latitude - location.latitude) +
                 func.abs(Place.longitude - location.longitude)
             )
         )
     else: # otherwise sort by title
         query = query.order_by(Place.title)
     
     return query.all()
Example #41
    def get_last_identifier(self, sample=None):
        with self.session_ctx() as sess:
            q = sess.query(IrradiationPositionTbl)
            if sample:
                q = q.join(SampleTbl)
                q = q.filter(SampleTbl.name == sample)

            q = q.order_by(func.abs(IrradiationPositionTbl.identifier).desc())
            return self._query_first(q)
Example #42
    def get_last_identifier(self, sample=None):
        with self.session_ctx() as sess:
            q = sess.query(IrradiationPositionTbl)
            if sample:
                q = q.join(SampleTbl)
                q = q.filter(SampleTbl.name == sample)

            q = q.order_by(func.abs(IrradiationPositionTbl.identifier).desc())
            return self._query_first(q)
Example #43
    def get_close_space_and_time_query(self, session):
        case_query = session.query(Case.id)
        subquery = session.query(Case).subquery()

        return case_query.join(subquery, literal(True)) \
            .filter(func.ST_DWithin(Case.location, subquery.c.location, self.dycast_parameters.close_in_space),
                    func.abs(Case.report_date - subquery.c.report_date) <= self.dycast_parameters.close_in_time,
                    Case.id < subquery.c.id) \
            .subquery()
Example #44
 def approx_equal(attribute, value, relative_tolerance=1e-6):
     """Return a column expression specifying that ``attribute`` and
     ``value`` are equal within a specified relative tolerance.
     Treat the case when value == 0 specially: require exact equality.
     """
     if value == 0.0:
         return attribute == 0.0
     else:
         return (func.abs((attribute - value) / attribute) <
                 relative_tolerance)
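Hypothetical usage of the helper, mirroring the tolerance filters from the detector examples above (Detector and the header values are stand-ins, not from the original source):

matches = session.query(Detector).filter(
    approx_equal(Detector.gain, header['GAIN']),
    approx_equal(Detector.readout_noise, header['RDNOISE'])).all()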
Example #45
    def get_close_space_and_time_new(self, session, cluster):
        cases_in_cluster = session.query(Case).filter(Case.id.in_(cluster.case_id_array))
        subquery = cases_in_cluster.subquery()

        close_in_space_and_time = cases_in_cluster.join(subquery, literal(True)) \
            .filter(func.ST_DWithin(Case.location, subquery.c.location, self.dycast_parameters.close_in_space),
                    func.abs(Case.report_date - subquery.c.report_date) <= self.dycast_parameters.close_in_time,
                    Case.id < subquery.c.id)

        return database_service.get_count_for_query(close_in_space_and_time)
Example #46
def transferred_amount(from_account, to_account, begin_date=None, end_date=None):
    """
    Determine how much has been transferred from one account to another in a
    given interval.

    A negative value indicates that more has been transferred from to_account
    to from_account than the other way round.

    The interval boundaries may be None, which indicates no lower and upper
    bound respectively.
    :param FinanceAccount from_account:
    :param FinanceAccount to_account:
    :param date|None begin_date: since when (inclusive)
    :param date|None end_date: till when (inclusive)
    :rtype: int
    """
    split1 = aliased(Split)
    split2 = aliased(Split)
    query = session.session.query(
        cast(func.sum(
            sign(split2.amount) *
            least(func.abs(split1.amount), func.abs(split2.amount))
        ), Integer)
    ).select_from(
        split1
    ).join(
        (split2, split1.transaction_id == split2.transaction_id)
    ).join(
        Transaction, split2.transaction_id == Transaction.id
    ).filter(
        split1.account == from_account,
        split2.account == to_account,
        sign(split1.amount) != sign(split2.amount)
    )
    if begin_date is not None and end_date is not None:
        query = query.filter(
            between(Transaction.valid_date, begin_date, end_date)
        )
    elif begin_date is not None:
        query = query.filter(Transaction.valid_date >= begin_date)
    elif end_date is not None:
        query = query.filter(Transaction.valid_date <= end_date)
    return query.scalar()
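Worked numbers for the sign/least aggregation above: a transaction that moves 30 out of from_account (split1.amount = -30) and into to_account (split2.amount = +30) contributes sign(+30) * least(30, 30) = +30, while a transfer in the opposite direction contributes -30, so the sum nets the two flows exactly as the docstring promises:

amount1, amount2 = -30, 30  # split1 (from_account), split2 (to_account)
sign2 = (amount2 > 0) - (amount2 < 0)
print(sign2 * min(abs(amount1), abs(amount2)))  # 30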
Example #47
def query(table, api_obj, stream=False):
    vars_and_vals = api_obj.vars_and_vals
    values = api_obj.values
    exclude = api_obj.exclude

    filters = process_value_filters(table, vars_and_vals, api_obj)
    filters += where_filters(table, api_obj.where)
    filters += sumlevel_filtering(table, api_obj)

    if values:
        pk = [col for col in table.__table__.columns if col.primary_key and col.key not in values]
        cols = pk + [getattr(table, col_name) for col_name in values]
    else:
        cols = get_columns(table)

    if exclude:
        cols = [col for col in cols
                if (col not in exclude if isinstance(col, basestring)
                    else col.key not in exclude)]

    # qry = table.query.with_entities(*cols)
    qry = table.query

    if hasattr(table, "crosswalk_join"):
        qry = table.crosswalk_join(qry)

    if stream or api_obj.display_names:
        qry, cols = use_attr_names(table, qry, cols)
    qry = qry.with_entities(*cols)

    if hasattr(table, "JOINED_FILTER"):
        qry, filters = handle_join(qry, filters, table, api_obj)

    qry = qry.filter(*filters)

    if api_obj.order:
        sort = desc if api_obj.sort == "desc" else asc
        if api_obj.order not in TableManager.possible_variables:
            if api_obj.order == 'abs(pct_change)':
                pass  # allow this
            else:
                raise DataUSAException("Bad order parameter", api_obj.order)
        # sort_stmt = text("{} {} NULLS LAST".format(api_obj.order, sort))
        if api_obj.order == 'abs(pct_change)':
            target_col = func.abs(table.pct_change)
        else:
            target_col = getattr(table, api_obj.order)

        qry = qry.order_by(sort(target_col).nullslast())
    if api_obj.limit:
        qry = qry.limit(api_obj.limit)

    if stream:
        return stream_format(table, cols, qry, api_obj)

    return simple_format(table, cols, qry, api_obj)
Example #48
def transferred_amount(from_account, to_account, when=UnboundedInterval):
    """
    Determine how much has been transferred from one account to another in a
    given interval.

    A negative value indicates that more has been transferred from to_account
    to from_account than the other way round.

    The interval boundaries may be None, which indicates no lower and upper
    bound respectively.
    :param Account from_account: source account
    :param Account to_account: destination account
    :param Interval[date] when: Interval in which transactions became valid
    :rtype: int
    """
    split1 = aliased(Split)
    split2 = aliased(Split)
    query = session.session.query(
        cast(func.sum(
            sign(split2.amount) *
            least(func.abs(split1.amount), func.abs(split2.amount))
        ), Money)
    ).select_from(
        split1
    ).join(
        (split2, split1.transaction_id == split2.transaction_id)
    ).join(
        Transaction, split2.transaction_id == Transaction.id
    ).filter(
        split1.account == from_account,
        split2.account == to_account,
        sign(split1.amount) != sign(split2.amount)
    )
    if not when.unbounded:
        query = query.filter(
            between(Transaction.valid_on, when.begin, when.end)
        )
    elif when.begin is not None:
        query = query.filter(Transaction.valid_on >= when.begin)
    elif when.end is not None:
        query = query.filter(Transaction.valid_on <= when.end)
    return query.scalar()
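A hedged usage sketch for transferred_amount; the account objects and the closed() interval constructor are assumptions based on the docstring, not confirmed names from the project:

from datetime import date

# Net amount moved from user_account to bank_account during May 2020;
# a negative result means the flow was mostly the other way round.
interval = closed(date(2020, 5, 1), date(2020, 5, 31))
net = transferred_amount(user_account, bank_account, when=interval)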
Example #49
0
    def estimate_trends(self):
        '''
        After assigning peak group features, impute the global
        trend for peak and scan shapes
        '''
        logger.info("Estimating peak trends")
        session = self.manager.session()
        engine = self.manager.connect()

        conn = engine.connect()
        cen_alpha, cen_beta = centroid_scan_error_regression(session)
        expected_a_alpha, expected_a_beta = expected_a_peak_regression(session)

        update_expr = TDecon2LSPeakGroup.update().values(
            centroid_scan_error=func.abs(
                TDecon2LSPeakGroup.c.centroid_scan_estimate - (
                    cen_alpha + cen_beta * TDecon2LSPeakGroup.c.weighted_monoisotopic_mass)),
            a_peak_intensity_error=func.abs(
                TDecon2LSPeakGroup.c.average_a_to_a_plus_2_ratio - (
                    expected_a_alpha + expected_a_beta * TDecon2LSPeakGroup.c.weighted_monoisotopic_mass))
                )
        max_weight = conn.execute(select([func.max(TDecon2LSPeakGroup.c.weighted_monoisotopic_mass)])).scalar()
        slices = [0] + [max_weight * float(i)/10. for i in range(1, 11)]
        for i in range(1, len(slices)):
            transaction = conn.begin()
            lower = slices[i - 1]
            upper = slices[i]
            logger.info("Updating slice %f-%f", lower, upper)
            step = update_expr.where(
                TDecon2LSPeakGroup.c.weighted_monoisotopic_mass.between(
                    lower, upper))
            conn.execute(step)
            transaction.commit()

        transaction = conn.begin()
        lower = slices[-1]
        step = update_expr.where(
            TDecon2LSPeakGroup.c.weighted_monoisotopic_mass >= lower)
        conn.execute(step)
        transaction.commit()

        conn.close()
Example #50
0
    def _get_default_order(self):
        order = super(ModelView, self)._get_default_order()

        if order is not None:

            attr, joins = tools.get_field_with_path(self.model, order['field'])
            if order['absolute_value']:
                attr = func.abs(attr)
            return attr, joins, order['sort_desc']

        return None
Example #51
0
    def link_science_frames(self):

        science_frames = self.session.query(GMOSMOSRawFITS).join(ObservationType).join(ObservationClass)\
            .filter(ObservationClass.name=='science', ObservationType.name=='object').all()
        for science_frame in science_frames:
            # Nearest-in-time flat for the same mask and observation block,
            # selected by the smallest absolute MJD difference.
            flat = self.session.query(GMOSMOSRawFITS)\
                .join(ObservationType).filter(ObservationType.name=='flat',
                                              GMOSMOSRawFITS.mask_id==science_frame.mask_id,
                                              GMOSMOSRawFITS.observation_block_id==science_frame.observation_block_id)\
                .order_by(func.abs(GMOSMOSRawFITS.mjd - science_frame.mjd)).first()

            # Nearest-in-time arc taken with the same mask and instrument setup.
            mask_arc = self.session.query(GMOSMOSRawFITS)\
                .join(ObservationType).join(ObservationClass)\
                .filter(ObservationType.name=='arc', GMOSMOSRawFITS.mask_id==science_frame.mask_id,
                        GMOSMOSRawFITS.instrument_setup_id==science_frame.instrument_setup_id)\
                .order_by(func.abs(GMOSMOSRawFITS.mjd - science_frame.mjd)).first()

            self.session.add(GMOSMOSScience(id=science_frame.id, flat_id=flat.id, mask_arc_id=mask_arc.id))
            logger.info('Link Science Frame %s with:\nMask Arc: %s\nFlat: %s\n', science_frame, flat, mask_arc)
        self.session.commit()
Example #52
0
def index():
    today = datetime.date.today()

    # Latest talk query

    subquery = db.session.query(
        tables.Event.series_slug,
        func.max(tables.Event.date).label('latest_date'),
    )
    subquery = subquery.group_by(tables.Event.series_slug)
    subquery = subquery.subquery()

    query = db.session.query(tables.Event)
    query = query.join(subquery,
                       and_(subquery.c.latest_date == tables.Event.date,
                            subquery.c.series_slug == tables.Event.series_slug,
                            ))
    query = query.filter(tables.Event.series_slug.in_(FEATURED_SERIES))
    # order: upcoming events first (the boolean sorts False before True),
    # then by absolute distance from today
    jd = func.julianday
    query = query.order_by(jd(subquery.c.latest_date) < jd(today),
                           func.abs(jd(subquery.c.latest_date) - jd(today)))
    query = query.options(joinedload(tables.Event.series))
    query = query.options(joinedload(tables.Event.venue))
    featured_events = query.all()

    # Video query

    query = db.session.query(tables.TalkLink)
    query = query.filter(or_(
        tables.TalkLink.url.startswith('http://www.youtube.com'),
        tables.TalkLink.url.startswith('https://www.youtube.com'),
    ))
    query = query.join(tables.TalkLink.talk)
    query = query.join(tables.Talk.event)
    query = query.options(joinedload(tables.TalkLink.talk))
    query = query.options(joinedload(tables.TalkLink.talk, 'event'))
    query = query.options(joinedload(tables.TalkLink.talk, 'event', 'series'))
    query = query.options(subqueryload(tables.TalkLink.talk, 'talk_speakers'))
    query = query.options(joinedload(tables.TalkLink.talk, 'talk_speakers', 'speaker'))
    query = query.order_by(desc(tables.Event.date), tables.Talk.index)
    videos = []
    for link in query[:12]:
        if link.youtube_id:
            videos.append(link)

    calendar = get_calendar(db.session, first_year=today.year,
                            series_slugs=FEATURED_SERIES,
                            first_month=today.month - 1, num_months=3)

    return render_template('index.html', featured_events=featured_events,
                           today=today, videos=videos, calendar=calendar)
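The ordering trick above is worth isolating: a boolean expression sorts False (0) before True (1), so upcoming events come first, and the absolute julianday difference then ranks by closeness to today. A standalone sketch reusing the names from the example (julianday is SQLite-specific):

from sqlalchemy import func

jd = func.julianday
is_past = jd(tables.Event.date) < jd(today)       # False (0) sorts before True (1)
closeness = func.abs(jd(tables.Event.date) - jd(today))
events = db.session.query(tables.Event).order_by(is_past, closeness).all()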
Example #53
0
def get_next_question():
    distance_to_user_avg_score = func.abs(Question.difficulty - g.user.avg_score)
    question = (Question.query
                .filter(~Question.id.in_(g.user.answered_questions)
                        & (Question.id != g.user.last_question))
                .order_by(distance_to_user_avg_score)
                .first())
    if question is None:
        # user answered every question, just give them a random one that preferably is
        # not the last asked question
        question = Question.query.order_by(Question.id == g.user.last_question,
                                           func.random()).first()
    return question
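A hypothetical route wiring for get_next_question; the route, app object, and template name are assumptions for illustration only:

@app.route('/quiz/next')
def next_question():
    # Serves the unanswered question whose difficulty best matches
    # the user's average score, per get_next_question() above.
    question = get_next_question()
    return render_template('question.html', question=question)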
Example #54
0
    def get_last_identifiers(self, sample=None, limit=1000, excludes=None):
        with self.session_ctx() as sess:
            q = sess.query(IrradiationPositionTbl)
            if sample:
                q = q.join(SampleTbl)
                q = q.filter(SampleTbl.name == sample)
                if excludes:
                    q = q.filter(not_(SampleTbl.name.in_(excludes)))
            elif excludes:
                q = q.join(SampleTbl)
                q = q.filter(not_(SampleTbl.name.in_(excludes)))
            q = q.filter(IrradiationPositionTbl.identifier.isnot(None))
            q = q.order_by(func.abs(IrradiationPositionTbl.identifier).desc())
            q = q.limit(limit)
            return [ni.identifier for ni in self._query_all(q, verbose_query=True)]
Example #55
0
def check_upload (diff):
    """Check upload time of first revision with created time for change.

    For each change, the upload time of the first revision (patchset) is
    matched against the created time for the change. Those changes with
    more than diff mins. of difference are shown.

    Parameters
    ----------

    diff: int
        Minutes of difference considered.

    """

    revs = session.query(label("daterev",
                               func.min(DB.Revision.date)),
                         label("change_id",
                               DB.Revision.change_id),
                         label("number",
                               DB.Change.number)) \
          .filter(DB.Revision.change_id == DB.Change.uid) \
          .group_by("change_id") \
          .subquery()
    res = session.query(
        label("number",
              revs.c.number),
        label("created",
              DB.Change.created),
        label("daterev",
              revs.c.daterev)
        ) \
        .filter(and_(
                # The comparison must sit outside func.abs(); the original
                # wrapped it inside, which compared nothing to the threshold.
                func.abs(func.timediff(
                        DB.Change.created,
                        revs.c.daterev)) > timedelta(minutes=diff),
                DB.Change.uid == revs.c.change_id)) \
        .order_by(func.datediff(DB.Change.created, revs.c.daterev),
                  func.timediff(DB.Change.created, revs.c.daterev))
    messages = res.all()
    for message in messages:
        print "Change " + str(message.number) + ": " + \
            str(message.created - message.daterev) + \
            " -- " + str(message.created) + " (created), " + \
            str(message.daterev) + " (first revision)"
    print "Total changes with discrepancy: " + str(len(messages))
Example #56
0
    def timeline(self):
        dbFacade = self.dbFacade()
        model = dbFacade.model

        conditions = self._get_base_conditions(use_resolution=True)
        if conditions is None:
            return "<graph></graph>"

        resolution = request.params.get('resolution', 'days')
        time_expression = {
            'weeks': cast(func.date_trunc('week', model.BalanceChange.transaction_date), DATE),
            'months': cast(func.date_trunc('month', model.BalanceChange.transaction_date), DATE)
        }.get(resolution, model.BalanceChange.transaction_date)

        timeline = dbFacade.db.execute(select(
            [time_expression.label('time'),
             func.abs(func.coalesce(func.sum(model.BalanceChange.amount), 0)).label('sum')],
            and_(*conditions),
            from_obj=[model.balance_changes_table],
            group_by=['time'])).fetchall()

        time2sums = {row.time: row.sum for row in timeline}

        c.sets = []
        if time2sums:
            (start_date, end_date) = h.get_dates()

            if resolution == 'months':
                for date in months_range(start_date, end_date):
                    show = 1
                    sum = time2sums.get(date, 0)

                    c.sets.append({ 'name': self._timeline_name(date), 'value': sum, 'showName': show})
            elif resolution == 'weeks':
                for date in weeks_range(start_date, end_date):
                    show = 1
                    sum = time2sums.get(date, 0)

                    c.sets.append({ 'name': self._timeline_name(date), 'value': sum, 'showName': show})
            else:
                for date in days_range(start_date, end_date):
                    # Label only Mondays so the daily axis stays readable.
                    show = 1 if date.weekday() == 0 else 0
                    sum = time2sums.get(date, 0)

                    c.sets.append({ 'name': self._timeline_name(date), 'value': sum, 'showName': show})

        response.headers['Content-Type'] = 'text/xml; charset=utf-8'
        return render_jinja2('reports/timeline-xml.jinja')
Example #57
0
    def get_approx_position_as_geojson(self, time=None, filter_charging=True):
        # A datetime.utcnow() default in the signature would be evaluated only
        # once, at definition time; resolve the current time per call instead.
        if time is None:
            time = datetime.utcnow()
        positions = []
        if self.id is not None:
            query = Position().queryObject().filter(Position.animal_id
                    == self.id)
            if filter_charging:
                query = query.filter(Position.charging == False)
            # Keep only fixes within roughly two hours of the requested time.
            query = query.filter(func.abs(
                    func.date_part('hour', Position.date - time)) <= 2)
            aux = query.order_by(Position.date.desc()).limit(50)
            for position in aux:
                positions.append(position.geom)
        return self.session.scalar(func.ST_AsGeoJson(
            func.ST_MinimumBoundingCircle(func.ST_Collect(
                array(positions))))) if len(positions) > 1 else None
Example #58
0
def get_labels(session, score, detection_filters, vehicle_filters,
               model, threshold):
    """Retrieves all possible detection-annotation pairings
       that satify the VOC criterion."""

    overlap_score = overlap(Detection, Vehicle)

    # pylint: disable-msg=E1101
    dist_x = (func.ST_X(func.ST_Transform(Detection.lla, 102718))
              - func.ST_X(func.ST_Transform(Vehicle.lla, 102718))) \
        * 0.3048
    dist_y = (func.ST_Y(func.ST_Transform(Detection.lla, 102718))
              - func.ST_Y(func.ST_Transform(Vehicle.lla, 102718))) \
        * 0.3048
    dist = func.sqrt(dist_x * dist_x + dist_y * dist_y)
    height_diff = func.abs(
        func.ST_Z(Detection.lla) - func.ST_Z(Vehicle.lla))

    labels = session.query(
        overlap_score.label('overlap'),
        Vehicle.id.label('vid'),
        Detection.id.label('did'),
        dist.label('dist'),
        height_diff.label('height_diff'),
        score.label('score')) \
        .select_from(Detection) \
        .join(Photo) \
        .join(Vehicle) \
        .join(Model) \
        .filter(Model.filename == model) \
        .filter(Photo.test == True) \
        .filter(overlap_score > 0.5) \
        .filter(score > threshold)
    # pylint: enable-msg=E1101

    for query_filter in detection_filters:
        labels = labels.filter(query_filter)

    for query_filter in vehicle_filters:
        labels = labels.filter(query_filter)

    labels = labels.order_by(desc(overlap_score)).all()

    return labels
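A hypothetical invocation; the score expression, empty filter lists, model filename, and threshold below are illustrative stand-ins, not values from the original project:

labels = get_labels(session, score_expr, [], [], 'model_v1.xml', 0.05)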
Example #59
0
def acquisitions_by_mapgrid_and_date(date, mapgrid_target, day_buffer):
    from sqlalchemy import Integer, func, Date
    from sqlalchemy.sql.expression import cast

    session = SESSION_MAKER()
    images_paths = (
        session.query(RawProduct.product_path, RapidEyeFootPrintsMexicoOld.code, RapidEyeFootPrintsMexicoOld.mapgrid2)
        .distinct()
        .join(RawProduct.information)
        .filter(
            RawProduct.satellite_id == 1,
            RapidEyeFootPrintsMexicoOld.mapgrid2 == mapgrid_target,
            cast(RapidEyeFootPrintsMexicoOld.code, Integer) == cast(Information.grid_id, Integer),
            func.abs(cast(RawProduct.acquisition_date, Date) - date) < day_buffer,
        )
        .all()
    )
    # RawProduct.sensor_id
    return images_paths
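A hedged usage sketch; the mapgrid value and date are illustrative:

import datetime

# RapidEye product paths over mapgrid 1234 acquired within 3 days
# of 2015-06-01.
paths = acquisitions_by_mapgrid_and_date(datetime.date(2015, 6, 1), 1234, 3)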
Example #60
0
def list(page_nr=1, search=None):
    if not ModuleAPI.can_read('vacancy'):
        return abort(403)

    # Order the vacancies so that those that are new or almost expired
    # end up on top.
    order = func.abs(
        (100 * (func.datediff(Vacancy.start_date, func.current_date()) /
                func.datediff(Vacancy.start_date, Vacancy.end_date))) - 50)

    if search is not None:
        vacancies = Vacancy.query.join(Company). \
            filter(or_(Vacancy.title.like('%' + search + '%'),
                       Company.name.like('%' + search + '%'),
                       Vacancy.workload.like('%' + search + '%'),
                       Vacancy.contract_of_service.like('%' + search + '%'))) \
            .order_by(order.desc())

        if not ModuleAPI.can_write('vacancy'):
            vacancies = vacancies.filter(
                and_(Vacancy.start_date <
                     datetime.utcnow(), Vacancy.end_date >
                     datetime.utcnow()))

        vacancies = vacancies.paginate(page_nr, 15, False)

        return render_template('vacancy/list.htm', vacancies=vacancies,
                               search=search, path=FILE_FOLDER,
                               title="Vacatures")

    if ModuleAPI.can_write('vacancy'):
        vacancies = Vacancy.query.join(Company).order_by(order.desc())
    else:
        vacancies = Vacancy.query.order_by(order.desc()) \
            .filter(and_(Vacancy.start_date <
                         datetime.utcnow(), Vacancy.end_date >
                         datetime.utcnow()))

    vacancies = vacancies.paginate(page_nr, 15, False)

    return render_template('vacancy/list.htm', vacancies=vacancies,
                           search="", path=FILE_FOLDER, title="Vacatures")