Esempio n. 1
0
    def authenticate(cls, username, password, session=None):
        """Authenticate *username*/*password*.

        Returns a filled ``cls`` account object on success, or ``False``
        when the user is unknown or the password does not validate.  A
        numeric username of -1 returns a "try game" guest account.  On
        success the local user row is re-synced from the linked Discuz
        member record and both lastactivity timestamps are touched
        (``unix_timestamp()`` is a MySQL function).

        NOTE(review): ``session`` is accepted but never used here —
        confirm callers before removing it.
        """
        from sqlalchemy import func

        # Guest login check.  Only the int() conversion is guarded now:
        # the original bare ``except`` also swallowed errors raised by
        # cls() / _fill_trygame(), hiding real bugs.
        try:
            is_trygame = int(username) == -1
        except (ValueError, TypeError):
            is_trygame = False
        if is_trygame:
            acc = cls()
            acc._fill_trygame()
            return acc

        user = cls.find(username)

        if not user:
            return False

        if not cls.validate_by_password(user, password):
            return False

        # Sync the local user row from the authoritative Discuz member.
        dz_member = user.dz_member
        user.id       = dz_member.uid
        user.username = dz_member.username
        user.password = password_hash(password)
        user.email    = dz_member.email
        user.title    = dz_member.member_field.customstatus
        user.status   = dz_member.status

        acc = cls()
        acc._fill_account(user)

        # Touch activity timestamps on both sides.
        user.lastactivity = func.unix_timestamp()
        dz_member.member_status.lastactivity = func.unix_timestamp()

        return acc
Esempio n. 2
0
def find_unvalidated_versions(time_delta, environment):
    """Find the latest deployments that are not validated in a given
       environment for a given amount of time
    """

    # Inner query: every non-invalidated deployment row for the requested
    # environment, ordered newest-first (realized, then id).  The ordering
    # matters: the outer GROUP BY below relies on the first row per group
    # being the latest deployment — MySQL's permissive GROUP BY behaviour;
    # TODO confirm ONLY_FULL_GROUP_BY is disabled on the target server.
    subq = (Session.query(Package.pkg_name, Package.version,
                          Package.revision, AppDefinition.app_type,
                          AppDeployment.environment,
                          AppDeployment.realized, AppDeployment.user,
                          AppDeployment.status)
                   .join(AppDeployment)
                   .join(AppDefinition)
                   .filter(AppDeployment.status!='invalidated')
                   .filter(AppDeployment.environment==environment)
                   .order_by(AppDeployment.realized.desc(), AppDeployment.id.desc())
                   .subquery(name='t_ordered'))

    # Outer query: one row per (app type, environment, package); keep only
    # groups whose latest deployment reached a '%complete' status more than
    # time_delta seconds ago (unix_timestamp()/now() are MySQL functions).
    # NOTE(review): the subquery selects AppDefinition.app_type but is read
    # back as subq.c.appType — presumably the underlying DB column is named
    # 'appType'; verify against the model definition.
    return (Session.query(subq.c.pkg_name, subq.c.version, subq.c.revision,
                          subq.c.appType, subq.c.environment,
                          subq.c.realized, subq.c.user, subq.c.status)
                   .group_by(subq.c.appType, subq.c.environment,
                             subq.c.pkg_name)
                   .having(and_(subq.c.status.like('%complete'),
                                func.unix_timestamp(subq.c.realized) <
                                func.unix_timestamp(func.now()) - time_delta))
                   .all())
Esempio n. 3
0
def start_client(step):
    """Start messaging_client and remember which messages it should deliver.

    Selects up to QSIZE outgoing v2 messages whose retry backoff has
    elapsed, stores them in the module-global ``right_msgs``, launches the
    client process, and asserts it shows up in ``ps`` output.
    """
    QSIZE = 1024
    CRATIO = 120  # Common ratio for send interval progression
    # Acquire the DB handle *before* the try block so the finally clause is
    # always safe: the original assigned ``db`` inside the try, so a failure
    # in DBManager() raised NameError from the finally clause.
    db_manager = util.DBManager(world.config['connections']['mysql'])
    db = db_manager.get_db()
    try:
        where1 = and_(db.messages.type == 'out')
        where2 = and_(db.messages.message_version == 2)
        # Due for (re)delivery: last attempt + attempts * CRATIO lies in
        # the past (unix_timestamp()/now() are MySQL functions).
        where3 = and_(func.unix_timestamp(db.messages.dtlasthandleattempt) +
                      db.messages.handle_attempts * CRATIO <
                      func.unix_timestamp(func.now()))
        msgs = db.messages.filter(db.messages.status == 0,
                                  where1, where2, where3).order_by(
                                  desc(db.messages.id)).all()[0:QSIZE]
        global right_msgs
        right_msgs = msgs

        cnf = ETC_DIR + '/config.yml'
        subps.Popen(['python', '-m', 'scalrpy.messaging.messaging_client', '--start', '-c', cnf])
        time.sleep(2)
        ps = subps.Popen(['ps -ef'], shell=True, stdout=subps.PIPE)
        output = ps.stdout.read()
        ps.stdout.close()
        ps.wait()
        assert 'messaging_client' in output
    except Exception:
        # Narrowed from a bare ``except`` (which also caught SystemExit and
        # KeyboardInterrupt); the step still fails via the assert, matching
        # the sibling start_daemon step's style.
        assert False
    finally:
        db.session.close()
        db.session.remove()
Esempio n. 4
0
def start_daemon(step):
    """Start the scalrpy.messaging daemon and record the message ids it should send.

    Selects up to QSIZE outgoing v2 messages whose retry backoff has
    elapsed, records their ids in ``world.right_msgs``, launches the
    daemon, and asserts it shows up in ``ps`` output.
    """
    QSIZE = 1024
    CRATIO = 120  # Common ratio for send interval progression
    # Create the DB handle *before* the try block so the finally clause is
    # always safe: the original assigned ``db`` inside the try, and a
    # failure in DBManager() would have raised NameError from finally.
    db_manager = dbmanager.DBManager(world.config['connections']['mysql'])
    db = db_manager.get_db()
    try:
        where1 = and_(db.messages.type == 'out')
        where2 = and_(db.messages.message_version == 2)
        # Due for (re)delivery: last attempt + attempts * CRATIO lies in
        # the past (unix_timestamp()/now() are MySQL functions).
        where3 = and_(func.unix_timestamp(db.messages.dtlasthandleattempt) +
                      db.messages.handle_attempts * CRATIO <
                      func.unix_timestamp(func.now()))
        msgs = db.messages.filter(db.messages.status == 0,
                                  where1, where2, where3).order_by(
                                  desc(db.messages.id)).all()[0:QSIZE]

        world.right_msgs = [msg.messageid for msg in msgs]

        assert len(world.right_msgs) != 0

        cnf = ETC_DIR + '/config.yml'
        subps.Popen([
            'python', '-m', 'scalrpy.messaging', '--start', '-vvv', '-c', cnf
        ])
        time.sleep(2)

        ps = subps.Popen(['ps -ef'], shell=True, stdout=subps.PIPE)
        output = ps.stdout.read()
        ps.stdout.close()
        ps.wait()
        assert 'scalrpy.messaging --start' in output
    except Exception:
        assert False
    finally:
        db.session.close()
        db.session.remove()
Esempio n. 5
0
def start_daemon(step):
    """Start the msg_sender daemon after recording which message ids it must send."""
    db_manager = dbmanager.DBManager(world.config['connections']['mysql'],
                                     autoflush=False)
    db = db_manager.get_db()
    try:
        common_ratio = 120  # backoff factor between delivery attempts
        # Retry is due once last-attempt time plus attempts * ratio has passed.
        retry_due = (
            func.unix_timestamp(db.messages.dtlasthandleattempt)
            + db.messages.handle_attempts * common_ratio
            < func.unix_timestamp(func.now()))
        criteria = and_(
            db.messages.status == 0,
            db.messages.type == 'out',
            db.messages.message_version == 2,
            db.messages.message != '',
            db.messages.message != None,  # SQLAlchemy: renders IS NOT NULL
            retry_due)
        pending = db.messages.filter(criteria).order_by(desc(db.messages.id)).all()

        world.right_msgs = []
        for msg in pending:
            if msg.message_name != 'ExecScript':
                world.right_msgs.append(msg.messageid)

        assert len(world.right_msgs) != 0

        config = ETC_DIR + '/config.yml'
        assert lib.start_daemon('msg_sender', config)
    finally:
        db.session.remove()
Esempio n. 6
0
    def authenticate(cls, username, password, session=None):
        """Authenticate *username*/*password*.

        Returns a filled ``cls`` account object on success, or ``False``
        when the user is unknown or the password does not validate.  A
        numeric username of -1 returns a "try game" guest account.  On
        success the local user row is re-synced from the linked Discuz
        member record and both lastactivity timestamps are touched
        (``unix_timestamp()`` is a MySQL function).

        NOTE(review): ``session`` is accepted but never used here —
        confirm callers before removing it.
        """
        from sqlalchemy import func

        # Guest login check.  Only the int() conversion is guarded now:
        # the original bare ``except`` also swallowed errors raised by
        # cls() / _fill_trygame(), hiding real bugs.
        try:
            is_trygame = int(username) == -1
        except (ValueError, TypeError):
            is_trygame = False
        if is_trygame:
            acc = cls()
            acc._fill_trygame()
            return acc

        user = cls.find(username)

        if not user:
            return False

        if not cls.validate_by_password(user, password):
            return False

        # Sync the local user row from the authoritative Discuz member.
        dz_member = user.dz_member
        user.id       = dz_member.uid
        user.username = dz_member.username
        user.password = password_hash(password)
        user.email    = dz_member.email
        user.title    = dz_member.member_field.customstatus
        user.status   = dz_member.status

        acc = cls()
        acc._fill_account(user)

        # Touch activity timestamps on both sides.
        user.lastactivity = func.unix_timestamp()
        dz_member.member_status.lastactivity = func.unix_timestamp()

        return acc
Esempio n. 7
0
class EquityOrder(Base):
    """A single buy/sell order for an equity, placed by a user."""

    __tablename__ = "equity_order"
    __table_args__ = {"extend_existing": True}

    id = Column(Integer, primary_key=True)
    user_id = Column(
        Integer,
        # Defining the foreign key relationship between the `user_id` in this
        # table and the `id` column in the `user` table
        ForeignKey("user.id"),
        nullable=False,
        index=True,
    )
    ticker_id = Column(Integer, ForeignKey("ticker.id"), nullable=False)
    order_type = Column(Enum(OrderType), nullable=False)
    quantity = Column(Float, nullable=False)
    price = Column(Float, nullable=False)
    # Creation/last-update time as epoch seconds.  UNIX_TIMESTAMP() is a
    # MySQL function, so this model presumably targets MySQL — confirm.
    created_at = Column(
        Integer,
        default=func.unix_timestamp(),
        onupdate=func.unix_timestamp(),
        nullable=False,
    )

    # Defining a relationship between this table and the `Ticker` model so that
    # we can go from every row in this table to the corresponding user.
    # For example: We can do `EquityOrder.ticker.ticker_name` to find the
    # ticker name that corresponds to the ticket_id in a row of this table
    user = relationship("User", backref="order")
    ticker = relationship("Ticker", backref="order")

    def __init__(
        self,
        user_id: int,
        ticker_id: int,
        order_type: OrderType,
        # Fixed annotation: was the SQLAlchemy `Float` column type, not a
        # Python type; `float` matches the sibling `price` parameter.
        quantity: float,
        price: float,
    ):
        self.user_id = user_id
        self.ticker_id = ticker_id
        self.order_type = order_type
        self.quantity = quantity
        self.price = price

    def __repr__(self):
        return (f"EquityOrder(user_id={self.user_id} "
                f"symbol={self.ticker.ticker} "
                f"order_type={self.order_type} "
                f"quantity={self.quantity} "
                f"price={self.price}) ")
Esempio n. 8
0
 def _get_messages(self):
     """Fetch up to 500 outgoing v2 messages that are due for (re)delivery.

     A message qualifies when its last handle attempt plus a backoff of
     handle_attempts * CONFIG['cratio'] seconds lies in the past
     (unix_timestamp()/now() are MySQL functions).  Returns the selected
     column tuples, oldest id first.
     """
     db = self._db_manager.get_db()
     where = and_(
         db.messages.type == 'out', db.messages.status == 0,
         db.messages.messageid != '', db.messages.message_version == 2,
         func.unix_timestamp(db.messages.dtlasthandleattempt) +
         db.messages.handle_attempts * CONFIG['cratio'] <
         func.unix_timestamp(func.now()))
     # NOTE(review): .limit(500)[0:500] is redundant — slicing a Query
     # already applies LIMIT — but it is left untouched here to keep the
     # executed SQL identical.
     messages = db.session.query(
         db.messages.messageid, db.messages.server_id,
         db.messages.message_name, db.messages.message_format,
         db.messages.handle_attempts,
         db.messages.event_id, db.messages.message).filter(where).order_by(
             asc(db.messages.id)).limit(500)[0:500]
     return messages
Esempio n. 9
0
def get_player_graph_data(server, granularity=15, start_date=None, end_date=None):
    """Return averaged player counts for *server*, bucketed every
    *granularity* minutes between *start_date* and *end_date*.

    Defaults to the last 7 days ending now (UTC).  Returns a dict with
    millisecond 'start_time'/'end_time' and a list of {'time',
    'player_count'} points.
    """
    end_date = end_date or datetime.utcnow()
    start_date = start_date or end_date - timedelta(days=7)

    # Bucket key = round(epoch_seconds / bucket_seconds).  time.timezone is
    # subtracted because unix_timestamp() (MySQL) interprets the stored
    # value in the server's local zone — presumably shifting it back to
    # UTC; confirm the DB session timezone matches the app host's.
    result = db.session.query(
        label(
            'timestamp_group',
            func.round(
                (func.unix_timestamp(ServerStatus.timestamp) - time.timezone) / (granularity * 60)
            ),
        ),
        func.avg(ServerStatus.player_count)
    ).filter(
        ServerStatus.server == server,
        ServerStatus.timestamp >= start_date,
        ServerStatus.timestamp <= end_date
    ).group_by('timestamp_group').order_by(
        ServerStatus.timestamp
    ).all()

    # Convert each bucket index back to epoch milliseconds for charting.
    points = []
    for chunk, count in result:
        points.append({
            'time': int(chunk * granularity * 60 * 1000),
            'player_count': int(count)
        })

    return {
        'start_time': int(calendar.timegm(start_date.timetuple()) * 1000),
        'end_time': int(calendar.timegm(end_date.timetuple()) * 1000),
        'points': points
    }
Esempio n. 10
0
def get_player_graph_data(server,
                          granularity=15,
                          start_date=None,
                          end_date=None):
    """Average player counts for *server*, grouped into buckets of
    *granularity* minutes over [start_date, end_date] (default: the last
    seven days ending now, UTC).  Returns millisecond start/end times and
    a list of chart points."""
    end_date = end_date or datetime.utcnow()
    start_date = start_date or end_date - timedelta(days=7)

    bucket_seconds = granularity * 60
    # Bucket index: rounded epoch seconds (shifted by the local UTC offset)
    # divided by the bucket width.
    bucket_expr = label(
        'timestamp_group',
        func.round(
            (func.unix_timestamp(ServerStatus.timestamp) - time.timezone)
            / bucket_seconds),
    )
    rows = (db.session.query(bucket_expr,
                             func.avg(ServerStatus.player_count))
            .filter(ServerStatus.server == server,
                    ServerStatus.timestamp >= start_date,
                    ServerStatus.timestamp <= end_date)
            .group_by('timestamp_group')
            .order_by(ServerStatus.timestamp)
            .all())

    points = [{'time': int(bucket * bucket_seconds * 1000),
               'player_count': int(avg_count)}
              for bucket, avg_count in rows]

    return {
        'start_time': int(calendar.timegm(start_date.timetuple()) * 1000),
        'end_time': int(calendar.timegm(end_date.timetuple()) * 1000),
        'points': points
    }
Esempio n. 11
0
def sql_from_unixtime(dtfield, dtformat='%Y-%m-%d'):
    """
    Return a FROM_UNIXTIME query expression (MySQL-specific).

    :param dtfield: the column to convert; must be a datetime or timestamp column
    :param dtformat: format string passed as the second argument of FROM_UNIXTIME
    :return: the SQL expression FROM_UNIXTIME(UNIX_TIMESTAMP(dtfield), dtformat)
    """
    return func.from_unixtime(func.unix_timestamp(dtfield), dtformat)
Esempio n. 12
0
def query_wind_speeds(session, cutoff):
    """Return [(avg_mph, max_mph), ...] for samples recorded within the
    last *cutoff* seconds, oldest first (floats)."""
    # unix_timestamp()/now() are evaluated server-side (MySQL functions).
    window_start = func.unix_timestamp(func.now()) - cutoff
    rows = (session.query(WindMeasurement.avg_mph.label('avg'),
                          WindMeasurement.max_mph.label('max'))
            .filter(WindMeasurement.epoch >= window_start)
            .order_by(WindMeasurement.epoch.asc()))
    return [(float(row.avg), float(row.max)) for row in rows.all()]
Esempio n. 13
0
def query_groups(session, cutoff):
    """Histogram of average wind speeds over the last *cutoff* seconds:
    [(rounded_avg_mph, sample_count), ...]."""
    rounded_avg = func.round(WindMeasurement.avg_mph)
    window_start = func.unix_timestamp(func.now()) - cutoff
    grouped = (session.query(rounded_avg.label('avg'),
                             func.count(WindMeasurement.avg_mph).label('count'))
               .filter(WindMeasurement.epoch >= window_start)
               .group_by(func.round(WindMeasurement.avg_mph)))

    return [(int(row.avg), int(row.count)) for row in grouped.all()]
Esempio n. 14
0
 def _get_messages(self):
     """Fetch up to 500 outgoing v2 messages due for (re)delivery.

     Due = last handle attempt plus handle_attempts * CONFIG['cratio']
     seconds lies in the past (unix_timestamp()/now() are MySQL
     functions).  Results are ordered by ascending id.
     """
     db = self._db_manager.get_db()
     where = and_(
             db.messages.type == 'out',
             db.messages.status == 0,
             db.messages.messageid != '',
             db.messages.message_version == 2,
             func.unix_timestamp(db.messages.dtlasthandleattempt) +
             db.messages.handle_attempts *
             CONFIG['cratio'] < func.unix_timestamp(func.now()))
     # NOTE(review): .limit(500)[0:500] is redundant (slicing a Query also
     # applies LIMIT); kept as-is to avoid changing the executed SQL.
     messages = db.session.query(
             db.messages.messageid,
             db.messages.server_id,
             db.messages.message_name,
             db.messages.message_format,
             db.messages.handle_attempts,
             db.messages.event_id,
             db.messages.message).filter(where).order_by(asc(db.messages.id)).limit(500)[0:500]
     return messages
Esempio n. 15
0
    def getPlayerCount(cls, active=False):
        """Return the number of players, or None on a database error.

        With ``active`` truthy, only players seen within the last
        ``Config.active_player_timeframe`` seconds are counted.
        """
        try:
            query = Db.session.query(Player)
            if active is not False:
                cutoff = time.time() - Config.active_player_timeframe
                query = query.filter(
                    func.unix_timestamp(Player.last_seen) >= cutoff)
            return query.count()
        except exc.SQLAlchemyError as e:
            out('SQL Failure - getPlayerCount:', e)
            cls.reconnect()
            return None
Esempio n. 16
0
def query_current_vs_max(session, cutoff):
    """Return (current, maximum) wind gust speeds as floats.

    ``current`` is the most recent max_mph sample; ``maximum`` is the
    largest max_mph within the last *cutoff* seconds, floored at 1 via
    COALESCE so callers can safely divide by it.  unix_timestamp()/now()
    are MySQL functions.
    """
    q = session.query(WindMeasurement.max_mph.label('current')).order_by(
        WindMeasurement.epoch.desc()).limit(1)
    current = q.one().current

    q = session.query(
        func.coalesce(func.max(
            WindMeasurement.max_mph), 1).label('max')).filter(
                WindMeasurement.epoch >= func.unix_timestamp(func.now()) -
                cutoff)
    # Renamed local: ``max`` shadowed the builtin of the same name.
    max_mph = q.one().max

    return float(current), float(max_mph)
Esempio n. 17
0
def start_daemon(step):
    """Start the msg_sender daemon after recording which message ids it must send."""
    db_manager = dbmanager.DBManager(world.config['connections']['mysql'], autoflush=False)
    db = db_manager.get_db()
    try:
        CRATIO = 120
        # Outgoing v2 messages with a non-empty body whose retry backoff
        # (handle_attempts * CRATIO seconds since the last attempt) has
        # elapsed.  ``!= None`` is intentional: SQLAlchemy renders it as
        # IS NOT NULL; ``is not None`` would not build SQL.
        where = and_(db.messages.status == 0, db.messages.type == 'out',
                db.messages.message_version == 2,
                db.messages.message != '',
                db.messages.message != None,
                func.unix_timestamp(db.messages.dtlasthandleattempt) +
                db.messages.handle_attempts * CRATIO < func.unix_timestamp(func.now()))
        msgs = db.messages.filter(where).order_by(desc(db.messages.id)).all()

        # ExecScript messages are excluded — presumably the daemon under
        # test skips them as well; confirm against msg_sender behaviour.
        world.right_msgs = [
                msg.messageid for msg in msgs if msg.message_name != 'ExecScript']

        assert len(world.right_msgs) != 0

        config = ETC_DIR + '/config.yml'
        assert lib.start_daemon('msg_sender', config)
    finally:
        db.session.remove()
Esempio n. 18
0
def sensord(label, start, end, field_name, functions):
    """Build a downsampled time-series query for one sensor field.

    mtable.optimal() picks the table (base or pre-aggregated) best suited
    to the [start, end] window.  On aggregate tables columns are named
    '<field>_<aggfunc>'; on the base table just '<field>'.  Rows are
    bucketed by integer division (MySQL DIV) of the unix timestamp by
    seconds_per_sample_wanted.  Returns the query object, not its results.
    """
    seconds_per_sample_wanted, table, is_base_table = mtable.optimal(label, start, end)
    fields = list()
    for agg_func in functions:
        # str(agg_func()) renders e.g. 'avg()' — strip parens for the name.
        agg_func_name = str(agg_func()).replace('()', '')
        agg_field_name = field_name if is_base_table else '%s_%s' % (field_name, agg_func_name)
        fields.append(agg_func(table.c[agg_field_name]).label(agg_func_name))
    qry = session.query(table.c.timestamp, *fields) \
                 .group_by(func.round(func.unix_timestamp(table.c.timestamp).op('DIV')(seconds_per_sample_wanted))) \
                 .filter(table.c.timestamp >= start, table.c.timestamp <= end) \
                 .order_by(table.c.timestamp)
    # Empty/None label means "all probes".
    if label:
        qry = qry.filter(table.c.probe_label == label)
    return qry
Esempio n. 19
0
def query_by_grouped_time(session,
                          cutoff,
                          column,
                          group_func=func.date,
                          sql_func=func.avg):
    """Aggregate *column* per time bucket over the last *cutoff* seconds.

    group_func maps FROM_UNIXTIME(epoch) to a bucket key (default:
    calendar date) and sql_func aggregates within each bucket (default:
    AVG).  Rows with column < 1 are excluded.  Returns
    [(value, date), ...] ordered by date.  unix_timestamp/from_unixtime
    are MySQL functions.
    """
    q = session.query(
        sql_func(column).label('value'),
        group_func(func.from_unixtime(
            WindMeasurement.epoch)).label('date')).filter(
                WindMeasurement.epoch >=
                func.unix_timestamp(func.now()) - cutoff,
                column >= 1).group_by('date').order_by('date')

    grouped_values = [(float(row.value), row.date) for row in q.all()]
    return grouped_values
Esempio n. 20
0
def find_unvalidated_versions(time_delta, environment):
    """Find the latest deployments that are not validated in a given
       environment for a given amount of time
    """

    # Inner query: all non-invalidated deployments for the environment,
    # ordered newest-first.  The outer GROUP BY relies on the first row
    # per group being the latest deployment (MySQL's permissive GROUP BY
    # semantics — TODO confirm ONLY_FULL_GROUP_BY is off).
    subq = (Session.query(
        Package.pkg_name, Package.version, Package.revision,
        AppDefinition.app_type, AppDeployment.environment,
        AppDeployment.realized, AppDeployment.user,
        AppDeployment.status).join(AppDeployment).join(AppDefinition).filter(
            AppDeployment.status != 'invalidated').filter(
                AppDeployment.environment == environment).order_by(
                    AppDeployment.realized.desc(),
                    AppDeployment.id.desc()).subquery(name='t_ordered'))

    # One row per (app type, environment, package); keep groups whose
    # latest deployment hit '%complete' more than time_delta seconds ago.
    # NOTE(review): selected as AppDefinition.app_type but read back as
    # subq.c.appType — presumably the DB column is 'appType'; verify.
    return (Session.query(
        subq.c.pkg_name, subq.c.version, subq.c.revision, subq.c.appType,
        subq.c.environment,
        subq.c.realized, subq.c.user, subq.c.status).group_by(
            subq.c.appType, subq.c.environment, subq.c.pkg_name).having(
                and_(
                    subq.c.status.like('%complete'),
                    func.unix_timestamp(subq.c.realized) <
                    func.unix_timestamp(func.now()) - time_delta)).all())
Esempio n. 21
0
    def update(self, session, last_data_time):
        """Roll new base-table rows up into each aggregation table.

        For each (period_seconds, agg_table) pair in self.agg_map
        (Python 2: iteritems), find where the aggregate left off, skip
        periods without enough new data, then INSERT ... SELECT grouped
        rows (bucketed by unix_timestamp DIV period, per probe_label)
        into the aggregate table.
        """
        for period, agg_table in self.agg_map.iteritems():
            # Resume point: newest aggregated row, or the oldest base row
            # when the aggregate table is still empty.
            last = session.query(func.max(agg_table.c.timestamp)).scalar()
            if not last:
                last = session.query(func.min(self.base_table.timestamp).label('timestamp')).scalar()
            if (last_data_time - last).total_seconds() < period:
#                print "Not data for tailed agg at", period, \
#                      "last", last, \
#                      "last_data_time", last_data_time, \
#                      "seconds", (last_data_time - last).total_seconds(), \
#                      "days", (last_data_time - last).days
                continue
            # Advance one full period — presumably to avoid re-aggregating
            # a partially covered bucket; confirm intent.
            last += datetime.timedelta(seconds=period)
            funs = list()
            insp = inspect(self.base_table)
            # Build one aggregate expression per (field, function) pair.
            for field, pvt_funs in self.pvt.iteritems():
                funs.extend([fun(insp.columns[field]) for fun in pvt_funs])
            qry = session.query(self.base_table.timestamp, self.base_table.probe_label, *funs) \
                         .group_by(func.round(func.unix_timestamp(self.base_table.timestamp).op('DIV')(period)), self.base_table.probe_label) \
                         .filter(self.base_table.timestamp > last)
            session.execute(insert(agg_table).from_select(['timestamp', 'probe_label'] + self.pvt_fields, qry))
Esempio n. 22
0
def date_to_secs_mysql(col):
    """Return a MySQL expression converting datetime column *col* to epoch seconds."""
    return func.unix_timestamp(col)
Esempio n. 23
0
    def run(self):
        """Main dispatch loop: poll due messages and submit them to Gearman.

        Every ``timestep`` seconds, select outgoing v2 messages whose
        retry backoff has elapsed, skip ids already submitted, and enqueue
        a 'message.send' job per message.  DB errors are retried; a lost
        Gearman server triggers re-selection; anything else is re-raised.
        """
        db_manager = util.DBManager(self.config["connections"]["mysql"])
        db = db_manager.get_db()

        self._set_new_gm_server(block=True)

        timestep = 5
        while True:
            session = db.session
            try:
                gm_adm_client = gearman.GearmanAdminClient([self.gm_host])
                gm_adm_client.ping_server()

                # fix gearman v2.0.2 memory leak bug
                self.gm_client = gearman.GearmanClient([self.gm_host])

                self._update_submitted_jobs()

                # Backpressure: skip this iteration if the backlog is big.
                if len(self.submitted_jobs) > 5000:
                    LOG.warning("Too much of a submitted jobs. Skip iteration")
                    time.sleep(timestep)
                    continue

                where1 = and_(db.messages.type == "out", db.messages.status == 0, db.messages.message_version == 2)

                # Due for (re)delivery: last attempt + attempts * CRATIO
                # lies in the past (unix_timestamp()/now() are MySQL funcs).
                where2 = and_(
                    func.unix_timestamp(db.messages.dtlasthandleattempt) + db.messages.handle_attempts * CRATIO
                    < func.unix_timestamp(func.now())
                )

                if self.submitted_jobs:
                    # Exclude message ids already in flight.
                    where3 = and_(not_(db.messages.messageid.in_(self.submitted_jobs.keys())))
                    msgs = (
                        session.query(db.messages.messageid, db.messages.handle_attempts)
                        .filter(where1, where2, where3)
                        .order_by(asc(db.messages.id))
                        .all()[0:QSIZE]
                    )
                else:
                    msgs = (
                        session.query(db.messages.messageid, db.messages.handle_attempts)
                        .filter(where1, where2)
                        .order_by(asc(db.messages.id))
                        .all()[0:QSIZE]
                    )

                for msg in msgs:
                    # simple unique version
                    # (messageid sliced to 64 chars — presumably a Gearman
                    # unique-key length limit; confirm.)
                    req = self.gm_client.submit_job(
                        "message.send", msg.messageid, unique=msg.messageid[0:64], wait_until_complete=False
                    )
                    # sha256 unique version
                    """
                    req = self.gm_client.submit_job('message.send', msg.messageid,
                            unique=hashlib.sha256(msg.messageid).hexdigest(),
                            wait_until_complete=False)
                    """
                    self.gm_client.wait_until_jobs_accepted([req])
                    # Track the job together with the time after which it
                    # may be retried (next backoff step).
                    self.submitted_jobs.update(
                        {msg.messageid: (req, int(time.time() + CRATIO * (msg.handle_attempts + 1)))}
                    )
                    LOG.info("Sumbit message: msg_id:%s" % msg.messageid)

            except db_exc.OperationalError:
                LOG.error(util.exc_info())
                time.sleep(5)
            except gearman.errors.ServerUnavailable:
                LOG.error(util.exc_info())
                self._set_new_gm_server(block=True)
            except:
                LOG.error(util.exc_info())
                raise
            finally:
                session.close()
                session.remove()

            time.sleep(timestep)
Esempio n. 24
0
    def __call__(self, user_ids, session):
        """
        Parameters:
            user_ids    : list of mediawiki user ids to find edits for
            session     : sqlalchemy session open on a mediawiki database

        Returns:
            dictionary from user ids to a dictionary of the form:
            {
                'threshold': 1 for True, 0 for False,
                'time_to_threshold': number in hours or None,
                'censored': 1 for True, 0 for False
            }
        """

        threshold_hours = int(self.threshold_hours.data)
        threshold_secs = threshold_hours * 3600
        number_of_edits = int(self.number_of_edits.data)

        # Self-join on Revision: for each revision, 'number' counts how
        # many of the same user's revisions occurred at or before it (a
        # running edit count), restricted to edits within threshold_secs
        # of registration.  unix_timestamp() is MySQL-specific.
        Revision2 = aliased(Revision, name='r2')
        ordered_revisions = session \
            .query(
                Revision.rev_user,
                Revision.rev_timestamp,
                label('number', func.count()),
            ) \
            .join(MediawikiUser) \
            .join(Page) \
            .join(
                Revision2,
                and_(
                    Revision.rev_user == Revision2.rev_user,
                    Revision.rev_timestamp >= Revision2.rev_timestamp
                )
            ) \
            .group_by(Revision.rev_user) \
            .group_by(Revision.rev_timestamp) \
            .filter(Page.page_namespace.in_(self.namespaces.data)) \
            .filter(
                func.unix_timestamp(Revision.rev_timestamp) -
                func.unix_timestamp(MediawikiUser.user_registration) <= threshold_secs
            )

        o_r = self.filter(ordered_revisions, user_ids).subquery()

        # Outer-join each user to the revision where their running count
        # equals number_of_edits; NULL rev_timestamp means never reached.
        # ``!= None`` is intentional SQLAlchemy (renders IS NOT NULL), and
        # func.IF renders MySQL's IF().
        metric = session.query(
            MediawikiUser.user_id,
            label(
                Threshold.id,
                func.IF(o_r.c.rev_timestamp != None, 1, 0)
            ),
            label(
                Threshold.time_to_threshold_id,
                func.IF(
                    o_r.c.rev_timestamp != None,
                    (func.unix_timestamp(o_r.c.rev_timestamp) -
                        func.unix_timestamp(MediawikiUser.user_registration)) / 3600,
                    None
                )
            ),
            # Censored: threshold not reached, but the observation window
            # (registration + threshold_secs) has not closed yet.
            label(CENSORED, func.IF(
                o_r.c.rev_timestamp != None,
                0,
                func.IF(
                    func.unix_timestamp(MediawikiUser.user_registration) + threshold_secs
                    >
                    func.unix_timestamp(func.now()),
                    1,
                    0
                )
            ))
        ) \
            .outerjoin(
                o_r,
                and_(
                    MediawikiUser.user_id == o_r.c.rev_user,
                    o_r.c.number == number_of_edits))

        metric = self.filter(metric, user_ids, MediawikiUser.user_id)

        return {
            u.user_id: {
                Threshold.id: u.threshold,
                Threshold.time_to_threshold_id: u.time_to_threshold,
                CENSORED: u.censored,
            }
            for u in metric.all()
        }
Esempio n. 25
0
    def run(self):
        """Delivery loop: fetch due outgoing messages and POST them to servers.

        Each iteration selects up to config['qsize'] due v2 messages,
        resolves each target server's address, scalarizr key and control
        port, handles the EC2-VPC router special case, and fans the HTTP
        requests out to a gevent worker pool.  DB errors are logged and
        retried indefinitely.
        """
        db = self.db_manager.get_db()

        # Wait until all the tables this loop needs are accessible.
        while True:
            try:
                db.servers
                db.farm_roles
                db.farm_settings
                db.role_behaviors
                db.server_properties
                db.farm_role_settings
                break
            except (db_exc.OperationalError, db_exc.InternalError):
                logger.error(sys.exc_info())
                time.sleep(10)

        timestep = 5
        wrk_pool = gevent.pool.Pool(config["pool_size"])

        while True:
            try:
                where1 = and_(db.messages.type == "out", db.messages.status == 0, db.messages.message_version == 2)

                # Due when last attempt + attempts * cratio lies in the
                # past (unix_timestamp()/now() are MySQL functions).
                where2 = and_(
                    func.unix_timestamp(db.messages.dtlasthandleattempt)
                    + db.messages.handle_attempts * config["cratio"]
                    < func.unix_timestamp(func.now())
                )

                msgs = dict(
                    (msg.messageid, msg)
                    for msg in db.messages.filter(where1, where2)
                    .order_by(asc(db.messages.id))
                    .all()[0 : config["qsize"]]
                )

                if not msgs:
                    time.sleep(timestep)
                    continue

                # Bulk-load target servers plus their scalarizr keys and
                # control ports in three queries.
                srvs_id = [msg.server_id for msg in msgs.values()]
                srvs = dict((srv.server_id, srv) for srv in db.servers.filter(db.servers.server_id.in_(srvs_id)).all())

                where = and_(db.server_properties.server_id.in_(srvs_id), db.server_properties.name == "scalarizr.key")
                keys_query = db.server_properties.filter(where).all()
                keys = dict((el.server_id, el.value) for el in keys_query)

                where = and_(
                    db.server_properties.server_id.in_(srvs_id), db.server_properties.name == "scalarizr.ctrl_port"
                )
                ports_query = db.server_properties.filter(where).all()
                # 8013 is the fallback control port.
                ports = dict((el.server_id, el.value if el and el.value else 8013) for el in ports_query)

                tasks = []
                for msg in msgs.values():
                    try:
                        srv = srvs[msg.server_id]
                    except KeyError:
                        # Orphaned message: its server row no longer exists.
                        logging.warning(
                            "Server with server_id %s dosn't exist. Delete message %s" % (msg.server_id, msg.messageid)
                        )
                        db.delete(msg)
                        continue

                    if not self._server_is_active(srv):
                        continue

                    # Pick the destination address per configured policy.
                    ip = {
                        "public": srv.remote_ip,
                        "local": srv.local_ip,
                        "auto": srv.remote_ip if srv.remote_ip else srv.local_ip,
                    }[config["inst_conn_policy"]]

                    try:
                        key = keys[msg.server_id]
                    except KeyError:
                        logging.error("Server %s has not scalarizr key" % msg.server_id)
                        continue

                    try:
                        port = ports[msg.server_id]
                    except KeyError:
                        port = 8013

                    req_host = "%s:%s" % (ip, port)
                    data, headers = self._encrypt(msg.server_id, key, msg.message)

                    # VPC farms may need to route through a router role
                    # instead of contacting the server directly.
                    where = and_(db.farm_settings.farmid == srv.farm_id, db.farm_settings.name == "ec2.vpc.id")
                    is_vpc = db.farm_settings.filter(where).first()

                    if is_vpc:
                        where = and_(db.role_behaviors.behavior == "router")
                        vpc_roles = [behavior.role_id for behavior in db.role_behaviors.filter(where).all()]

                        where = and_(db.farm_roles.role_id.in_(vpc_roles), db.farm_roles.farmid == srv.farm_id)
                        db_farm_role = db.farm_roles.filter(where).first()

                        if db_farm_role:
                            logger.debug("Message:%s for VPC server:%s" % (msg.messageid, srv.server_id))
                            if srv.remote_ip:
                                ip = srv.remote_ip
                                req_host = "%s:%s" % (srv.remote_ip, port)
                            else:
                                where = and_(
                                    db.farm_role_settings.farm_roleid == db_farm_role.id,
                                    db.farm_role_settings.name == "router.vpc.ip",
                                )
                                ip_query = db.farm_role_settings.filter(where).first()
                                if ip_query and ip_query.value:
                                    # Send via the router on port 80; the
                                    # real receiver is passed in headers.
                                    ip = ip_query.value
                                    req_host = "%s:80" % ip
                                    headers["X-Receiver-Host"] = srv.local_ip
                                    headers["X-Receiver-Port"] = port
                                else:
                                    ip = None

                    if ip == None or ip == "None":
                        logger.warning("Server: %s Null ip, delete message %s" % (srv.server_id, msg.messageid))
                        db.delete(msg)
                        continue

                    url = "http://%s/%s" % (req_host, "control")
                    req = urllib2.Request(url, data, headers)

                    # Detach msg from the session before handing it to the
                    # worker pool (the session is closed in finally).
                    db.session.expunge(msg)
                    tasks.append({"msg": msg, "req": req})

                wrk_pool.map_async(self._send, tasks)
                gevent.sleep(0)
                wrk_pool.join()

            except (db_exc.OperationalError, db_exc.InternalError):
                logger.error(sys.exc_info())
                time.sleep(10)
            except Exception:
                logger.exception("Exception")
            finally:
                # Commit with retry so transient DB errors don't kill the loop.
                while True:
                    try:
                        db.commit()
                        db.session.close()
                        break
                    except (db_exc.OperationalError, db_exc.InternalError):
                        logger.error(sys.exc_info())
                        time.sleep(10)

            time.sleep(timestep)
    def __call__(self, user_ids, session):
        """
        Parameters:
            user_ids    : list of mediawiki user ids to find edits for
            session     : sqlalchemy session open on a mediawiki database

        Returns:
            dictionary from user ids to a dictionary of the form:
            {
                'survived': 1 for True, 0 for False,
                'censored': 1 for True, 0 for False
            }
        """

        survival_hours = int(self.survival_hours.data)
        sunset_in_hours = int(self.sunset_in_hours.data)
        number_of_edits = int(self.number_of_edits.data)

        # Per-user edit counts in the configured namespaces.
        revisions = (
            session.query(MediawikiUser.user_id, label("rev_count", func.count()))
            .join(Revision)
            .join(Page)
            .group_by(MediawikiUser.user_id)
            .filter(Page.page_namespace.in_(self.namespaces.data))
        )

        revisions = self.filter(revisions, user_ids, column=MediawikiUser.user_id)

        # sunset_in_hours is zero, so we use the first case [T+t,today]
        # (the +86400 presumably pads "now" by a day so very recent edits
        # still count — confirm intent).  unix_timestamp() is MySQL-only.
        if sunset_in_hours == 0:
            revisions = revisions.filter(
                between(
                    func.unix_timestamp(Revision.rev_timestamp) - func.unix_timestamp(MediawikiUser.user_registration),
                    (survival_hours * 3600),
                    func.unix_timestamp(func.now()) + 86400,
                )
            )
        # otherwise use the sunset_in_hours [T+t,T+t+s]
        else:
            revisions = revisions.filter(
                between(
                    func.unix_timestamp(Revision.rev_timestamp) - func.unix_timestamp(MediawikiUser.user_registration),
                    (survival_hours * 3600),
                    ((survival_hours + sunset_in_hours) * 3600),
                )
            )

        # Outer join so users with zero qualifying edits get rev_count 0.
        revisions = revisions.subquery()
        revs = session.query(
            MediawikiUser.user_id,
            MediawikiUser.user_registration,
            label("rev_count", func.coalesce(revisions.c.rev_count, 0)),
        ).outerjoin(revisions, MediawikiUser.user_id == revisions.c.user_id)

        revs = self.filter(revs, user_ids, MediawikiUser.user_id).subquery()

        # survived = enough edits in the window; censored = not survived
        # but the window has not closed yet.  func.IF renders MySQL IF().
        metric = session.query(
            revs.c.user_id,
            label(Survival.id, func.IF(revs.c.rev_count >= number_of_edits, 1, 0)),
            label(
                CENSORED,
                func.IF(
                    revs.c.rev_count >= number_of_edits,
                    0,
                    func.IF(
                        func.unix_timestamp(func.now())
                        < func.unix_timestamp(revs.c.user_registration) + (survival_hours + sunset_in_hours) * 3600,
                        1,
                        0,
                    ),
                ),
            ),
        )

        data = metric.all()

        metric_results = {u.user_id: {Survival.id: u.survived, CENSORED: u.censored} for u in data}

        # Users missing from the query result fall back to default_result.
        r = {uid: metric_results.get(uid, self.default_result) for uid in user_ids or metric_results.keys()}

        # self.debug_print(r, session, user_ids)
        return r
Esempio n. 27
0
def grafanaUrl(uid, parameters = None):
    """Build the Grafana URL for the dashboard identified by uid.

    Parameters:
        uid        : Grafana dashboard uid; "" returns the bare base URI.
        parameters : dict of entity ids used to fill dashboard template
                     variables (e.g. "siteId", "elementTemplateId",
                     "eventFrameTemplateId", "eventFrameId", "tagId").

    Returns:
        The dashboard URL string, or None (implicitly) when uid is
        unrecognized or no supported key is present in parameters.

    NOTE(review): every non-empty uid branch indexes into ``parameters``
    without a None check, so callers must pass a dict in those cases —
    confirm against callers.
    """
    if uid == "":
        return current_app.config["GRAFANA_BASE_URI"]
    elif uid == "ActiveEventFrameSummary":
        # Most specific scope wins: element template > event frame
        # template > site; unsupported keys fall through (returns None).
        if "elementTemplateId" in parameters:
            elementTemplate = ElementTemplate.query.get_or_404(parameters["elementTemplateId"])
            return current_app.config["GRAFANA_BASE_URI"] + \
                "/d/ActiveEventFrameSummary/active-event-frame-summary?orgId=1" + \
                "&var-enterprise={}".format(elementTemplate.Site.Enterprise.EnterpriseId) + \
                "&var-site={}".format(elementTemplate.Site.SiteId) + \
                "&var-elementTemplates={}".format(elementTemplate.ElementTemplateId) + \
                "&var-eventFrameTemplates=All" + \
                "&var-eventFrameAttributeTemplates=All"
        elif "eventFrameTemplateId" in parameters:
            eventFrameTemplate = EventFrameTemplate.query.get_or_404(parameters["eventFrameTemplateId"])
            return current_app.config["GRAFANA_BASE_URI"] + \
                "/d/ActiveEventFrameSummary/active-event-frame-summary?orgId=1" + \
                "&var-enterprise={}".format(eventFrameTemplate.ElementTemplate.Site.Enterprise.EnterpriseId) + \
                "&var-site={}".format(eventFrameTemplate.ElementTemplate.Site.SiteId) + \
                "&var-elementTemplates={}".format(eventFrameTemplate.ElementTemplate.ElementTemplateId) + \
                "&var-eventFrameTemplates={}".format(eventFrameTemplate.EventFrameTemplateId) + \
                "&var-eventFrameAttributeTemplates=All"
        elif "siteId" in parameters:
            site = Site.query.get_or_404(parameters["siteId"])
            return current_app.config["GRAFANA_BASE_URI"] + \
                "/d/ActiveEventFrameSummary/active-event-frame-summary?orgId=1" + \
                "&var-enterprise={}".format(site.Enterprise.EnterpriseId) + \
                "&var-site={}".format(site.SiteId) + \
                "&var-elementTemplates=All" + \
                "&var-eventFrameTemplates=All" + \
                "&var-eventFrameAttributeTemplates=All"
    elif uid == "ElementSummary":
        if "siteId" in parameters:
            site = Site.query.get_or_404(parameters["siteId"])
            return current_app.config["GRAFANA_BASE_URI"] + \
                "/d/ElementSummary/element-summary?orgId=1" + \
                "&var-enterprise={}".format(site.Enterprise.EnterpriseId) + \
                "&var-site={}".format(parameters["siteId"]) + \
                "&var-elementTemplates=All" + \
                "&var-elements=All" + \
                "&var-elementAttributeTemplates=All"
        elif "elementTemplateId" in parameters:
            elementTemplate = ElementTemplate.query.get_or_404(parameters["elementTemplateId"])
            # One repeated &var-elements=... parameter per element of the
            # template (Grafana multi-value variable syntax).
            elements = ""
            for element in elementTemplate.Elements:
                elements += "&var-elements={}".format(element.ElementId)
            return current_app.config["GRAFANA_BASE_URI"] + \
                "/d/ElementSummary/element-summary?orgId=1" + \
                "&var-enterprise={}".format(elementTemplate.Site.Enterprise.EnterpriseId) + \
                "&var-site={}".format(elementTemplate.Site.SiteId) + \
                "&var-elementTemplates={}".format(elementTemplate.ElementTemplateId) + \
                elements + \
                "&var-elementAttributeTemplates=All"
    elif uid == "ElementValuesGraph":
        element = Element.query.get_or_404(parameters["elementId"])
        return current_app.config["GRAFANA_BASE_URI"] + \
            "/d/ElementValuesGraph/element-values-graph?orgId=1" + \
            "&var-enterprise={}".format(element.ElementTemplate.Site.Enterprise.EnterpriseId) + \
            "&var-site={}".format(element.ElementTemplate.Site.SiteId) + \
            "&var-elementTemplates=All" + \
            "&var-elements={}".format(parameters["elementId"]) + \
            "&var-elementAttributeTemplates=All" + \
            "&var-lookups=All"
    elif uid == "EventFramesGraph":
        eventFrame = EventFrame.query.get_or_404(parameters["eventFrameId"])
        # Time window: pad the event frame by one second on each side.
        # The "000" suffix below converts seconds to the milliseconds
        # Grafana expects in its from/to query parameters.
        startTimestamp = EventFrame.query.with_entities(func.unix_timestamp(EventFrame.StartTimestamp)).filter_by(EventFrameId = eventFrame.EventFrameId). \
            one()[0] - 1
        if eventFrame.EndTimestamp:
            endTimestamp = EventFrame.query.with_entities(func.unix_timestamp(EventFrame.EndTimestamp)).filter_by(EventFrameId = eventFrame.EventFrameId). \
                one()[0] + 1
            return current_app.config["GRAFANA_BASE_URI"] + \
                "/d/EventFramesGraph/event-frames-graph?orgId=1" + \
                "&from={}000".format(startTimestamp) + \
                "&to={}000".format(endTimestamp) + \
                "&var-enterprise={}".format(eventFrame.EventFrameTemplate.ElementTemplate.Site.Enterprise.EnterpriseId) + \
                "&var-site={}".format(eventFrame.EventFrameTemplate.ElementTemplate.Site.SiteId) + \
                "&var-elementTemplate={}".format(eventFrame.EventFrameTemplate.ElementTemplate.ElementTemplateId) + \
                "&var-eventFrameTemplate={}".format(eventFrame.EventFrameTemplate.EventFrameTemplateId) + \
                "&var-eventFrame={}".format(parameters["eventFrameId"]) + \
                "&var-eventFrameAttributeTemplates=All" + \
                "&var-lookups=All"
        else:
            # Still-open event frame: graph from its start until "now".
            return current_app.config["GRAFANA_BASE_URI"] + \
                "/d/EventFramesGraph/event-frames-graph?orgId=1" + \
                "&from={}000".format(startTimestamp) + \
                "&to=now" + \
                "&var-enterprise={}".format(eventFrame.EventFrameTemplate.ElementTemplate.Site.Enterprise.EnterpriseId) + \
                "&var-site={}".format(eventFrame.EventFrameTemplate.ElementTemplate.Site.SiteId) + \
                "&var-elementTemplate={}".format(eventFrame.EventFrameTemplate.ElementTemplate.ElementTemplateId) + \
                "&var-eventFrameTemplate={}".format(eventFrame.EventFrameTemplate.EventFrameTemplateId) + \
                "&var-eventFrame={}".format(parameters["eventFrameId"]) + \
                "&var-eventFrameAttributeTemplates=All" + \
                "&var-lookups=All"
    elif uid == "TagValuesGraph":
        return current_app.config["GRAFANA_BASE_URI"] + \
            "/d/TagValuesGraph/tag-values-graph?orgId=1" + \
            "&var-enterprises=All" + \
            "&var-sites=All" + \
            "&var-areas=All" + \
            "&var-tags={}".format(parameters["tagId"]) + \
            "&var-lookups=All"
    def __call__(self, user_ids, session):
        """
        Compute the Threshold metric: whether each user reached
        number_of_edits edits within threshold_hours of registration.

        Parameters:
            user_ids    : list of mediawiki user ids to find edit for
            session     : sqlalchemy session open on a mediawiki database

        Returns:
            dictionary from user ids to a dictionary of the form:
            {
                'threshold': 1 for True, 0 for False,
                'time_to_threshold': number in hours or None,
                'censored': 1 for True, 0 for False
            }
        """

        threshold_hours = int(self.threshold_hours.data)
        threshold_secs  = threshold_hours * 3600
        number_of_edits = int(self.number_of_edits.data)

        # Self-join trick: for each revision, 'number' counts how many of
        # the same user's revisions have an earlier-or-equal timestamp,
        # i.e. the revision's running rank in that user's edit history.
        Revision2 = aliased(Revision, name='r2')
        ordered_revisions = session \
            .query(
                Revision.rev_user,
                Revision.rev_timestamp,
                label('number', func.count()),
            ) \
            .join(MediawikiUser) \
            .join(Page) \
            .join(
                Revision2,
                and_(
                    Revision.rev_user == Revision2.rev_user,
                    Revision.rev_timestamp >= Revision2.rev_timestamp
                )
            ) \
            .group_by(Revision.rev_user) \
            .group_by(Revision.rev_timestamp) \
            .filter(Page.page_namespace.in_(self.namespaces.data)) \
            .filter(
                func.unix_timestamp(Revision.rev_timestamp) -
                func.unix_timestamp(MediawikiUser.user_registration) <= threshold_secs
            )

        o_r = self.filter(ordered_revisions, user_ids).subquery()

        # Outer-join each user to the revision at which they hit exactly
        # number_of_edits; a NULL rev_timestamp therefore means the user
        # never reached the threshold inside the window.
        metric = session.query(
            MediawikiUser.user_id,
            label(
                Threshold.id,
                func.IF(o_r.c.rev_timestamp != None, 1, 0)
            ),
            label(
                Threshold.time_to_threshold_id,
                # Hours from registration to the threshold-reaching edit.
                func.IF(
                    o_r.c.rev_timestamp != None,
                    (func.unix_timestamp(o_r.c.rev_timestamp) -
                        func.unix_timestamp(MediawikiUser.user_registration)) / 3600,
                    None
                )
            ),
            # Censored when the threshold was not reached but the user's
            # observation window (registration + threshold) is still open.
            label(CENSORED, func.IF(
                o_r.c.rev_timestamp != None,
                0,
                func.IF(
                    func.unix_timestamp(MediawikiUser.user_registration) + threshold_secs
                    >
                    func.unix_timestamp(func.now()),
                    1,
                    0
                )
            ))
        ) \
            .outerjoin(
                o_r,
                and_(
                    MediawikiUser.user_id == o_r.c.rev_user,
                    o_r.c.number == number_of_edits))

        metric = self.filter(metric, user_ids, MediawikiUser.user_id)

        return {
            u.user_id: {
                Threshold.id                    : u.threshold,
                Threshold.time_to_threshold_id  : u.time_to_threshold,
                CENSORED                        : u.censored,
            }
            for u in metric.all()
        }
Esempio n. 29
0
    def run(self):
        """Main loop: deliver queued outgoing v2 messages to scalarizr
        agents over HTTP using a gevent worker pool.

        Polls the messages table, resolves each target server's address,
        key and control port, encrypts the payload and fans the requests
        out to ``self._send``.  Runs forever; database connectivity
        errors are logged and retried after a short sleep.
        """
        db = self.db_manager.get_db()

        # Wait until every table we need is reachable before starting.
        while True:
            try:
                db.servers
                db.farm_roles
                db.farm_settings
                db.role_behaviors
                db.server_properties
                db.farm_role_settings
                break
            except (db_exc.OperationalError, db_exc.InternalError):
                logger.error(sys.exc_info())
                time.sleep(10)

        timestep = 5
        wrk_pool = gevent.pool.Pool(config['pool_size'])

        while True:
            try:
                # Outgoing, unsent, protocol-v2 messages only.
                where1 = and_(db.messages.type == 'out',
                              db.messages.status == 0,
                              db.messages.message_version == 2)

                # Retry backoff: wait handle_attempts * cratio seconds
                # after the last delivery attempt before retrying.
                where2 = and_(
                        func.unix_timestamp(db.messages.dtlasthandleattempt) +
                        db.messages.handle_attempts *
                        config['cratio'] < func.unix_timestamp(func.now()))

                msgs = dict((msg.messageid, msg) for msg in
                        db.messages.filter(where1, where2).order_by(
                        asc(db.messages.id)).all()[0:config['qsize']])

                if not msgs:
                    time.sleep(timestep)
                    continue

                # Bulk-load target servers and their scalarizr keys/ports.
                srvs_id = [msg.server_id for msg in msgs.values()]
                srvs = dict((srv.server_id, srv) for srv in
                        db.servers.filter(db.servers.server_id.in_(srvs_id)).all())

                where = and_(db.server_properties.server_id.in_(srvs_id),
                             db.server_properties.name == 'scalarizr.key')
                keys_query = db.server_properties.filter(where).all()
                keys = dict((el.server_id, el.value) for el in keys_query)

                where = and_(
                    db.server_properties.server_id.in_(srvs_id),
                    db.server_properties.name == 'scalarizr.ctrl_port')
                ports_query = db.server_properties.filter(where).all()
                # Fall back to the default control port 8013 when unset.
                ports = dict(
                    (el.server_id, el.value if el and el.value else 8013)
                    for el in ports_query)

                tasks = []
                for msg in msgs.values():
                    try:
                        srv = srvs[msg.server_id]
                    except KeyError:
                        # Use the module logger (was the root `logging`
                        # module) so these records share the configured
                        # handlers with the rest of this loop.
                        logger.warning(
                            'Server with server_id %s doesn\'t exist. Delete message %s'
                            % (msg.server_id, msg.messageid))
                        db.delete(msg)
                        continue

                    if not self._server_is_active(srv):
                        continue

                    # Pick the destination address per configured policy.
                    ip = {
                        'public': srv.remote_ip,
                        'local': srv.local_ip,
                        'auto':
                        srv.remote_ip if srv.remote_ip else srv.local_ip
                    }[config['inst_conn_policy']]

                    try:
                        key = keys[msg.server_id]
                    except KeyError:
                        logger.error('Server %s has not scalarizr key' %
                                     msg.server_id)
                        continue

                    try:
                        port = ports[msg.server_id]
                    except KeyError:
                        port = 8013

                    req_host = '%s:%s' % (ip, port)
                    data, headers = self._encrypt(msg.server_id, key,
                                                  msg.message)

                    # VPC farms are reached through a router role instead
                    # of connecting to the server directly.
                    where = and_(db.farm_settings.farmid == srv.farm_id,
                                 db.farm_settings.name == 'ec2.vpc.id')
                    is_vpc = db.farm_settings.filter(where).first()

                    if is_vpc:
                        where = and_(db.role_behaviors.behavior == 'router')
                        vpc_roles = [
                            behavior.role_id for behavior in
                            db.role_behaviors.filter(where).all()
                        ]

                        where = and_(db.farm_roles.role_id.in_(vpc_roles),
                                     db.farm_roles.farmid == srv.farm_id)
                        db_farm_role = db.farm_roles.filter(where).first()

                        if db_farm_role:
                            logger.debug('Message:%s for VPC server:%s' %
                                         (msg.messageid, srv.server_id))
                            if srv.remote_ip:
                                ip = srv.remote_ip
                                req_host = '%s:%s' % (srv.remote_ip, port)
                            else:
                                # No public IP: relay via the router's VPC
                                # ip on port 80, telling the router where
                                # to forward the message.
                                where = and_(
                                    db.farm_role_settings.farm_roleid ==
                                    db_farm_role.id, db.farm_role_settings.name
                                    == 'router.vpc.ip')
                                ip_query = db.farm_role_settings.filter(
                                    where).first()
                                if ip_query and ip_query.value:
                                    ip = ip_query.value
                                    req_host = '%s:80' % ip
                                    headers['X-Receiver-Host'] = srv.local_ip
                                    headers['X-Receiver-Port'] = port
                                else:
                                    ip = None

                    # Identity check for None (was `== None`); the 'None'
                    # string check guards against a stringified NULL.
                    if ip is None or ip == 'None':
                        logger.warning(
                            'Server: %s Null ip, delete message %s' %
                            (srv.server_id, msg.messageid))
                        db.delete(msg)
                        continue

                    url = 'http://%s/%s' % (req_host, 'control')
                    req = urllib2.Request(url, data, headers)

                    # Detach the message from the session so worker
                    # greenlets can use it after the session is closed.
                    db.session.expunge(msg)
                    tasks.append({'msg': msg, 'req': req})

                wrk_pool.map_async(self._send, tasks)
                gevent.sleep(0)
                wrk_pool.join()

            except (db_exc.OperationalError, db_exc.InternalError):
                logger.error(sys.exc_info())
                time.sleep(10)
            except Exception:
                logger.exception('Exception')
            finally:
                # Commit/cleanup must succeed before the next iteration;
                # retry on transient DB errors.
                while True:
                    try:
                        db.commit()
                        db.session.close()
                        break
                    except (db_exc.OperationalError, db_exc.InternalError):
                        logger.error(sys.exc_info())
                        time.sleep(10)

            time.sleep(timestep)
Esempio n. 30
0
def project_duration(pid, session, count_nr=False):
    """Return the wall-clock duration, in seconds, spanned by a project's
    jobs: max(finish timestamp) - min(start timestamp).

    Parameters:
        pid      : project id to measure.
        session  : db session wrapper exposing ``conn.execute``.
        count_nr : when False (default), 'blast_nr' jobs are excluded
                   from the calculation; when True, all job types count.

    Returns:
        Duration in seconds, or 0 when the project has no jobs or the
        aggregate timestamps are NULL.
    """
    jn_mapping = netutils.get_table_object('jn_mapping', session)

    # Earliest start and latest finish, as unix timestamps.
    columns = [func.unix_timestamp(func.min(jn_mapping.c.s_ts)),
               func.unix_timestamp(func.max(jn_mapping.c.f_ts))]

    if not count_nr:
        s = select(columns, and_(jn_mapping.c.project_id == pid,
                                 func.unix_timestamp(jn_mapping.c.s_ts) != 0,
                                 jn_mapping.c.job_type != 'blast_nr'))
    else:
        # The original code left this branch empty (a SyntaxError).
        # Counting nr jobs means running the same query without the
        # job_type exclusion.
        s = select(columns, and_(jn_mapping.c.project_id == pid,
                                 func.unix_timestamp(jn_mapping.c.s_ts) != 0))

    res = session.conn.execute(s)
    row = res.fetchone()
    if row is None:
        sys.stderr.write("Warning - no jobs set for project: " +
                         str(pid) + "\n")
        return 0
    # Was `row[1] is None or row[1] is None`: the first operand must
    # check the minimum start time in row[0].
    elif row[0] is None or row[1] is None:
        sys.stderr.write("Warning - received None for either max finish "
                         "time or min start time.\n")
        return 0

    return row[1] - row[0]


def main():
    """CLI entry point: print per-project run-time durations.

    Project ids come either from --project-ids (explicit list) or from
    --pid-range start:end (inclusive).  Output is one tab-separated
    "pid<TAB>duration" line per project.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--project-ids', dest='pids',
                        default=None, nargs='+',
                        help='The project_ids of the projects you wish '
                             'to work on.')
    parser.add_argument('--pid-range', dest='pid_range',
                        default=None,
                        help='A range of project ids specified in the '
                             'form: start:end')
    parser.add_argument('--durations', dest='durations',
                        default=False, action='store_true',
                        help='Calculate run time durations for an '
                             'entire project.')

    args = parser.parse_args()

    s = netutils.make_db_session()

    if args.durations:
        pids = args.pids
        if args.pid_range is not None:
            parts = args.pid_range.split(":")
            # range() replaces the Python-2-only xrange(); the end of
            # the range is inclusive.
            pids = list(range(int(parts[0]), int(parts[1]) + 1))

        if not pids:
            # Neither --project-ids nor --pid-range was supplied; the
            # original code crashed here iterating over None.
            sys.stderr.write("No project ids given; use --project-ids "
                             "or --pid-range.\n")
            return

        for pid in pids:
            print("\t".join([str(pid),
                             str(project_duration(int(pid), s))]))



# Run the CLI only when executed as a script, not on import.
if __name__ == '__main__':
    main()
Esempio n. 31
0
    def run(self):
        """Main loop: poll unsent outgoing v2 messages from the database
        and submit each one as a 'message.send' gearman job, tracking
        submitted job ids to avoid duplicate submissions.

        Runs forever; DB and gearman-server errors are logged and the
        iteration retried (or a new gearman server selected).
        """
        db_manager = util.DBManager(self.config['connections']['mysql'])
        db = db_manager.get_db()

        self._set_new_gm_server(block=True)

        timestep = 5
        while True:
            session = db.session
            try:
                # Verify the gearman server is alive before submitting.
                gm_adm_client = gearman.GearmanAdminClient([self.gm_host])
                gm_adm_client.ping_server()

                # fix gearman v2.0.2 memory leak bug
                # (recreate the client each iteration instead of reusing it)
                self.gm_client = gearman.GearmanClient([self.gm_host])

                self._update_submitted_jobs()

                # Back-pressure: skip this round if too many jobs are
                # already in flight.
                if len(self.submitted_jobs) > 5000:
                    LOG.warning('Too much of a submitted jobs. Skip iteration')
                    time.sleep(timestep)
                    continue

                # Outgoing, unsent, protocol-v2 messages only.
                where1 = and_(db.messages.type == 'out',
                              db.messages.status == 0,
                              db.messages.message_version == 2)

                # Retry backoff: handle_attempts * CRATIO seconds after
                # the last delivery attempt.
                where2 = and_(
                        func.unix_timestamp(db.messages.dtlasthandleattempt) +\
                        db.messages.handle_attempts *\
                        CRATIO < func.unix_timestamp(func.now()))

                if self.submitted_jobs:
                    # Exclude messages whose jobs were already submitted.
                    where3 = and_(
                        not_(
                            db.messages.messageid.in_(
                                self.submitted_jobs.keys())))
                    msgs = session.query(
                        db.messages.messageid,
                        db.messages.handle_attempts).filter(
                            where1, where2, where3).order_by(
                                asc(db.messages.id)).all()[0:QSIZE]
                else:
                    msgs = session.query(
                        db.messages.messageid,
                        db.messages.handle_attempts).filter(
                            where1, where2).order_by(asc(
                                db.messages.id)).all()[0:QSIZE]

                for msg in msgs:
                    # simple unique version
                    # (gearman unique keys are limited to 64 bytes, hence
                    # the messageid truncation)
                    req = self.gm_client.submit_job('message.send',
                                                    msg.messageid,
                                                    unique=msg.messageid[0:64],
                                                    wait_until_complete=False)
                    # sha256 unique version
                    '''
                    req = self.gm_client.submit_job('message.send', msg.messageid,
                            unique=hashlib.sha256(msg.messageid).hexdigest(),
                            wait_until_complete=False)
                    '''
                    self.gm_client.wait_until_jobs_accepted([req])
                    # Remember the request and its next-retry deadline so
                    # _update_submitted_jobs can expire it later.
                    self.submitted_jobs.update({
                        msg.messageid:
                        (req,
                         int(time.time() + CRATIO * (msg.handle_attempts + 1)))
                    })
                    LOG.info('Sumbit message: msg_id:%s' % msg.messageid)

            except db_exc.OperationalError:
                LOG.error(util.exc_info())
                time.sleep(5)
            except gearman.errors.ServerUnavailable:
                LOG.error(util.exc_info())
                self._set_new_gm_server(block=True)
            except:
                # NOTE(review): bare except, but it logs and re-raises,
                # so unexpected errors (incl. KeyboardInterrupt) still
                # propagate after being recorded.
                LOG.error(util.exc_info())
                raise
            finally:
                session.close()
                session.remove()

            time.sleep(timestep)
Esempio n. 32
0
    def __call__(self, user_ids, session):
        """
        Compute the Survival metric for the given users.

        Parameters:
            user_ids    : list of mediawiki user ids to find edit for
            session     : sqlalchemy session open on a mediawiki database

        Returns:
            dictionary from user ids to a dictionary of the form:
            {
                'survived': 1 for True, 0 for False,
                'censored': 1 for True, 0 for False
            }
        """

        t_hours = int(self.survival_hours.data)
        s_hours = int(self.sunset_in_hours.data)
        edits_needed = int(self.number_of_edits.data)

        # Per-user count of revisions in the configured namespaces.
        rev_counts = (
            session.query(
                MediawikiUser.user_id,
                label('rev_count', func.count()))
            .join(Revision)
            .join(Page)
            .group_by(MediawikiUser.user_id)
            .filter(Page.page_namespace.in_(self.namespaces.data))
        )
        rev_counts = self.filter(rev_counts, user_ids,
                                 column=MediawikiUser.user_id)

        # Restrict counted revisions to the survival window, measured in
        # seconds since the user's registration.
        age = (func.unix_timestamp(Revision.rev_timestamp) -
               func.unix_timestamp(MediawikiUser.user_registration))
        window_start = t_hours * 3600
        if s_hours == 0:
            # No sunset configured: window is [T+t, today] (pad "now"
            # by a day).
            window_end = func.unix_timestamp(func.now()) + 86400
        else:
            # Sunset configured: window is [T+t, T+t+s].
            window_end = (t_hours + s_hours) * 3600
        rev_counts = rev_counts.filter(
            between(age, window_start, window_end))

        counts_sq = rev_counts.subquery()

        # Left-join every user to their windowed count, defaulting to 0.
        per_user = (
            session.query(
                MediawikiUser.user_id,
                MediawikiUser.user_registration,
                label('rev_count',
                      func.coalesce(counts_sq.c.rev_count, 0)))
            .outerjoin(counts_sq,
                       MediawikiUser.user_id == counts_sq.c.user_id)
        )
        per_user = self.filter(per_user, user_ids,
                               MediawikiUser.user_id).subquery()

        # Survived if the user made enough edits; censored if they did
        # not but their observation window has not yet closed.
        deadline = (func.unix_timestamp(per_user.c.user_registration) +
                    (t_hours + s_hours) * 3600)
        metric = session.query(
            per_user.c.user_id,
            label(Survival.id,
                  func.IF(per_user.c.rev_count >= edits_needed, 1, 0)),
            label(CENSORED,
                  func.IF(per_user.c.rev_count >= edits_needed, 0,
                          func.IF(func.unix_timestamp(func.now()) < deadline,
                                  1, 0))))

        results = {
            row.user_id: {
                Survival.id: row.survived,
                CENSORED: row.censored,
            }
            for row in metric.all()
        }

        return {
            uid: results.get(uid, self.default_result)
            for uid in user_ids or results.keys()
        }
Esempio n. 33
0
    def find_nearest(self,
                     direction,
                     current,
                     bbox_text,
                     requested_products,
                     patterns,
                     search_step=60):
        """Find the dataset timestamp nearest to *current* among the
        requested products.

        Parameters:
            direction          : 'nearest', 'after', 'before' or None;
                                 which side(s) of *current* to search.
            current            : datetime to search around (treated as UTC).
            bbox_text          : 'west,south,east,north' spatial filter,
                                 or None to disable it.
            requested_products : product ids; those without an existing
                                 table are skipped.
            patterns           : optional dict of product id -> substring
                                 to match in dataset names, or None.
            search_step        : minimum distance (seconds) from *current*
                                 a candidate must have.

        Returns:
            (new_ts, before_delta, after_delta): the chosen timestamp and
            the distances (seconds) to the nearest past/future dataset,
            -1 when none was found on that side.
        """
        insp = reflection.Inspector.from_engine(self.__engine)
        existing_tables = insp.get_table_names()
        products = [
            p for p in requested_products
            if Storage.get_product_table_name(p) in existing_tables
        ]

        # Optional spatial filter, built once.
        bbox_polygon = None
        if bbox_text is not None:
            west, south, east, north = bbox_text.split(',')
            bbox_polygon = 'POLYGON(({} {}, {} {}, {} {}, {} {}, {} {}))'
            bbox_polygon = bbox_polygon.format(west, north, east, north, east,
                                               south, west, south, west, north)

        # Hoisted out of the loop: it is loop-invariant, and the original
        # code raised NameError at the end when *products* was empty
        # because current_ts was only assigned inside the loop.
        current_ts = calendar.timegm(current.timetuple())

        def _base_query(mid_ts, dataset_cls, pattern):
            # Filters shared by the forward and backward searches.
            q = self.__session.query(mid_ts)
            if bbox_polygon is not None:
                q = q.filter(
                    sqlalchemy.or_(
                        dataset_cls.shape_text == 'POINT(0 0)',
                        func.Intersects(dataset_cls.shape_geometry,
                                        func.GeomFromText(bbox_polygon))))
            if pattern is not None:
                q = q.filter(
                    dataset_cls.dataset_name.like('%{}%'.format(pattern)))
            return q

        after_delta = float('inf')
        before_delta = float('inf')
        for product_id in products:
            pattern = None
            if patterns is not None:
                pattern = patterns.get(product_id, None)

            dataset_cls = self.get_product_table(product_id)

            # Midpoint of each dataset's time coverage, in unix seconds.
            mid_ts = ((func.unix_timestamp(dataset_cls.begin_datetime) +
                       func.unix_timestamp(dataset_cls.end_datetime)) /
                      2).label('mid_ts')

            # Look for datasets in the future
            if direction in ['nearest', 'after', None]:
                q = _base_query(mid_ts, dataset_cls, pattern)
                q = q.filter(mid_ts > current_ts + search_step)
                q = q.order_by(mid_ts).limit(1)
                result = q.first()
                if result is not None:
                    delta = int(result[0]) - current_ts
                    if delta < after_delta:
                        after_delta = delta

            # Look for datasets in the past
            if direction in ['nearest', 'before', None]:
                q = _base_query(mid_ts, dataset_cls, pattern)
                q = q.filter(mid_ts < current_ts - search_step)
                q = q.order_by(desc(mid_ts)).limit(1)
                result = q.first()
                if result is not None:
                    delta = current_ts - int(result[0])
                    if delta < before_delta:
                        before_delta = delta

        # Pick whichever side is closer; stay put if nothing was found.
        delta = min(after_delta, before_delta)
        if delta == float('inf'):
            new_ts = current_ts
        elif delta == after_delta:
            new_ts = current_ts + delta
        else:
            new_ts = current_ts - delta

        if before_delta == float('inf'):
            before_delta = -1
        if after_delta == float('inf'):
            after_delta = -1

        return new_ts, before_delta, after_delta