def __init__(self, database, status, expID, **config):
    """Draw the job-status monitoring diagram for the given experiments.

    Renders a radial diagram on this canvas: the database at the centre,
    one circle diagram per compute cluster around it, and one circle
    diagram per client on the outer ring.  Also fills the module-level
    ``imageMap`` / ``statusTable`` structures consumed by the web frontend.

    :param database: name/key of the EDACC database to inspect
    :param status:   list of job status codes to display, or the special
                     value ``['pm']`` for "problem mode" (crashed / blocked /
                     forever-running jobs)
    :param expID:    iterable of experiment IDs to include
    :param config:   extra canvas configuration (unused here)

    NOTE(review): this code relies on ``map()`` returning a list and on
    ``/`` doing integer-free float maths only where floats are involved —
    i.e. it appears to be Python 2 code; under Python 3 the ``map`` iterator
    would be exhausted after its first traversal.  Confirm interpreter.
    """
    self.config(height=winHeight, width=winWidth)
    # abort with 404 if the database is unknown
    db = models.get_database(database) or abort(404)
    # radii controlling the diagram layout (pixels)
    radiusDiagram = 0
    maxRadiusDiagram = 50
    clusterRadiusDiagram = 45
    dbRadiusDiagram = 40
    radiusClient = winHeight / 2 - maxRadiusDiagram
    radiusExpID = radiusClient - maxRadiusDiagram  # NOTE(review): only used for expIDCenter below, which is itself unused
    radiusCluster = radiusClient / 2
    # central node of the image map: the database itself
    imageMap['db'] = ({"id": str(database), "position": (winHeight / 2, winHeight / 2), "radius": dbRadiusDiagram})
    expName = {}
    numDBStatus = []        # per-status totals over the whole database
    dbTable = {}
    cluster = []            # distinct compute queues seen in the experiments
    numClusterStatus = []   # per-status totals for the current cluster
    clusterImageMap = []
    problemStatus = []
    problemText = []
    numDBJobs = 0.0
    i = 0                   # cluster index
    m = 0
    q = 0
    y1 = 6                  # legend y-coordinates
    y2 = 16
    #TODO: db query for job status, gridQueue and cluster matching expID
    #all database queries
    JOB_STATUS = dict((s.statusCode, s.description) for s in db.session.query(db.StatusCodes).all())
    gridQueue = dict((g.idgridQueue, g.location) for g in db.session.query(db.GridQueue).all())
    # collect experiment names and the distinct compute queues ("clusters") used
    for eID in expID:
        expName[eID] = db.session.query(db.Experiment.name).filter(db.Experiment.idExperiment == eID).first()
        for cQueue in db.session.query(db.ExperimentResult.computeQueue).filter(
                db.ExperimentResult.Experiment_idExperiment == eID).distinct():
            if (cQueue[0] is not None and int(cQueue[0]) not in cluster):
                if (cQueue[0] is not None) and (cQueue[0] != 0):
                    cluster.append(cQueue[0])
    # a single cluster is laid out as if there were two, to avoid overlap
    if len(cluster) == 1:
        numCluster = 2.0
    else:
        numCluster = float(len(cluster))
    #Legend
    if status == ['pm']:
        # problem mode: fixed pseudo-status codes and labels
        problemStatus = [-2, -1, 0]
        problemText = ['crashed', 'blocked', 'forever running']
        for t, s in zip(problemText, problemStatus):
            numClusterStatus.append(0)
            numDBStatus.append(0)
            self.create_text(winWidth - 100, y1 + 4, text=t, anchor=NW)
            self.create_rectangle(winWidth - 115, y1, winWidth - 105, y2, width=1, fill=constants.JOB_STATUS_COLOR[s])
            y1 = y1 + 26
            y2 = y2 + 26
    else:
        status = map(int, status)
        for c in status:
            numClusterStatus.append(0)
            numDBStatus.append(0)
            self.create_text(winWidth - 100, y1 + 4, text=JOB_STATUS[c], anchor=NW)
            self.create_rectangle(winWidth - 115, y1, winWidth - 105, y2, width=1, fill=constants.JOB_STATUS_COLOR[c])
            y1 = y1 + 26
            y2 = y2 + 26
    #paint the diagram in an double for-loop, outer circle is the client circle, inner circle is the cluster circle
    for c in cluster:
        clientID = []
        clientExpID = []
        clientExpName = []
        clientImageMap = []
        clusterStatusTable = {}
        numClusterJobs = 0.0
        j = 1   # client index within this cluster
        l = 0
        i = i + 1
        # position of this cluster on the inner ring, plus spoke from the centre
        clusterCenter = radian(radiusCluster, i / numCluster)
        self.create_line(winHeight / 2, winHeight / 2, clusterCenter)
        xy = center(clusterCenter[0], clusterCenter[1], clusterRadiusDiagram)
        #TODO: db query for the clients belonging to this cluster
        for eID in expID:
            exp = db.session.query(db.Experiment).get(eID)
            for client in exp.clients:
                if (client.gridQueue_idgridQueue == c):
                    if (str(client.idClient) not in clientID) and (int(client.idClient) != 0):
                        clientID.append(str(client.idClient))
                        name = expName[eID]
                        clientExpName.append(name[0])
                        clientExpID.append(eID)
        numClients = float(len(clientID))
        for cID, eID in zip(clientID, clientExpID):
            clientStatusTable = {}
            numClientStatus = []
            numClientJobs = 0.0
            k = 0
            # angular position of this client on the outer ring
            value = (2 * i - 1) / (2 * numCluster) + j / (numCluster * numClients)
            clientCenter = radian(radiusClient, value - 1 / (2 * numCluster * numClients))
            expIDCenter = radian(radiusExpID, value - 1 / (2 * numCluster * numClients))  # NOTE(review): unused
            self.create_line(clusterCenter, clientCenter)
            # shrink client diagrams so neighbours do not overlap
            rD = ((radiusClient * 3.14) / (numClients * numCluster)) - 1
            if rD < maxRadiusDiagram:
                radiusDiagram = rD
            else:
                radiusDiagram = maxRadiusDiagram
            cxy = center(clientCenter[0], clientCenter[1], radiusDiagram)
            clientImageMap.append({"id": cID, "position": clientCenter, "radius": radiusDiagram})
            clientStatusTable['expID'] = expName[eID][0]
            clientSTable = []
            if status == ['pm']:
                problemCount = []
                #TODO: db query for problem mode status display
                # crashed: status below -1
                crashed = db.session.query(db.ExperimentResult.status).filter(
                    db.ExperimentResult.status < -1).filter(db.ExperimentResult.computeNode == eID).count()
                problemCount.append(crashed)
                # blocked: status -1 with negative priority
                blocked = db.session.query(db.ExperimentResult.status).filter(
                    db.ExperimentResult.status == -1).filter(db.ExperimentResult.priority < 0).filter(
                    db.ExperimentResult.computeNode == eID).count()
                problemCount.append(blocked)
                # forever running: still running well past its CPU time limit
                foreverRunning = db.session.query(db.ExperimentResult.status) \
                    .filter(db.ExperimentResult.status == 0).filter(
                    func.timestampdiff(text("SECOND"), db.ExperimentResult.startTime, func.now()) \
                    > db.ExperimentResult.CPUTimeLimit + 20) \
                    .filter(db.ExperimentResult.computeNode == cID).count()
                problemCount.append(foreverRunning)
                # NOTE(review): crashed/blocked filter on computeNode == eID while
                # foreverRunning filters on computeNode == cID — confirm which is intended.
                for pC in problemCount:
                    numClientJobs += pC
                p = 0  # NOTE(review): unused
                for pC, cT in zip(problemCount, problemText):
                    # percentage with two decimal places
                    if (numClientJobs):
                        perc = round((pC / numClientJobs) * 10000)
                        perc = perc / 100
                    else:
                        perc = 0
                    numClientStatus.append(pC)
                    numClusterStatus[k] = numClusterStatus[k] + pC
                    clientSTable.append({"name": cT, "val": int(pC), "perc": str(perc) + '%'})
                    k = k + 1
                clientStatusTable['table'] = clientSTable
                self.circleDiagram(cxy, problemStatus, numClientStatus, expName[eID][0], radiusDiagram)
            else:
                for s in status:
                    #TODO: db query for the total number of jobs of this client
                    numClientJobs = numClientJobs + db.session.query(db.ExperimentResult.status).filter(
                        db.ExperimentResult.computeNode == cID).filter(db.ExperimentResult.status == s).count()
                for s in status:
                    #TODO: db query for the per-status job count of this client
                    count = float(
                        db.session.query(db.ExperimentResult.status).filter(db.ExperimentResult.status == s).filter(
                            db.ExperimentResult.computeNode == cID).count())
                    if (numClientJobs):
                        perc = round((count / numClientJobs) * 10000)
                        perc = perc / 100
                    else:
                        perc = 0
                    name = JOB_STATUS[s]
                    numClientStatus.append(count)
                    numClusterStatus[k] = numClusterStatus[k] + count
                    clientSTable.append({"name": name, "val": int(count), "perc": str(perc) + '%'})
                    k = k + 1
                clientStatusTable['table'] = clientSTable
                self.circleDiagram(cxy, status, numClientStatus, expName[eID][0], radiusDiagram)
            numClusterJobs = numClusterJobs + numClientJobs
            statusTable[cID] = clientStatusTable
            j = j + 1
        numDBJobs += numClusterJobs
        liste = list(set(clientExpName))
        clusterStatusTable['expID'] = liste
        q += 1
        if status == ['pm']:
            clusterStatus = problemStatus
        else:
            clusterStatus = status
        # prefer the grid location name, fall back to the raw queue id
        if c in gridQueue:
            location = gridQueue[c]
        else:
            location = str(c)
        self.circleDiagram(xy, clusterStatus, numClusterStatus, location, clusterRadiusDiagram)
        clusterSTable = []
        for s in clusterStatus:
            count = numClusterStatus[l]
            numClusterStatus[l] = 0  # reset accumulator for the next cluster
            numDBStatus[l] = numDBStatus[l] + count
            if (numClusterJobs):
                perc = round((count / numClusterJobs) * 10000)
                perc = perc / 100
            else:
                perc = 0
            name = JOB_STATUS[s]  # NOTE(review): in problem mode this looks up codes -2/-1/0 — confirm they exist
            clusterSTable.append({"name": name, "val": int(count), "perc": str(perc) + '%'})
            l = l + 1
        clusterStatusTable['table'] = clusterSTable
        if c in gridQueue:
            location = gridQueue[c]
        else:
            location = str(c)
        clusterImageMap.append(
            {"id": location, "position": clusterCenter, "clients": clientImageMap, "radius": radiusDiagram})
        statusTable[location] = clusterStatusTable
    imageMap['cluster'] = clusterImageMap
    #database summary diagram in the centre
    db_xy = center(winHeight / 2, winHeight / 2, dbRadiusDiagram)
    name = expName.values()
    dbTable['expID'] = name
    if status == ['pm']:
        dbStatus = problemStatus
    else:
        dbStatus = status
    dbSTable = []
    for s in dbStatus:
        count = numDBStatus[m]
        m = m + 1
        name = JOB_STATUS[s]
        if (numDBJobs):
            perc = round((count / numDBJobs) * 10000)
            perc = perc / 100
        else:
            perc = 0
        dbSTable.append({"name": name, "val": int(count), "perc": str(perc) + '%'})
    dbTable['table'] = dbSTable
    statusTable[str(database)] = dbTable
    self.circleDiagram(db_xy, dbStatus, numDBStatus, database, dbRadiusDiagram)
def _perform_sql(self):
    """Build and log the query that selects monitor rows due for a check.

    Selects unlocked ``monitor_obs`` rows whose ``last_check`` is at least
    ``frequency`` minutes old, ordered oldest-first.  The query is only
    logged here; execution is presumably done elsewhere — confirm caller.
    """
    # Fix: the unit argument was the bare undefined name ``MINUTE``; every
    # other timestampdiff() call in this codebase passes the SQL keyword via
    # text(...).  Also removed a dead ``filter`` dict that shadowed the
    # builtin and was never used.
    query = (
        self.session.query(self.tables.monitor_obs)
        .add_column((func.timediff(self.tables.monitor_obs.last_check, func.now())).label("test"))
        .filter(self.tables.monitor_obs.locked == False)  # noqa: E712 -- SQLAlchemy comparison, not identity
        .filter(
            func.timestampdiff(text("MINUTE"), self.tables.monitor_obs.last_check, func.now())
            <= func.TIMESTAMP(text("0000-00-00 00:") + self.tables.monitor_obs.frequency + ":0")
        )
        .order_by("last_check")
    )
    logger.debug(query)
def band(cls):
    """SQL expression classifying the observation's JCMT weather band.

    Returns a CASE expression over the mean WVM tau (average of the start
    and end readings): band 1 for tau < 0.05 up to band 5 for tau < 10.
    Yields ``'unknown'`` when either WVM timestamp is further than
    ``_TIMESTAMP_OFFSET_ALLOWANCE`` seconds from the matching observation
    timestamp (stale reading), or when tau falls outside every band.
    """
    # Hoist the repeated mean-tau expression (was inlined five times).
    mean_tau = (jcmt.COMMON.wvmtauen + jcmt.COMMON.wvmtaust) / 2.0
    start_stale = func.abs(func.timestampdiff(
        text('SECOND'), jcmt.COMMON.wvmdatst, jcmt.COMMON.date_obs)) > _TIMESTAMP_OFFSET_ALLOWANCE
    end_stale = func.abs(func.timestampdiff(
        text('SECOND'), jcmt.COMMON.wvmdaten, jcmt.COMMON.date_end)) > _TIMESTAMP_OFFSET_ALLOWANCE
    return case([
        (start_stale, 'unknown'),
        (end_stale, 'unknown'),
        (mean_tau < 0.05, 1),
        (mean_tau < 0.08, 2),
        (mean_tau < 0.12, 3),
        (mean_tau < 0.20, 4),
        (mean_tau < 10, 5),
    ], else_='unknown')
def get_dag_duration_info():
    """Return (dag_id, run_id, duration) rows for every currently running DagRun.

    The duration expression is chosen per DB driver: julianday arithmetic on
    sqlite, TIMESTAMPDIFF on MySQL, and plain timestamp subtraction elsewhere.
    """
    driver = Session.bind.driver
    sqlite_seconds = func.sum(
        (func.julianday(func.current_timestamp()) - func.julianday(DagRun.start_date)) * 86400.0)
    mysql_seconds = func.sum(func.timestampdiff(text('second'), DagRun.start_date, func.now()))
    generic_interval = func.sum(func.now() - DagRun.start_date)
    per_driver = {'pysqlite': sqlite_seconds, 'mysqldb': mysql_seconds}
    duration = per_driver.get(driver, generic_interval)

    with session_scope(Session) as session:
        query = session.query(DagRun.dag_id, DagRun.run_id, duration.label('duration'))
        query = query.group_by(DagRun.dag_id, DagRun.run_id)
        return query.filter(DagRun.state == State.RUNNING).all()
def top_games_by_play_time():
    """JSON list of the 15 most-played games with total play time in hours.

    Sessions without an end time and blacklisted users are excluded.
    """
    played_seconds = func.sum(
        func.timestampdiff(
            text('SECOND'),
            config.STATS_STATS_TABLE.c.startTime,
            config.STATS_STATS_TABLE.c.endTime)).label('time_played')
    stats_with_games = config.STATS_STATS_TABLE.join(
        config.STATS_GAMES_TABLE,
        config.STATS_GAMES_TABLE.c.id == config.STATS_STATS_TABLE.c.gameId,
    )
    rows = select([
        config.STATS_GAMES_TABLE.c.name,
        played_seconds,
    ]).select_from(stats_with_games).where(
        and_(
            config.STATS_STATS_TABLE.c.endTime != None,
            ~config.STATS_STATS_TABLE.c.userId.in_(user_blacklist),
        )).group_by(config.STATS_GAMES_TABLE.c.id).order_by(
            desc('time_played')).limit(15).execute().fetchall()
    top_games = [
        {'name': row.name, 'data': [float(row.time_played / 60 / 60)]}
        for row in rows
    ]
    return Response(json.dumps(top_games), mimetype='application/json')
def user_heatmap(user_id=0):
    """JSON heatmap data (top 50 games by play time, in hours) for one user.

    With ``user_id == 0`` the data covers all non-blacklisted users instead.
    """
    if user_id != 0:
        user_filter = config.STATS_STATS_TABLE.c.userId == user_id
    else:
        # userId != 0 matches everyone, i.e. a site-wide heatmap
        user_filter = config.STATS_STATS_TABLE.c.userId != user_id
    played_seconds = func.sum(
        func.timestampdiff(
            text('SECOND'),
            config.STATS_STATS_TABLE.c.startTime,
            config.STATS_STATS_TABLE.c.endTime)).label('time_played')
    games_with_stats = config.STATS_GAMES_TABLE.join(
        config.STATS_STATS_TABLE,
        config.STATS_GAMES_TABLE.c.id == config.STATS_STATS_TABLE.c.gameId)
    rows = select([
        config.STATS_GAMES_TABLE.c.name,
        played_seconds,
    ]).select_from(games_with_stats).where(
        and_(
            ~config.STATS_STATS_TABLE.c.userId.in_(user_blacklist),
            user_filter,
        )).group_by(config.STATS_STATS_TABLE.c.gameId).order_by(
            desc('time_played')).limit(50).execute().fetchall()
    data = []
    for row in rows:
        if row.time_played:
            hours = row.time_played / 60 / 60
            data.append({'name': row.name, 'value': float(hours), 'colorValue': int(hours)})
        else:
            data.append({'name': row.name, 'value': 0.0, 'colorValue': 0})
    return Response(json.dumps(data), mimetype='application/json')
def overview_data():
    """Aggregate dashboard payload: game/user counts, total hours played,
    plus the per-game and per-user overview/heatmap sub-reports."""
    data = {'time': 0}
    # begin overview data
    game_count_row = select([
        func.count(config.STATS_GAMES_TABLE.c.id),
    ]).execute().fetchone()
    data['games'] = game_count_row[0]

    total_seconds_row = select([
        func.sum(
            func.timestampdiff(text('SECOND'),
                               config.STATS_STATS_TABLE.c.startTime,
                               config.STATS_STATS_TABLE.c.endTime))
    ]).execute().fetchone()
    data['time'] = int(total_seconds_row[0] / 60 / 60)  # hours

    user_count_row = select([
        func.count(config.DISCORD_USER_TABLE.c.username),
    ]).execute().fetchone()
    data['users'] = user_count_row[0]
    # end overview data

    # sub-reports are other view functions; 0 means "all games"/"all users"
    data['game_overview'] = game_stats(0).json
    data['game_heatmap'] = game_heatmap(0).json
    data['user_overview'] = user_stats(0).json
    data['user_heatmap'] = user_heatmap(0).json
    data['game_count'] = game_user_count().json
    return data
def user_stats(user_id=0):
    """JSON totals for one user: hours played, distinct games, and days
    since stats collection began (2016-02-07).

    With ``user_id == 0`` the totals cover all non-blacklisted users.
    """
    data = {
        'hours': 0,
        'games': 0,
        'days': 0,
    }
    if user_id != 0:
        user_filter = config.STATS_STATS_TABLE.c.userId == user_id
    else:
        # matches every user -> site-wide totals
        user_filter = config.STATS_STATS_TABLE.c.userId != user_id
    print(user_id)
    rows = select([
        config.STATS_STATS_TABLE.c.gameId,
        func.sum(
            func.timestampdiff(
                text('SECOND'),
                config.STATS_STATS_TABLE.c.startTime,
                config.STATS_STATS_TABLE.c.endTime)).label('time_played')
    ]).where(
        and_(
            ~config.STATS_STATS_TABLE.c.userId.in_(user_blacklist),
            user_filter,
        )).group_by(config.STATS_STATS_TABLE.c.gameId).execute().fetchall()
    for row in rows:
        if row.time_played:
            data['hours'] += int(row.time_played / 60 / 60)
    data['games'] = len(set([x['gameId'] for x in rows]))
    data['days'] = (arrow.now() - arrow.get('2016-02-07')).days
    return Response(json.dumps(data), mimetype='application/json')
def online_client_count(cls):
    """SQL expression counting this user's clients whose last heartbeat
    arrived within ``HEART_SECONDS``."""
    belongs_to_user = Client.user_id == cls.id
    recently_seen = func.timestampdiff(
        text('second'), Client.update_time, func.now()) < app.config['HEART_SECONDS']
    return select([func.count(Client.id)]).where(and_(belongs_to_user, recently_seen))
def get_dag_duration_info() -> List[DagDurationInfo]:
    '''get duration of currently running DagRuns

    :return dag_info
    '''
    driver = Session.bind.driver  # pylint: disable=no-member
    durations = {
        # Fix: julianday() must wrap each timestamp separately and the two
        # results be subtracted; the original nested the whole subtraction
        # inside one julianday() call, producing a meaningless value on sqlite.
        'pysqlite': (func.julianday(func.current_timestamp()) -
                     func.julianday(DagRun.start_date)) * 86400.0,
        'mysqldb': func.timestampdiff(text('second'), DagRun.start_date, func.now()),
        'pyodbc': func.sum(func.datediff(text('second'), DagRun.start_date, func.now())),
        'default': func.now() - DagRun.start_date
    }
    duration = durations.get(driver, durations['default'])

    sql_res = Session.query(  # pylint: disable=no-member
        DagRun.dag_id,
        func.max(duration).label('duration')).group_by(
            DagRun.dag_id).filter(DagRun.state == State.RUNNING).all()

    res = []
    for i in sql_res:
        # sqlite/mysql expressions already evaluate to seconds; other
        # backends return an interval object exposing .seconds
        if driver == 'mysqldb' or driver == 'pysqlite':
            dag_duration = i.duration
        else:
            dag_duration = i.duration.seconds
        res.append(DagDurationInfo(dag_id=i.dag_id, duration=dag_duration))
    return res
class Obs(jcmt.COMMON):
    """ORM view over the jcmt COMMON table with two derived columns."""
    __tablename__ = 'COMMON'
    __table_args__ = (
        {'schema': 'jcmt', 'extend_existing': False}
    )
    # duration: observation length in whole seconds (date_end - date_obs),
    # computed in SQL via TIMESTAMPDIFF.
    duration = column_property(func.timestampdiff(text('SECOND'), jcmt.COMMON.date_obs, jcmt.COMMON.date_end))
    # scanmode: the scan pattern for scan-mode observations, otherwise the
    # raw sample mode.
    scanmode = column_property(case([(jcmt.COMMON.sam_mode == 'scan', jcmt.COMMON.scan_pat)], else_=jcmt.COMMON.sam_mode))
def game_stats(game_id=0):
    """JSON stats for a game: hours played, distinct players, percentage of
    all users who played it, and average session length.

    With ``game_id == 0`` the stats cover every game.
    """
    data = {
        'hours': 0,
        'players': 0,
        'percent_players': 0,
        'avg_session': 0,
    }
    if game_id != 0:
        game_filter = config.STATS_STATS_TABLE.c.gameId == game_id
    else:
        # matches every game -> global stats
        game_filter = config.STATS_STATS_TABLE.c.gameId != game_id
    rows = select([
        config.STATS_STATS_TABLE.c.userId,
        func.sum(
            func.timestampdiff(
                text('SECOND'),
                config.STATS_STATS_TABLE.c.startTime,
                config.STATS_STATS_TABLE.c.endTime)).label('time_played')
    ]).where(game_filter).group_by(
        config.STATS_STATS_TABLE.c.userId).execute().fetchall()
    players = []
    for row in rows:
        players.append(row.userId)
        data['hours'] += int(row.time_played / 60 / 60)
    if rows:
        session_total = len(
            select([config.STATS_STATS_TABLE.c.gameId
                    ]).where(game_filter).execute().fetchall())
        data['avg_session'] = round(float(data['hours'] / session_total), 2)
    else:
        data['avg_session'] = 0
    data['players'] = len(set(players))
    user_total = len(
        select([config.DISCORD_USER_TABLE.c.username]).execute().fetchall())
    data['percent_players'] = int(len(set(players)) / user_total * 100)
    return Response(json.dumps(data), mimetype='application/json')
def get_dag_duration_info():
    '''get duration of currently running DagRuns

    :return dag_info
    '''
    driver = Session.bind.driver  # pylint: disable=no-member
    durations = {
        # Fix: julianday() must wrap each timestamp separately and the two
        # results be subtracted; the original nested the whole subtraction
        # inside one julianday() call, producing a meaningless value on sqlite.
        'pysqlite': (func.julianday(func.current_timestamp()) -
                     func.julianday(DagRun.start_date)) * 86400.0,
        'mysqldb': func.timestampdiff(text('second'), DagRun.start_date, func.now()),
        'default': func.now() - DagRun.start_date
    }
    duration = durations.get(driver, durations['default'])

    return Session.query(  # pylint: disable=no-member
        DagRun.dag_id,
        func.max(duration).label('duration')).group_by(
            DagRun.dag_id).filter(DagRun.state == State.RUNNING).all()
def _getCloudwatchMetricReadinessPredicate():
    """ Generate an sqlAlchemy predicate that determines whether the metric is
    ready for data collection.

    :returns: sqlAlchemy predicate for use in `where` clause
    """
    # Timing rationale (must stay coordinated with
    # grok.app.aws.cloudwatch_utils.getMetricCollectionBackoffSeconds()):
    # Cloudwatch aggregates metric values into contiguous period-sized time
    # buckets whose true start times we cannot observe, and a bucket's final
    # value only becomes available roughly one period after the bucket ends.
    # Our virtual start times (14-day backoff plus period increments) are not
    # aligned with the real buckets, so we wait one period to clear the true
    # bucket end, another period for the value to be published, plus a
    # 60-second fudge for it to stabilize.
    last_reading = schema.metric.c.last_timestamp
    period = schema.metric.c.poll_interval
    elapsed = func.timestampdiff(text("SECOND"), last_reading, func.utc_timestamp())
    return (last_reading == None) | (elapsed >= (period + (period + 60)))
def __init__(self, database, expID, **config):
    """Draw the client-activity monitoring diagram for the given experiments.

    Renders a radial layout on this canvas: the database at the centre, one
    node per grid queue ("cluster") on an inner ring, and one circle per
    client on the outer ring, coloured 'active' or 'passive' depending on
    how recently the client reported.  Also fills the module-level
    ``imageMap`` used by the web frontend.

    :param database: name/key of the EDACC database to inspect
    :param expID:    iterable of experiment IDs to include
    :param config:   extra canvas configuration (unused here)
    """
    self.config(height=winHeight, width=winWidth)
    # abort with 404 if the database is unknown
    db = models.get_database(database) or abort(404)
    # radii controlling the diagram layout (pixels)
    maxRadiusDiagram = 50
    radiusClusterDiagram = 45
    radiusDBDiagram = 40
    radiusClient = winHeight / 2 - maxRadiusDiagram
    radiusCluster = radiusClient / 2
    clusterImageMap = []
    i = 1  # cluster index
    #Dictionaries which store, per client, the experiment it belongs to
    clientIsInExperimentName = {}
    clientIsInExperimentID = {}
    clusterHasClient = {}
    cluster = []
    for exp in expID:
        eClient = []
        expName = db.session.query(db.Experiment.name).filter(
            db.Experiment.idExperiment == exp).first()
        for e in db.session.query(
                db.Experiment_has_Client.Client_idClient
        ).filter(db.Experiment_has_Client.Experiment_idExperiment == exp):
            eClient.append(e[0])
            clientIsInExperimentName[int(e[0])] = expName[0]
            clientIsInExperimentID[int(e[0])] = exp
    #Dictionary which stores all clients for each grid
    for c in db.session.query(db.Client.gridQueue_idgridQueue).filter(
            db.Client.idClient == db.Experiment_has_Client.Client_idClient):
        if int(c[0]) not in cluster:
            cluster.append(int(c[0]))
    for clu in cluster:
        cClient = []
        for c in db.session.query(db.Client.idClient).filter(
                db.Client.gridQueue_idgridQueue == clu):
            cClient.append(int(c[0]))
        clusterHasClient[clu] = cClient
    #Dictionary which stores the location name for each grid
    gridQueue = dict((g.idgridQueue, g.location or g.name)
                     for g in db.session.query(db.GridQueue).all())
    #maps ClientID -> time difference in seconds since the client's last report
    timestampdiff = dict(
        db.session.query(
            db.Client.idClient,
            func.timestampdiff(text("SECOND"), db.Client.lastReport,
                               func.now())).all())
    #double loop which draws the diagram; a single cluster is laid out as if
    #there were two, to avoid overlap
    if len(clusterHasClient) == 1:
        numCluster = 2.0
    else:
        numCluster = float(len(clusterHasClient))
    #TODO: select by experiment ID and pass the ID on to the web frontend
    for c in clusterHasClient:
        clientImageMap = []
        j = 1  # client index within this cluster
        # position of this cluster on the inner ring, plus spoke from centre
        clusterCenter = radian(radiusCluster, i / numCluster)
        self.create_line(winHeight / 2, winHeight / 2, clusterCenter)
        xy = center(clusterCenter[0], clusterCenter[1], radiusClusterDiagram)
        numClients = float(len(clusterHasClient[c]))
        for cID in clusterHasClient[c]:
            # angular position of this client on the outer ring
            value = (2 * i - 1) / (2 * numCluster) + j / (numCluster * numClients)
            clientCenter = radian(
                radiusClient, value - 1 / (2 * numCluster * numClients))
            self.create_line(clusterCenter, clientCenter)
            # shrink client circles so neighbours do not overlap
            rD = ((radiusClient * 3.14) / (numClients * numCluster)) - 1
            if rD < maxRadiusDiagram:
                radiusDiagram = rD
            else:
                radiusDiagram = maxRadiusDiagram
            cxy = center(clientCenter[0], clientCenter[1], radiusDiagram)
            clientImageMap.append({
                "id": cID,
                "position": clientCenter,
                "radius": radiusDiagram,
                "exp": clientIsInExperimentID[cID]
            })
            # clients reporting within the last 10 seconds count as active
            if timestampdiff[cID] <= 10:
                self.circle(cxy, 'active')
            elif timestampdiff[cID] > 10:
                self.circle(cxy, 'passive')
            expText = clientIsInExperimentName[cID]  # NOTE(review): unused; create_text gets no text argument
            self.create_text(clientCenter)
            j = j + 1
        clusterImageMap.append({
            "id": str(c),
            "position": clusterCenter,
            "clients": clientImageMap
        })
        location = gridQueue[c]
        self.circleCluster(xy, location, radiusClusterDiagram)
        i = i + 1
    #draws the db point
    db_xy = center(winHeight / 2, winHeight / 2, radiusDBDiagram)
    self.circleCluster(db_xy, database, radiusDBDiagram)
    imageMap['cluster'] = clusterImageMap
def is_stuck(self, mins=10):
    """SQL predicate: the row was never updated, or its last update is more
    than *mins* minutes behind the database's UTC clock."""
    idle_minutes = func.timestampdiff(text('minute'), self.updated_at, func.utc_timestamp())
    never_updated = self.updated_at == None  # noqa: E711 -- SQLAlchemy IS NULL test
    return never_updated | (idle_minutes > mins)
class User(db.Model):
    """Licensed software user account, including binding/heartbeat state and
    the relationship to currently-online clients."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    software_id = db.Column(db.Integer, db.ForeignKey('softwares.id'), nullable=False)
    user_name = db.Column(db.String(255), unique=True, nullable=False)
    password = db.Column(db.String(255), nullable=False)  #md5
    password_question = db.Column(db.String(255), nullable=False)
    password_answer = db.Column(db.String(255), nullable=False)
    qq = db.Column(db.String(255), nullable=False)
    email = db.Column(db.String(255), nullable=False)
    phone = db.Column(db.String(255), nullable=False)
    version = db.Column(db.String(255), nullable=False)
    serial_no = db.Column(db.String(255), nullable=False)
    token = db.Column(db.String(255), nullable=False)
    client_count = db.Column(db.Integer, nullable=False)
    is_bind = db.Column(db.Boolean, nullable=False)
    remark = db.Column(db.String(255), nullable=False)
    unbind_date = db.Column(db.DateTime, nullable=False)
    terminate_date = db.Column(db.DateTime, nullable=False)
    is_enable = db.Column(db.Boolean, nullable=False)
    update_time = db.Column(db.DateTime, nullable=False)
    create_time = db.Column(db.DateTime, nullable=False)
    recharge_logs = db.relationship('RechargeLog', lazy='dynamic', backref='user')
    # clients = db.relationship('Client', backref='user')  # would not trigger online_client_count(cls)
    # clients = db.relationship('Client', lazy='dynamic', primaryjoin=and_(Client.user_id == id, func.timestampdiff(text('second'), Client.update_time, func.now()) < app.config['HEART_SECONDS']), backref='user')
    # all_client_count(self) -> client_count uses len(), so lazy loading cannot be used here.
    # clients: only the clients whose last heartbeat is within HEART_SECONDS.
    clients = db.relationship(
        'Client',
        primaryjoin=and_(
            Client.user_id == id,
            func.timestampdiff(text('second'), Client.update_time,
                               func.now()) < app.config['HEART_SECONDS']),
        backref='user')

    def __repr__(self):
        return '<User %r>' % self.id

    @hybrid_property
    def is_enable_text(self):
        # Chinese yes/no label for display ("是" = yes, "否" = no)
        if self.is_enable:
            return "是"
        else:
            return "否"

    @hybrid_property
    def all_client_count(self):
        # number of recently-seen clients (relationship is already filtered)
        return len(self.clients)

    @hybrid_property
    def online_client_count(self):
        # Python-side count: clients whose heartbeat is within HEART_SECONDS
        count = 0
        for client in self.clients:
            if (datetime.now() - client.update_time
                ).total_seconds() < app.config['HEART_SECONDS']:
                count = count + 1
        return count

    @online_client_count.expression
    def online_client_count(cls):
        # SQL-side equivalent of the property above, usable in queries
        # return (select([func.count(Client.id)]).where(Client.user_id == cls.id))
        return (select([func.count(Client.id)]).where(
            and_(
                Client.user_id == cls.id,
                func.timestampdiff(text('second'), Client.update_time,
                                   func.now()) < app.config['HEART_SECONDS'])))
def telemetry(age=0, sort_key='downloads', sort_direction='up'):
    """ Show firmware component information

    Renders the telemetry page: per-firmware download and report counts
    (success / failed / failed-with-known-issue), optionally restricted to
    the last *age* days, sorted by *sort_key* in *sort_direction*.
    """
    # only Analyst users can view this data
    if not g.user.check_acl('@view-analytics'):
        return _error_permission_denied(
            'Unable to view telemetry as not Analyst')

    # get data
    total_downloads = 0
    total_success = 0
    total_failed = 0
    total_issue = 0
    show_duplicate_warning = False
    fwlines = []
    age_seconds = age * 60 * 60 * 24
    for fw in db.session.query(Firmware).options(joinedload('reports')).all():

        # not allowed to view other vendors' firmware unless admin
        if not g.user.check_acl(
                '@admin') and fw.vendor.group_id != g.user.vendor.group_id:
            continue
        if len(fw.mds) == 0:
            continue
        if not fw.remote.is_public:
            continue

        # reports: either lifetime totals, or only those within the age window
        if age == 0:
            cnt_download = fw.download_cnt
            rpts = fw.reports
        else:
            # count downloads in the last `age` days via the Client table
            cnt_download = _execute_count_star(db.session.query(Client).
                filter(Client.firmware_id == fw.firmware_id).
                filter(func.timestampdiff(text('DAY'), Client.timestamp,
                                          func.current_timestamp()) < age))
            rpts = []
            for rpt in fw.reports:
                if (datetime.datetime.now() -
                        rpt.timestamp).total_seconds() < age_seconds:
                    rpts.append(rpt)

        # classify reports: state 2 = success, state 3 = failure (with a
        # known issue id counted separately)
        cnt_success = 0
        cnt_failed = 0
        cnt_issue = 0
        for rpt in rpts:
            if rpt.state == 2:
                cnt_success += 1
            if rpt.state == 3:
                if rpt.issue_id:
                    cnt_issue += 1
                else:
                    cnt_failed += 1
        total_success += cnt_success
        total_failed += cnt_failed
        total_issue += cnt_issue
        total_downloads += cnt_download

        # add lines
        res = {}
        res['downloads'] = cnt_download
        res['success'] = cnt_success
        res['failed'] = cnt_failed
        res['issue'] = cnt_issue
        res['names'] = _get_split_names_for_firmware(fw)
        res['version'] = fw.version_display
        if not res['version']:
            res['version'] = fw.mds[0].version
        res['nameversion'] = res['names'][0] + ' ' + res['version']
        res['firmware_id'] = fw.firmware_id
        res['target'] = fw.remote.name
        res['duplicate'] = len(fw.mds)
        fwlines.append(res)

        # show the user a warning when a firmware bundles several components
        if len(fw.mds) > 1:
            show_duplicate_warning = True

    # stable two-key sort: downloads first, then the requested key
    if sort_direction == 'down':
        fwlines.sort(key=lambda x: x['downloads'])
        fwlines.sort(key=lambda x: x[sort_key])
    else:
        fwlines.sort(key=lambda x: x['downloads'], reverse=True)
        fwlines.sort(key=lambda x: x[sort_key], reverse=True)

    return render_template('telemetry.html',
                           age=age,
                           sort_key=sort_key,
                           sort_direction=sort_direction,
                           firmware=fwlines,
                           group_id=g.user.vendor.group_id,
                           show_duplicate_warning=show_duplicate_warning,
                           total_failed=total_failed,
                           total_issue=total_issue,
                           total_downloads=total_downloads,
                           total_success=total_success)
def __init__(self, database, expID, **config):
    """Draw the client-activity monitoring diagram for the given experiments.

    Radial layout: the database at the centre, one node per grid queue
    ("cluster") on an inner ring, and one circle per client on the outer
    ring, coloured 'active' or 'passive' by heartbeat recency.  Fills the
    module-level ``imageMap`` consumed by the web frontend.

    :param database: name/key of the EDACC database to inspect
    :param expID:    iterable of experiment IDs to include
    :param config:   extra canvas configuration (unused here)
    """
    self.config(height=winHeight, width=winWidth)
    # abort with 404 if the database is unknown
    db = models.get_database(database) or abort(404)
    # radii controlling the diagram layout (pixels)
    maxRadiusDiagram = 50
    radiusClusterDiagram = 45
    radiusDBDiagram = 40
    radiusClient = winHeight / 2 - maxRadiusDiagram
    radiusCluster = radiusClient / 2
    clusterImageMap = []
    i = 1  # cluster index
    #Dictionaries which store, per client, the experiment it belongs to
    clientIsInExperimentName = {}
    clientIsInExperimentID = {}
    clusterHasClient = {}
    cluster = []
    for exp in expID:
        eClient = []
        expName = db.session.query(db.Experiment.name).filter(db.Experiment.idExperiment == exp).first()
        for e in db.session.query(db.Experiment_has_Client.Client_idClient).filter(
                db.Experiment_has_Client.Experiment_idExperiment == exp):
            eClient.append(e[0])
            clientIsInExperimentName[int(e[0])] = expName[0]
            clientIsInExperimentID[int(e[0])] = exp
    #Dictionary which stores all clients for each grid
    for c in db.session.query(db.Client.gridQueue_idgridQueue).filter(
            db.Client.idClient == db.Experiment_has_Client.Client_idClient):
        if int(c[0]) not in cluster:
            cluster.append(int(c[0]))
    for clu in cluster:
        cClient = []
        for c in db.session.query(db.Client.idClient).filter(db.Client.gridQueue_idgridQueue == clu):
            cClient.append(int(c[0]))
        clusterHasClient[clu] = cClient
    #Dictionary which stores the location name for each grid
    gridQueue = dict((g.idgridQueue, g.location or g.name) for g in db.session.query(db.GridQueue).all())
    #maps ClientID -> time difference in seconds since the client's last report
    timestampdiff = dict(db.session.query(db.Client.idClient,
                                          func.timestampdiff(text("SECOND"), db.Client.lastReport, func.now())).all())
    #double loop which draws the diagram; a single cluster is laid out as if
    #there were two, to avoid overlap
    if len(clusterHasClient) == 1:
        numCluster = 2.0
    else:
        numCluster = float(len(clusterHasClient))
    #TODO: select by experiment ID and pass the ID on to the web frontend
    for c in clusterHasClient:
        clientImageMap = []
        j = 1  # client index within this cluster
        # position of this cluster on the inner ring, plus spoke from centre
        clusterCenter = radian(radiusCluster, i / numCluster)
        self.create_line(winHeight / 2, winHeight / 2, clusterCenter)
        xy = center(clusterCenter[0], clusterCenter[1], radiusClusterDiagram)
        numClients = float(len(clusterHasClient[c]))
        for cID in clusterHasClient[c]:
            # angular position of this client on the outer ring
            value = (2 * i - 1) / (2 * numCluster) + j / (numCluster * numClients)
            clientCenter = radian(radiusClient, value - 1 / (2 * numCluster * numClients))
            self.create_line(clusterCenter, clientCenter)
            # shrink client circles so neighbours do not overlap
            rD = ((radiusClient * 3.14) / (numClients * numCluster)) - 1
            if rD < maxRadiusDiagram:
                radiusDiagram = rD
            else:
                radiusDiagram = maxRadiusDiagram
            cxy = center(clientCenter[0], clientCenter[1], radiusDiagram)
            clientImageMap.append(
                {"id": cID, "position": clientCenter, "radius": radiusDiagram, "exp": clientIsInExperimentID[cID]})
            # clients reporting within the last 10 seconds count as active
            if timestampdiff[cID] <= 10:
                self.circle(cxy, 'active')
            elif timestampdiff[cID] > 10:
                self.circle(cxy, 'passive')
            expText = clientIsInExperimentName[cID]  # NOTE(review): unused; create_text gets no text argument
            self.create_text(clientCenter)
            j = j + 1
        clusterImageMap.append({"id": str(c), "position": clusterCenter, "clients": clientImageMap})
        location = gridQueue[c]
        self.circleCluster(xy, location, radiusClusterDiagram)
        i = i + 1
    #draws the db point
    db_xy = center(winHeight / 2, winHeight / 2, radiusDBDiagram)
    self.circleCluster(db_xy, database, radiusDBDiagram)
    imageMap['cluster'] = clusterImageMap
def _getCloudwatchMetricReadinessPredicate(conn):
    """ Generate an sqlAlchemy predicate that determines whether the metric is
    ready for data collection.

    :returns: sqlAlchemy predicate for use in `where` clause
    """
    # Why wait "two periods plus 60 seconds"?  (Keep coordinated with
    # grok.app.aws.cloudwatch_utils.getMetricCollectionBackoffSeconds().)
    # Cloudwatch arranges aggregated values in contiguous period-sized time
    # buckets whose true start times are invisible to us, and a bucket's
    # final value is only published about one period after the bucket ends.
    # Our virtual start times (14-day backoff plus period increments) are not
    # aligned with the real buckets: one period clears the true bucket end,
    # a second period lets the value appear, and a 60-second fudge lets it
    # stabilize.
    never_collected = schema.metric.c.last_timestamp == None
    seconds_since_last = func.timestampdiff(
        text("SECOND"), schema.metric.c.last_timestamp, func.utc_timestamp())
    backoff = schema.metric.c.poll_interval + (schema.metric.c.poll_interval + 60)
    return never_collected | (seconds_since_last >= backoff)