def describe_alarms_for_metric(self, project_id, namespace, metric_name,
                               dimensions=None, period=None, statistic=None,
                               unit=None):
    """Return alarm rows attached to one metric, optionally narrowed.

    params:
        project_id: string
        namespace: string
        metric_name: string
        dimensions: dict or None (used only to resolve the metric key)
        period / statistic / unit: optional extra equality filters
    raises:
        exception.InvalidParameterValue when no matching metric exists
    """
    metric_key = self.get_metric_key(project_id, namespace, metric_name,
                                     dimensions)
    if not metric_key:
        raise exception.InvalidParameterValue("no metric")
    expr_list = [create_index_expression("metric_key", metric_key)]
    if period:
        # period is stored as an integer column
        expr_list.append(create_index_expression("period", int(period)))
    if statistic:
        expr_list.append(create_index_expression("statistic", statistic))
    if unit:
        expr_list.append(create_index_expression("unit", unit))
    # Lazy %-args: the list is only formatted when INFO logging is enabled.
    LOG.info("expr %s", expr_list)
    index_clause = pycassa.create_index_clause(expr_list)
    items = self.cf_metric_alarm.get_indexed_slices(index_clause)
    return items
def getEnv(self, key, iter=0, first=False):
    """Instantiate the model entity stored under *key* (and, recursively,
    its children) and return the list of created simulator objects.

    iter is the remaining recursion depth (0 stops before descending);
    first=True resets the accumulated transform to identity for the root.
    NOTE(review): Python 2 code (print statement, dict.has_key, list map).
    """
    val = self.base.getModel(key)
    print key, val
    obj = None
    obj_list = []
    parent_tf = tf.identity_matrix()
    # Known entity types provide their own transform; anything else falls
    # back to the raw translation/quaternion fields of the model record.
    if self.base.typeOf(key) == self.base.ROBOTS.ident:
        obj = self._getRobot(key)
        obj_list.append(obj)
        parent_tf = obj.GetTransform()
    elif self.base.typeOf(key) == self.base.OBJECTS.ident:
        obj = self._getObject(key)
        obj_list.append(obj)
        parent_tf = obj.GetTransform()
    elif self.base.typeOf(key) == self.base.LOCATIONS.ident:
        obj = self._getLocation(key)
        obj_list.append(obj)
        parent_tf = obj.GetTransform()
    elif self.base.typeOf(key) == self.base.SENSORS.ident:
        obj = self._getSensor(key)
        obj_list.append(obj)
        parent_tf = obj.GetTransform()
    else:
        if val.has_key("translation"):
            translation = map(lambda x: float(x), val["translation"].split(" "))
            parent_tf = tf.concatenate_matrices(parent_tf, tf.compose_matrix(translate = translation))
        if val.has_key("quat"):
            quat = map(lambda x: float(x), val["quat"].split(" "))
            # NOTE(review): axisAngleFromQuat yields an axis-angle vector that
            # is then passed to compose_matrix(angles=...) (Euler angles) —
            # confirm the two rotation conventions actually agree.
            rot = rave.axisAngleFromQuat(quat)
            m2 = tf.compose_matrix(angles = rot)
            parent_tf = tf.concatenate_matrices(parent_tf, m2)
    if first:
        # Root of the recursion: drop any transform accumulated above.
        parent_tf = tf.identity_matrix()
    if obj != None:
        obj.SetTransform(parent_tf)
    if iter==0:
        return obj_list
    # search for ancestors
    # (rows whose 'base' column references this key, i.e. child entities)
    child_expr = pycassa.create_index_expression('base', key)
    clild_clause = pycassa.create_index_clause([child_expr])
    for child_key, _ in self.base.col.get_indexed_slices(clild_clause):
        # Recurse one level down and re-root each child under this transform.
        child_obj = self.getEnv(child_key, iter-1)
        for obj in child_obj:
            if type(obj) != type(None):
                obj.SetTransform(tf.concatenate_matrices(parent_tf, obj.GetTransform()))
                obj_list.append(obj)
    return obj_list
def retrieve_date(start_time, end_time, groupby, generate, **kwargs):
    """Fetch weekly-bucketed rows whose startTime lies in [start_time, end_time].

    start_time and end_time should be like this '2010-01-01'.
    The available kwargs can be:
    "JobGroup","FinalMajorStatus","User","JobType","JobClass","ProcessingType",
    "UserGroup","FinalMinorStatus","Site"...

    groupby and generate are currently unused here (kept for interface
    compatibility with callers that pass them); the raw indexed-slice
    iterator is returned for the caller to aggregate.
    """
    start_timestamp = int(time.mktime(time.strptime(start_time, "%Y-%m-%d")))
    end_timestamp = int(time.mktime(time.strptime(end_time, "%Y-%m-%d")))
    expr_list = [
        # 604800 seconds = one week: select the weekly bucket family.
        pycassa.create_index_expression("bucketLength", 604800),
        pycassa.create_index_expression("startTime", start_timestamp,
                                        pycassa.GTE),
        pycassa.create_index_expression("startTime", end_timestamp,
                                        pycassa.LTE),
    ]
    if kwargs:
        # Extra equality filters, normalized through eval_prefs.
        for key, value in eval_prefs(**kwargs).items():
            expr_list.append(pycassa.create_index_expression(key, value))
    clause = pycassa.create_index_clause(expr_list, count=600000)
    return cf.get_indexed_slices(clause)
def getRandom(deck_id):
    """Pick one card of *deck_id* whose 'order' column is a random 1..3."""
    if not isinstance(deck_id, uuid.UUID):
        deck_id = uuid.UUID(deck_id)
    exprs = [
        pycassa.create_index_expression('deck', deck_id),
        pycassa.create_index_expression('order', random.randrange(1, 4)),
    ]
    clause = pycassa.create_index_clause(exprs, count=1)
    rows = list(PhraseCard.table.get_indexed_slices(clause))
    return PhraseCard.fromCassa(rows[0])
def getByEmail(email):
    """Look up the User row indexed by 'email'; return None when absent."""
    clause = pycassa.create_index_clause(
        [pycassa.create_index_expression('email', email)], count=1)
    rows = list(User.table.get_indexed_slices(clause))
    if not rows:
        return None
    return User.fromCassa(rows[0])
def getByUsername(username):
    """Look up the User row indexed by 'username'; return None when absent."""
    clause = pycassa.create_index_clause(
        [pycassa.create_index_expression('username', username)], count=1)
    rows = list(User.table.get_indexed_slices(clause))
    if not rows:
        return None
    return User.fromCassa(rows[0])
def retrieve_pie_date(start_time, finish_time, generate="unknow", groupby="unknow", **kwargs):
    """Aggregate *generate* per *groupby* value for a pie chart.

    start_time and finish_time should be like this '2010-01-01'.
    The available kwargs can be:
    "JobGroup","FinalMajorStatus","User","JobType","JobClass","ProcessingType",
    "UserGroup","FinalMinorStatus","Site"...

    Returns (groupby_value, total) pairs, or -1 when generate/groupby are
    not supplied.
    """
    if generate == "unknow" or groupby == "unknow":
        print("error")
        return -1
    start_timestamp = int(time.mktime(time.strptime(start_time, "%Y-%m-%d")))
    # fixed: was 'finsih_timestamp' (typo in a local name)
    finish_timestamp = int(time.mktime(time.strptime(finish_time, "%Y-%m-%d")))
    if len(kwargs) == 0:
        print("no kwargs")
        # No extra filters: read the precomputed cumulative column family
        # dedicated to this groupby dimension.
        groupby = groupby.lower()
        cf_name = "new_cum_groupby_%s" % groupby
        cf = pycassa.ColumnFamily(pool, cf_name)
        groupby_list = []
        sum_generate_list = []
        for key, columns in cf.get_range(
                column_count=365000,
                column_start=(generate, start_time),
                column_finish=(generate, finish_time)):
            groupby_list.append(key)
            # sum() replaces the original manual accumulate-and-reset loop
            sum_generate_list.append(sum(columns.values()))
        pairs = zip(groupby_list, sum_generate_list)
    else:
        print("has kwargs")
        # Filters present: fall back to the raw weekly buckets and aggregate
        # client-side.
        cf = pycassa.ColumnFamily(pool, 'bucket_data_cli')
        expr_list = []
        for key, value in eval_prefs(**kwargs).items():
            expr_list.append(pycassa.create_index_expression(key, value))
        # 604800 seconds = one week
        expr_list.append(pycassa.create_index_expression("bucketLength", 604800))
        expr_list.append(pycassa.create_index_expression("startTime", start_timestamp, pycassa.GTE))
        expr_list.append(pycassa.create_index_expression("startTime", finish_timestamp, pycassa.LTE))
        clause = pycassa.create_index_clause(expr_list, count=600000)
        data_dict = Counter()
        for key, columns in cf.get_indexed_slices(clause):
            data_dict[columns[groupby]] += columns[generate]
        pairs = data_dict.items()
    return pairs
def load_alarms(self, metric_key):
    """Return all alarm rows indexed under *metric_key* ({} when none)."""
    clause = pycassa.create_index_clause(
        [pycassa.create_index_expression("metric_key", metric_key)])
    try:
        return self.cf_metric_alarm.get_indexed_slices(clause)
    except pycassa.NotFoundException:
        return {}
def get_metric_alarm_key(self, project_id, alarm_name):
    """Return the row key of the alarm *alarm_name* in *project_id*, or None."""
    clause = pycassa.create_index_clause([
        pycassa.create_index_expression("project_id", project_id),
        pycassa.create_index_expression("alarm_name", alarm_name),
    ])
    rows = self.cf_metric_alarm.get_indexed_slices(clause)
    # First matching key wins; None when no row matches.
    return next((key for key, _ in rows), None)
def _describe_alarms_by_names(self, project_id, alarm_names):
    """Yield (key, columns) for every alarm in *alarm_names* of *project_id*."""
    for name in alarm_names:
        clause = pycassa.create_index_clause([
            pycassa.create_index_expression("project_id", project_id),
            pycassa.create_index_expression("alarm_name", name),
        ])
        for row in self.cf_metric_alarm.get_indexed_slices(clause):
            yield row
def filterByRound(round_id):
    """Gets the nomination by the round."""
    if not isinstance(round_id, uuid.UUID):
        round_id = uuid.UUID(round_id)
    clause = pycassa.create_index_clause(
        [pycassa.create_index_expression('round_id', round_id)])
    return [Nomination.fromCassa(row)
            for row in Nomination.table.get_indexed_slices(clause)]
def filterByUser(user_id):
    """Gets the party members by party."""
    if not isinstance(user_id, uuid.UUID):
        user_id = uuid.UUID(user_id)
    clause = pycassa.create_index_clause(
        [pycassa.create_index_expression('user_id', user_id)])
    return [GameMember.fromCassa(row)
            for row in GameMember.table.get_indexed_slices(clause)]
def load_alarms(self, metric_key):
    """Return alarm rows for *metric_key*; log and return {} when none exist."""
    clause = pycassa.create_index_clause(
        [pycassa.create_index_expression("metric_key", metric_key)])
    try:
        rows = self.cf_metric_alarm.get_indexed_slices(clause)
    except pycassa.NotFoundException:
        LOG.debug("no alarm found")
        rows = {}
    return rows
def delete_metric(self, key):
    """Delete the metric at row *key* together with its dependents.

    Dependent alarm rows are removed first (via delete_metric_alarm), then
    the stat-archive row, then the metric row itself.  A NotFoundException
    raised anywhere in that sequence is logged and swallowed, which can
    leave a partially deleted metric behind.
    """
    try:
        expr_list = [create_index_expression("metric_key", key)]
        index_clause = pycassa.create_index_clause(expr_list)
        items = self.cf_metric_alarm.get_indexed_slices(index_clause)
        # Remove every alarm referencing this metric before the metric
        # row itself disappears.
        for k, v in items:
            project_id = v.get('project_id')
            self.delete_metric_alarm(k, project_id)
        self.scf_stat_archive.remove(key)
        self.cf_metric.remove(key)
        LOG.debug("metric is deleted(%s)" % str(key))
    except pycassa.NotFoundException:
        LOG.error("failed to delete metric(%s)" % str(key))
def describe_alarm_history(self, project_id, alarm_name=None, end_date=None,
                           history_item_type=None, max_records=100,
                           next_token=None, start_date=None):
    """
    params:
        project_id: string
        alarm_name: string
        end_date: datetime
        history_item_type: string (ConfigurationUpdate | StateUpdate | Action)
        max_records: integer
        next_token: string (uuid type)
        start_date: datetime
    """
    # '' (not None) is the start-key sentinel pycassa expects for page one.
    next_token = uuid.UUID(next_token) if next_token else ''
    # Consistently use the bare create_index_expression helper (the original
    # mixed it with pycassa.create_index_expression for the first entry).
    expr_list = [create_index_expression("project_id", project_id)]
    if alarm_name:
        expr_list.append(create_index_expression("alarm_name", alarm_name))
    if end_date:
        expr_list.append(create_index_expression("timestamp", end_date, LTE))
    if start_date:
        expr_list.append(create_index_expression("timestamp", start_date,
                                                 GTE))
    if history_item_type:
        expr_list.append(create_index_expression("history_item_type",
                                                 history_item_type))
    index_clause = pycassa.create_index_clause(expr_list=expr_list,
                                               start_key=next_token,
                                               count=max_records)
    items = self.cf_alarm_history.get_indexed_slices(index_clause)
    return items
def get_metric_key(self, project_id, namespace, metric_name, dimensions):
    """Return the row key of the metric identified by the four coordinates,
    or None when no such metric row exists."""
    packed = pack_dimensions(dimensions)
    clause = pycassa.create_index_clause([
        pycassa.create_index_expression("project_id", project_id),
        pycassa.create_index_expression("name", metric_name),
        pycassa.create_index_expression("namespace", namespace),
        pycassa.create_index_expression("dimensions", packed),
    ])
    rows = self.cf_metric.get_indexed_slices(clause)
    return next((key for key, _ in rows), None)
def describe_alarms(self, project_id, action_prefix=None,
                    alarm_name_prefix=None, alarm_names=None, max_records=100,
                    next_token=None, state_value=None):
    """
    params:
        project_id: string
        action_prefix: TODO: not implemented yet.
        alarm_name_prefix: string
        alarm_names: string list
        max_records: integer
        next_token: string (uuid type)
        state_value: string (OK | ALARM | INSUFFICIENT_DATA)
    """
    # Exact-name lookups bypass prefix/state filtering entirely.
    if alarm_names:
        return self._describe_alarms_by_names(project_id, alarm_names)
    next_token = uuid.UUID(next_token) if next_token else ''
    expr_list = [create_index_expression("project_id", project_id)]
    if alarm_name_prefix:
        # Prefix match expressed as a [prefix, prefix_end) range scan.
        expr_list.append(create_index_expression("alarm_name",
                                                 alarm_name_prefix, GTE))
        expr_list.append(create_index_expression(
            "alarm_name", utils.prefix_end(alarm_name_prefix), LT))
    if state_value:
        expr_list.append(create_index_expression("state_value", state_value))
    # Lazy %-args: the list is only formatted when INFO logging is enabled.
    LOG.info("expr %s", expr_list)
    index_clause = create_index_clause(expr_list=expr_list,
                                       start_key=next_token,
                                       count=max_records)
    items = self.cf_metric_alarm.get_indexed_slices(index_clause)
    return items
def get_metric_key(self, project_id, namespace, metric_name, dimensions):
    """Return the row key of the metric identified by the four coordinates,
    or None when no such metric row exists."""
    packed = utils.pack_dimensions(dimensions)
    clause = pycassa.create_index_clause([
        pycassa.create_index_expression("project_id", project_id),
        pycassa.create_index_expression("name", metric_name),
        pycassa.create_index_expression("namespace", namespace),
        pycassa.create_index_expression("dimensions", packed),
    ])
    rows = self.cf_metric.get_indexed_slices(clause)
    # Equivalent to the original for/else: first key, else None.
    return next((key for key, _ in rows), None)
def describe_alarms(self, project_id, action_prefix=None,
                    alarm_name_prefix=None, alarm_names=None, max_records=100,
                    next_token=None, state_value=None):
    """
    params:
        project_id: string
        action_prefix: TODO: not implemented yet.
        alarm_name_prefix: string
        alarm_names: string list
        max_records: integer
        next_token: string (uuid type)
        state_value: string (OK | ALARM | INSUFFICIENT_DATA)
    """
    # Exact-name lookups bypass prefix/state filtering entirely.
    if alarm_names:
        return self._describe_alarms_by_names(project_id, alarm_names)
    next_token = uuid.UUID(next_token) if next_token else ''
    expr_list = [create_index_expression("project_id", project_id)]
    if alarm_name_prefix:
        # Prefix match expressed as a [prefix, prefix_end) range scan.
        expr_list.append(create_index_expression("alarm_name",
                                                 alarm_name_prefix, GTE))
        expr_list.append(create_index_expression(
            "alarm_name", utils.prefix_end(alarm_name_prefix), LT))
    if state_value:
        expr_list.append(create_index_expression("state_value", state_value))
    # Lazy %-args: the list is only formatted when INFO logging is enabled.
    LOG.info("expr %s", expr_list)
    index_clause = create_index_clause(expr_list=expr_list,
                                       start_key=next_token,
                                       count=max_records)
    items = self.cf_metric_alarm.get_indexed_slices(index_clause)
    return items
def list_metrics(self, project_id, namespace=None, metric_name=None,
                 dimensions=None, next_token=""):
    """List metric rows of a project, optionally filtered by namespace,
    name and an exact (packed) dimensions match.

    Returns a generator of (row_key, metric_dict) pairs.  The unused inner
    helper check_dimension (a subset test that was never called) has been
    removed; dimensions are filtered server-side via the packed exact-match
    index expression below.
    """
    def to_dict(v):
        # 'dimensions' is stored as a JSON string in the column family.
        return {'project_id': v['project_id'],
                'dimensions': json.loads(v['dimensions']),
                'name': v['name'],
                'namespace': v['namespace']}

    # '' (not None) is pycassa's "start from the first row" sentinel.
    next_token = uuid.UUID(next_token) if next_token else ''
    expr_list = [pycassa.create_index_expression("project_id", project_id)]
    if namespace:
        expr_list.append(pycassa.create_index_expression("namespace",
                                                         namespace))
    if metric_name:
        expr_list.append(pycassa.create_index_expression("name", metric_name))
    if dimensions:
        packed_dimensions = pack_dimensions(dimensions)
        expr_list.append(pycassa.create_index_expression("dimensions",
                                                         packed_dimensions))
    index_clause = pycassa.create_index_clause(expr_list,
                                               start_key=next_token,
                                               count=501)
    items = self.cf_metric.get_indexed_slices(index_clause)
    metrics = ((k, to_dict(v)) for k, v in items)
    return metrics
def get_by_symbol(self, symbol):
    """Return the STOCKS rows whose indexed 'symbol' column equals *symbol*."""
    clause = pycassa.create_index_clause(
        [pycassa.create_index_expression("symbol", symbol)])
    return self.STOCKS.get_indexed_slices(clause)
def _list_metrics(self, project_id, namespace=None, metric_name=None,
                  filters=None, next_token=""):
    """Return one page of metric rows for a project.

    Returns (metrics, new_next_token, skip_first): metrics is a list of
    (row_key, metric_dict) pairs; new_next_token is the key to resume the
    next page from (stringified, or None when nothing was scanned);
    skip_first tells the caller whether the resume key was already emitted
    in this page.  filters is an optional
    (full_filter, name_filter, value_filter) triple applied client-side.
    """
    def to_dict(v):
        # 'dimensions' is stored as a JSON string in the column family.
        return {
            'project_id': v['project_id'],
            'dimensions': json.loads(v['dimensions']),
            'name': v['name'],
            'namespace': v['namespace']
        }

    def apply_filter(metric, filters):
        # No filters -> every row matches.
        if not filters:
            return True
        dimensions = metric.get('dimensions')
        dimensions = json.loads(dimensions) if dimensions else {}
        full_filter, name_filter, value_filter = filters
        # full_filter: every (name, value) pair must be present.
        if full_filter:
            if not set(full_filter).issubset(set(dimensions.items())):
                return False
        # name_filter: the dimension-name set must match exactly.
        if name_filter:
            if set(dimensions.keys()) != set(name_filter):
                return False
        # value_filter: some filter value must be a substring of some
        # dimension value.
        if value_filter:
            for v_in_dim in dimensions.values():
                for v in value_filter:
                    if v in utils.utf8(v_in_dim):
                        return True
            return False
        return True

    # '' (not None) is pycassa's "start from the first row" sentinel.
    next_token = uuid.UUID(next_token) if next_token else ''
    new_next_token = None
    expr_list = [
        pycassa.create_index_expression("project_id", project_id),
    ]
    if namespace:
        expr = pycassa.create_index_expression("namespace", namespace)
        expr_list.append(expr)
    if metric_name:
        expr = pycassa.create_index_expression("name", metric_name)
        expr_list.append(expr)
    index_clause = pycassa.create_index_clause(expr_list, count=501,
                                               start_key=next_token)
    items = self.cf_metric.get_indexed_slices(index_clause,
                                              column_count=100)
    last_token = None
    metrics = []
    for key, value in items:
        # Track the last key scanned (not just matched) for pagination.
        new_next_token = key
        if value and apply_filter(value, filters):
            last_token = key
            metrics.append((key, to_dict(value)))
    # If the final scanned row also matched, the resume key was already
    # returned, so the next page should skip it.
    skip_first = last_token and last_token == new_next_token
    LOG.info("%s %s %s", next_token, new_next_token, last_token)
    new_next_token = str(new_next_token) if new_next_token \
        else new_next_token
    return metrics, new_next_token, skip_first
def generate_linegraph(start_time="",end_time="",generate="unknow",groupby="unknow",cumulative=False,**kwargs):
    '''generate:CPUTime,DiskSpace,ExecTime,InputSandBoxSize,OutPutSandBoxSize,JobCount
    groupby:site,user,processingtype,country,grid
    dafaule cumulative=Falese,if you want to generate a cumulative graph,set it True

    Returns an in-memory PNG (cStringIO buffer) of a stacked line/area graph
    of *generate* per *groupby* between the two dates; also writes the file
    'linegraph'.  Returns -1 when generate/groupby are not supplied.
    NOTE(review): the len(kwargs)==0 branch runs the kwargs-filtered indexed
    query while the else branch prints 'no kwargs' and ignores kwargs — this
    looks inverted relative to retrieve_pie_date; confirm intended behavior.
    '''
    if((generate=="unknow") or(groupby=="unknow")):
        print "error"
        return -1
    start_timestamp = int(time.mktime(time.strptime(start_time,"%Y-%m-%d")))
    end_timestamp = int(time.mktime(time.strptime(end_time,"%Y-%m-%d")))
    if len(kwargs)==0:
        # Raw weekly buckets, aggregated client-side per groupby value.
        cf = pycassa.ColumnFamily(pool,'bucket_data_cli')
        expr_list = []
        # 604800 seconds = one-week buckets
        bucketLength_expr = pycassa.create_index_expression("bucketLength",604800)
        expr_list.append(bucketLength_expr)
        start_expr = pycassa.create_index_expression("startTime",start_timestamp,pycassa.GTE)
        expr_list.append(start_expr)
        end_expr = pycassa.create_index_expression("startTime",end_timestamp,pycassa.LTE)
        expr_list.append(end_expr)
        # No-op when kwargs is empty (see NOTE above).
        for key,value in eval_prefs(**kwargs).items():
            kwarg_expr = pycassa.create_index_expression(key,value)
            expr_list.append(kwarg_expr)
        clause = pycassa.create_index_clause(expr_list,count=600000)
        start = time.time()
        # groupby value -> Counter(startTime -> accumulated generate value)
        data_dict = {}
        for key,columns in cf.get_indexed_slices(clause):
            data_dict.setdefault(columns[groupby],Counter())
            data_dict[columns[groupby]][columns["startTime"]] += columns[generate]
        #print data_dict
        # Sort each series by timestamp into (time, value) pair lists.
        new_data_dict = {}
        for key,values in data_dict.items():
            new_data_dict.setdefault(key,[])
            new_data_dict[key] = sorted(data_dict[key].items(),key=lambda k:k[0])
        #print new_data_dict
        #return 0
        if cumulative:
            print "True"
            # Replace each series' values with their running totals.
            for key,values in new_data_dict.items():
                cum_value = make_cumulative_data(zip(*values)[1])
                new_data_dict[key] = zip(zip(*values)[0],cum_value)
            #print new_data_dict
            #return 0
        groupby_list = []
        time_list = []
        value_list = []
        fig = pylab.figure()
        ax = pylab.axes()
        zorder = 0.0
        y_max = 0.0
        y_max_list = []
        # One filled polygon per groupby series, taller series drawn behind.
        for key,values in new_data_dict.items():
            groupby_list.append(key)
            color = generate_color(str(key))
            time_list,value_list = zip(*values)
            x_min = time_list[0]
            x_max = time_list[-1]
            y_tmp_max = max(value_list)
            if y_max<y_tmp_max:
                y_max = y_tmp_max
            y_max_list.append(y_max)
            # Close the polygon down to y=0 at both ends.
            pairs = [(x_min,0)]+values+[(x_max,0)]
            poly = pylab.Polygon(pairs,fill=True,facecolor=color,linewidth=.2,zorder=-y_max)
            ax.add_patch(poly)
            #zorder -=0.1
        #stamp_start_time = int(time.mktime(time.strptime(start_time,'%Y-%m-%d')))
        #stamp_end_time = int(time.mktime(time.strptime(end_time,'%Y-%m-%d')))
        ax.set_xlim(start_timestamp,end_timestamp+1)
        ax.set_ylim(0,max(y_max_list)+1)
        ax.set_xticklabels([time.strftime('%y-%m-%d',time.localtime(x_time)) for x_time in ax.get_xticks()])
        #set the legend
        fontP = FontProperties()
        #set legend size
        fontP.set_size('xx-small')
        ax.legend(groupby_list,loc=0,bbox_to_anchor=(1,1.05),prop = fontP)
        pylab.title('%s groupby %s from %s to %s'%(generate,groupby,start_time,end_time))
        end = time.time()
        pylab.xlabel('Processing time is: %.5ss'%(end-start))
        pylab.savefig('linegraph')
        imgData = cStringIO.StringIO()
        pylab.savefig(imgData, format='png')
        imgData.seek(0)
        pylab.close()
        return imgData
    else:
        print 'no kwargs'
        # Precomputed per-groupby cumulative column family (kwargs ignored
        # here — see NOTE in the docstring).
        groupby = groupby.lower()
        cf_name = "new_cum_groupby_%s"%groupby
        cf = pycassa.ColumnFamily(pool,cf_name)
        groupby_list = []
        time_list = []
        value_list = []
        y_max_list = []
        fig = pylab.figure()
        ax = pylab.axes()
        start = time.time()
        zorder = 0.0
        y_max = 0.0
        for key,columns in cf.get_range(column_count=365000,column_start=(generate,start_time),column_finish=(generate,end_time)):
            #print key,columns
            #'''
            groupby_list.append(key)
            color = generate_color(str(key))
            # Column names are (generate, 'YYYY-MM-DD') composites; convert
            # the date part to an epoch timestamp for the x axis.
            for name,value in columns.items():
                #time_list.append(int(time.mktime(time.strptime(name[1],'%Y-%m-%d'))/86400))
                time_list.append(int(time.mktime(time.strptime(name[1],'%Y-%m-%d'))))
                value_list.append(value)
            x_min = time_list[0]
            x_max = time_list[-1]
            if cumulative:
                value_list = make_cumulative_data(value_list)
            y_tmp_max = max(value_list)
            if y_max<y_tmp_max:
                y_max=y_tmp_max
            pairs = zip(time_list,value_list)
            # Close the polygon down to y=0 at both ends.
            pairs = [(x_min,0)]+pairs+[(x_max,0)]
            #print pairs
            time_list = [] #clean time_list
            value_list = [] #clean value_list
            poly = pylab.Polygon(pairs,fill=True,facecolor=color,linewidth=.2,zorder=zorder)
            ax.add_patch(poly)
            # Each successive series is drawn slightly behind the previous.
            zorder -=0.1
        ax.set_xlim(start_timestamp,end_timestamp+1)
        ax.set_ylim(0,y_max+1)
        ax.set_xticklabels([time.strftime('%y-%m-%d',time.localtime(int(day))) for day in ax.get_xticks()])
        #set the legend
        fontP = FontProperties()
        fontP.set_size('xx-small')
        ax.legend(groupby_list,loc=0,bbox_to_anchor=(1,1.05),prop = fontP)
        pylab.title('%s groupby %s from %s to %s'%(generate,groupby,start_time,end_time))
        end = time.time()
        pylab.xlabel('Processing time is: %.5ss'%(end-start))
        #day_finish_time = int(time.mktime(time.strptime(finish_time,'%Y-%m-%d'))/86400)
        #print 'pricessing time is:%s'%(end-start)
        pylab.savefig('linegraph')
        imgData = cStringIO.StringIO()
        pylab.savefig(imgData, format='png')
        imgData.seek(0)
        pylab.close()
        return imgData
def create_index_clause(conditions):
    """Build a pycassa index clause from (column, value, operator) triples."""
    expressions = [
        pycassa.create_index_expression(cond[0], cond[1], op=cond[2])
        for cond in conditions
    ]
    return pycassa.create_index_clause(expressions)
def _list_metrics(self, project_id, namespace=None, metric_name=None,
                  filters=None, next_token=""):
    """Return one page of metric rows for a project.

    Returns (metrics, new_next_token, skip_first): metrics is a list of
    (row_key, metric_dict) pairs; new_next_token is the key to resume the
    next page from (stringified, or None when nothing was scanned);
    skip_first tells the caller whether the resume key was already emitted
    in this page.  filters is an optional
    (full_filter, name_filter, value_filter) triple applied client-side.
    """
    def to_dict(v):
        # 'dimensions' is stored as a JSON string in the column family.
        return {'project_id': v['project_id'],
                'dimensions': json.loads(v['dimensions']),
                'name': v['name'],
                'namespace': v['namespace']}

    def apply_filter(metric, filters):
        # No filters -> every row matches.
        if not filters:
            return True
        dimensions = metric.get('dimensions')
        dimensions = json.loads(dimensions) if dimensions else {}
        full_filter, name_filter, value_filter = filters
        # full_filter: every (name, value) pair must be present.
        if full_filter:
            if not set(full_filter).issubset(set(dimensions.items())):
                return False
        # name_filter: the dimension-name set must match exactly.
        if name_filter:
            if set(dimensions.keys()) != set(name_filter):
                return False
        # value_filter: some filter value must be a substring of some
        # dimension value.
        if value_filter:
            for v_in_dim in dimensions.values():
                for v in value_filter:
                    if v in utils.utf8(v_in_dim):
                        return True
            return False
        return True

    # '' (not None) is pycassa's "start from the first row" sentinel.
    next_token = uuid.UUID(next_token) if next_token else ''
    new_next_token = None
    expr_list = [pycassa.create_index_expression("project_id", project_id), ]
    if namespace:
        expr = pycassa.create_index_expression("namespace", namespace)
        expr_list.append(expr)
    if metric_name:
        expr = pycassa.create_index_expression("name", metric_name)
        expr_list.append(expr)
    index_clause = pycassa.create_index_clause(expr_list, count=501,
                                               start_key=next_token)
    items = self.cf_metric.get_indexed_slices(index_clause,
                                              column_count=100)
    last_token = None
    metrics = []
    for key, value in items:
        # Track the last key scanned (not just matched) for pagination.
        new_next_token = key
        if value and apply_filter(value, filters):
            last_token = key
            metrics.append((key, to_dict(value)))
    # If the final scanned row also matched, the resume key was already
    # returned, so the next page should skip it.
    skip_first = last_token and last_token == new_next_token
    LOG.info("%s %s %s", next_token, new_next_token, last_token)
    new_next_token = str(new_next_token) if new_next_token \
        else new_next_token
    return metrics, new_next_token, skip_first