Example #1
    def run(self):
        if self.parse_args() != 0:
            return

        if len(self._args.select) == 0 and self._args.dtable is None:
            tab_url = "http://" + self._args.analytics_api_ip + ":" +\
                self._args.analytics_api_port +\
                "/analytics/table/StatTable." + self._args.table
            schematxt = OpServerUtils.get_url_http(tab_url + "/schema",
                self._args.admin_user, self._args.admin_password)
            schema = json.loads(schematxt.text)['columns']
            for pp in schema:
                if 'suffixes' in pp and pp['suffixes']:
                    des = "%s %s" % (pp['name'], str(pp['suffixes']))
                else:
                    des = "%s" % pp['name']
                if pp['index']:
                    valuetxt = OpServerUtils.get_url_http(
                        tab_url + "/column-values/" + pp['name'],
                        self._args.admin_user, self._args.admin_password)
                    print "%s : %s %s" % (des, pp['datatype'], valuetxt.text)
                else:
                    print "%s : %s" % (des, pp['datatype'])
        else:
            result = self.query()
            self.display(result)
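Every example in this section funnels its HTTP calls through OpServerUtils.get_url_http, and the callers read a .text attribute off the return value, so it behaves like a requests response. A minimal stand-in, assuming the helper is essentially requests.get with optional HTTP basic auth (the real implementation may add TLS and error handling):

import requests

def get_url_http(url, user=None, password=None):
    # Hypothetical sketch of OpServerUtils.get_url_http: a thin wrapper
    # over requests.get with optional HTTP basic auth. Callers in these
    # examples read .text from the returned response object.
    auth = (user, password) if user is not None else None
    return requests.get(url, auth=auth)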
Example #2
def main():
    querier = StatQuerier()
    if querier.parse_args() != 0:
        return


    if len(querier._args.select) == 0 and querier._args.dtable is None:
        tab_url = "http://" + querier._args.analytics_api_ip + ":" +\
            querier._args.analytics_api_port +\
            "/analytics/table/StatTable." + querier._args.table
        schematxt = OpServerUtils.get_url_http(tab_url + "/schema")
        schema = json.loads(schematxt.text)['columns']
        for pp in schema:
            if 'suffixes' in pp and pp['suffixes']:
                des = "%s %s" % (pp['name'], str(pp['suffixes']))
            else:
                des = "%s" % pp['name']
            if pp['index']:
                valuetxt = OpServerUtils.get_url_http(tab_url + "/column-values/" + pp['name'])
                print "%s : %s %s" % (des, pp['datatype'], valuetxt.text)
            else:
                print "%s : %s" % (des, pp['datatype'])
    else:
        result = querier.query()
        querier.display(result)
Example #3
    def run(self):
        index = 0
        analytics_api_ip = self._defaults["analytics_api_ip"]
        analytics_api_port = self._defaults["analytics_api_port"]
        username = self._defaults["username"]
        password = self._defaults["password"]
        stat_table_list = [xx.stat_type + "." + xx.stat_attr for xx in VizConstants._STAT_TABLES]
        stat_schema_files = []
        for arg in sys.argv:
            index = index + 1
            if arg == "--analytics-api-ip":
                analytics_api_ip = sys.argv[index]
            elif arg == "--analytics-api-port":
                analytics_api_port = sys.argv[index]
            elif arg == "--admin-user":
                username = sys.argv[index]
            elif arg == "--admin-password":
                password = sys.argv[index]
        tab_url = "http://" + analytics_api_ip + ":" + analytics_api_port + "/analytics/tables"
        tables = OpServerUtils.get_url_http(tab_url, username, password)
        if tables != {}:
            table_list = json.loads(tables.text)
            for table in table_list:
                if table["type"] == "STAT":
                    table_name = ".".join(table["name"].split(".")[1:])
                    # append to stat_table_list only if not existing
                    if table_name not in stat_table_list:
                        stat_table_list.append(table_name)

        if self.parse_args(stat_table_list) != 0:
            return

        if len(self._args.select) == 0 and self._args.dtable is None:
            tab_url = (
                "http://"
                + self._args.analytics_api_ip
                + ":"
                + self._args.analytics_api_port
                + "/analytics/table/StatTable."
                + self._args.table
            )
            schematxt = OpServerUtils.get_url_http(
                tab_url + "/schema", self._args.admin_user, self._args.admin_password
            )
            schema = json.loads(schematxt.text)["columns"]
            for pp in schema:
                if "suffixes" in pp and pp["suffixes"]:
                    des = "%s %s" % (pp["name"], str(pp["suffixes"]))
                else:
                    des = "%s" % pp["name"]
                if pp["index"]:
                    valuetxt = OpServerUtils.get_url_http(
                        tab_url + "/column-values/" + pp["name"], self._args.admin_user, self._args.admin_password
                    )
                    print "%s : %s %s" % (des, pp["datatype"], valuetxt.text)
                else:
                    print "%s : %s" % (des, pp["datatype"])
        else:
            result = self.query()
            self.display(result)
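The argv pre-scan above relies on index being incremented before the match test, so when arg equals a flag, sys.argv[index] already points at the flag's value. A self-contained trace of the same pattern:

import sys

# Trace of the pre-scan pattern used in run() above.
sys.argv = ["stats.py", "--analytics-api-ip", "10.0.0.1"]
ip = None
index = 0
for arg in sys.argv:
    index = index + 1
    if arg == "--analytics-api-ip":
        ip = sys.argv[index]  # index already points past the flag
assert ip == "10.0.0.1"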
Example #4
    def query(self):
        query_url = OpServerUtils.opserver_query_url(
            self._args.analytics_api_ip,
            self._args.analytics_api_port)

        if self._args.dtable is not None:
            rtable = self._args.dtable
        else:
            rtable = self._args.table
 
        query_dict = OpServerUtils.get_query_dict(
                "StatTable." + rtable, str(self._start_time), str(self._end_time),
                select_fields = self._args.select,
                where_clause = "AND".join(self._args.where),
                sort_fields = self._args.sort)
        
        print json.dumps(query_dict)
        resp = OpServerUtils.post_url_http(
            query_url, json.dumps(query_dict), sync = True)

        res = None
        if resp is not None:
            res = json.loads(resp)
            res = res['value']

        return res
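For orientation, the body POSTed to the query URL plausibly mirrors get_query_dict's keyword arguments. The wire keys below are inferred from those arguments, not taken from OpServerUtils itself:

# Inferred (not authoritative) shape of the query body built by
# get_query_dict for the call above; key names assumed to mirror
# the keyword arguments, times in usec since the epoch.
query_dict = {
    "table": "StatTable.AnalyticsCpuState.cpu_info",
    "start_time": 1500000000000000,
    "end_time": 1500000600000000,
    "select_fields": ["T=60 SUM(cpu_info.cpu_share)"],
    "where": "name=a6s40",
    "sort_fields": ["SUM(cpu_info.cpu_share)"],
}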
Example #5
    def get_pending_compaction_tasks(self, ip, port, user, password):
        """Collects pending compaction tasks from all db nodes
        Returns:
        A dictionary with db node name as key and the number of
        pending compaction tasks as value
        """

        to_return = {}
        try:
            uve_url = "http://" + ip + ":" + str(port) + \
                "/analytics/uves/database-nodes?cfilt=" \
                "CassandraStatusData:cassandra_compaction_task"
            data = OpServerUtils.get_url_http(uve_url, user, password)
            node_dburls = json.loads(data.text)

            for node_dburl in node_dburls:
                # get pending compaction tasks for analytics in each
                # cassandra node
                db_uve_data = OpServerUtils.get_url_http(
                    node_dburl['href'], user, password)
                db_uve_state = json.loads(db_uve_data.text)
                pending_compaction_tasks = \
                    int(db_uve_state['CassandraStatusData']
                        ['cassandra_compaction_task']
                        ['pending_compaction_tasks'])
                to_return[node_dburl['name']] = pending_compaction_tasks

        except Exception as inst:
            self._logger.error("Exception: Could not retrieve pending"
                               " compaction tasks information %s" %
                               str(type(inst)))

        self._logger.info("pending compaction tasks :" + str(to_return))
        return to_return
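The key lookups in the loop above imply roughly the following shape for the two UVE responses (values illustrative; only the fields the code actually reads are shown):

# Illustrative payloads matching the lookups above.
node_dburls = [
    {"name": "a6s40",
     "href": "http://10.0.0.1:8081/analytics/uves/database-node/a6s40"},
]
db_uve_state = {
    "CassandraStatusData": {
        "cassandra_compaction_task": {"pending_compaction_tasks": "3"}
    }
}
# The collector then stores int("3") under to_return["a6s40"].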
Example #6
    def post_query(self, table, start_time=None, end_time=None,
                   select_fields=None,
                   where_clause='',
                   sort_fields=None, sort=None, limit=None, filter=None, dir=None):
        res = None
        try:
            flows_url = OpServerUtils.opserver_query_url(
                self._ip, str(self._port))
            print flows_url
            query_dict = OpServerUtils.get_query_dict(
                table, start_time, end_time,
                select_fields,
                where_clause,
                sort_fields, sort, limit, filter, dir)

            print json.dumps(query_dict)
            res = []
            resp = OpServerUtils.post_url_http(
                flows_url, json.dumps(query_dict))
            if resp is not None:
                resp = json.loads(resp)
                qid = resp['href'].rsplit('/', 1)[1]
                result = OpServerUtils.get_query_result(
                    self._ip, str(self._port), qid)
                for item in result:
                    res.append(item)
        except Exception as e:
            print str(e)
        finally:
            return res
Example #7
 def __init__(self, query_json, analytics_api_ip, analytics_api_port, user,
              password, logger):
     self.query_json = query_json
     self._analytics_api_ip = analytics_api_ip
     self._analytics_api_port = analytics_api_port
     self._user = user
     self._password = password
     self._logger = logger
     if self.query_json is not None:
         self._start_time = self.query_json['start_time']
         self._end_time = self.query_json['end_time']
         # If the start_time/end_time in the query is specified as
         # relative time, then the actual start_time/end_time for the
         # FlowRecordTable query and UFlowData query would be different.
         # Since the FlowRecordTable is queried first and the result of
         # which is used to query the UFlowData table, the result may
         # not be correct if the start_time/end_time is different for
         # FlowRecord and UFlowData queries. Therefore, convert the
         # relative start/end time to absolute time.
         if not str(self._start_time).isdigit():
             self._start_time = \
                 OpServerUtils.convert_to_utc_timestamp_usec(self._start_time)
         if not str(self._end_time).isdigit():
             self._end_time = \
                 OpServerUtils.convert_to_utc_timestamp_usec(self._end_time)
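The conversion matters because a relative timestamp would otherwise be re-evaluated per query. A minimal sketch of the conversion, assuming the same semantics as OpServerUtils.convert_to_utc_timestamp_usec for the simple "now"/"now-10m" forms (the real helper supports more formats):

import re
import time

def to_utc_usec(value):
    # Hypothetical helper mirroring the assumed semantics of
    # OpServerUtils.convert_to_utc_timestamp_usec for simple inputs.
    now_usec = int(time.time() * 1000000)
    if value == "now":
        return now_usec
    m = re.match(r"now-(\d+)([smhd])$", value)
    if m:
        mult = {"s": 1, "m": 60, "h": 3600, "d": 86400}[m.group(2)]
        return now_usec - int(m.group(1)) * mult * 1000000
    return int(value)  # already an absolute usec timestamp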
Example #8
    def query(self):
        query_url = OpServerUtils.opserver_query_url(
            self._args.analytics_api_ip, self._args.analytics_api_port)

        if self._args.dtable is not None:
            rtable = self._args.dtable
        else:
            rtable = self._args.table

        query_dict = OpServerUtils.get_query_dict(
            "StatTable." + rtable,
            str(self._start_time),
            str(self._end_time),
            select_fields=self._args.select,
            where_clause="AND".join(self._args.where),
            sort_fields=self._args.sort)

        print json.dumps(query_dict)
        resp = OpServerUtils.post_url_http(query_url,
                                           json.dumps(query_dict),
                                           self._args.admin_user,
                                           self._args.admin_password,
                                           sync=True)

        res = None
        if resp is not None:
            res = json.loads(resp)
            res = res['value']

        return res
Example #9
    def get_dbusage_info(self, ip, port, user, password):
        """Collects database usage information from all db nodes
        Returns:
        A dictionary with db node name as key and db usage in % as value
        """

        to_return = {}
        try:
            uve_url = "http://" + ip + ":" + str(port) + \
                "/analytics/uves/database-nodes?cfilt=DatabaseUsageInfo"
            data = OpServerUtils.get_url_http(uve_url, user, password)
            node_dburls = json.loads(data.text)

            for node_dburl in node_dburls:
                # calculate disk usage percentage for analytics in each
                # cassandra node
                db_uve_data = OpServerUtils.get_url_http(node_dburl['href'],
                    user, password)
                db_uve_state = json.loads(db_uve_data.text)
                db_usage = db_uve_state['DatabaseUsageInfo']['database_usage'][0]
                db_usage_in_perc = (
                    100 * float(db_usage['analytics_db_size_1k']) /
                    float(db_usage['disk_space_available_1k'] +
                          db_usage['disk_space_used_1k']))
                to_return[node_dburl['name']] = db_usage_in_perc
        except Exception as inst:
            self._logger.error(type(inst))     # the exception instance
            self._logger.error(inst.args)      # arguments stored in .args
            self._logger.error(inst)           # __str__ allows args to be printed directly
            self._logger.error("Could not retrieve db usage information")

        self._logger.info("db usage:" + str(to_return))
        return to_return
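To make the formula concrete: usage is the analytics DB footprint divided by total disk capacity (free plus used), expressed as a percentage.

# Worked example of the usage computation above: a 50 GB analytics
# footprint on a disk with 150 GB free and 50 GB used (1K blocks).
analytics_db_size_1k = 50 * 10**6
disk_space_available_1k = 150 * 10**6
disk_space_used_1k = 50 * 10**6
usage = (100 * float(analytics_db_size_1k) /
         float(disk_space_available_1k + disk_space_used_1k))
assert usage == 25.0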
Example #10
    def get_pending_compaction_tasks(self):
        """Collects pending compaction tasks from all db nodes
        Returns:
        A dictionary with db node name as key and the number of
        pending compaction tasks as value
        """

        to_return = {}
        try:
            uve_url = "http://" + self._ip + ":" + str(self._admin_port) + \
                "/analytics/uves/database-nodes?cfilt=" \
                "CassandraStatusData:cassandra_compaction_task"
            data = OpServerUtils.get_url_http(uve_url, self._admin_user,
                    self._admin_password)
            node_dburls = json.loads(data.text)

            for node_dburl in node_dburls:
                # get pending compaction tasks for analytics in each
                # cassandra node
                db_uve_data = OpServerUtils.get_url_http(node_dburl['href'],
                    self._admin_user, self._admin_password)
                db_uve_state = json.loads(db_uve_data.text)
                pending_compaction_tasks = \
                    int(db_uve_state['CassandraStatusData']
                        ['cassandra_compaction_task']
                        ['pending_compaction_tasks'])
                to_return[node_dburl['name']] = pending_compaction_tasks

        except Exception as inst:
            self._logger.error("Exception: Could not retrieve pending"
                               " compaction tasks information %s" %
                               str(type(inst)))

        self._logger.info("pending compaction tasks :" + str(to_return))
        return to_return
Example #11
    def run(self):
        if self.parse_args() != 0:
            return

        if len(self._args.select) == 0 and self._args.dtable is None:
            tab_url = "http://" + self._args.analytics_api_ip + ":" +\
                self._args.analytics_api_port +\
                "/analytics/table/StatTable." + self._args.table
            schematxt = OpServerUtils.get_url_http(tab_url + "/schema")
            schema = json.loads(schematxt.text)['columns']
            for pp in schema:
                if 'suffixes' in pp and pp['suffixes']:
                    des = "%s %s" % (pp['name'], str(pp['suffixes']))
                else:
                    des = "%s" % pp['name']
                if pp['index']:
                    valuetxt = OpServerUtils.get_url_http(tab_url +
                                                          "/column-values/" +
                                                          pp['name'])
                    print "%s : %s %s" % (des, pp['datatype'], valuetxt.text)
                else:
                    print "%s : %s" % (des, pp['datatype'])
        else:
            result = self.query()
            self.display(result)
Example #12
 def run(self):
     try:
         index = 0
         analytics_api_ip = self._defaults['analytics_api_ip']
         analytics_api_port = self._defaults['analytics_api_port']
         username = self._defaults['username']
         password = self._defaults['password']
         for arg in sys.argv:
             index = index + 1
             if arg == "--analytics-api-ip":
                 analytics_api_ip = sys.argv[index]
             elif arg == "--analytics-api-port":
                 analytics_api_port = sys.argv[index]
             elif arg == "--admin-user":
                 username = sys.argv[index]
             elif arg == "--admin-password":
                 password = sys.argv[index]
         tab_url = "http://" + analytics_api_ip + ":" +\
             analytics_api_port + "/analytics/tables"
         tables = OpServerUtils.get_url_http(tab_url,
             username, password)
         if tables != {}:
             table_list = json.loads(tables.text)
             for table in table_list:
                 if table['type'] == 'OBJECT':
                     OBJECT_TYPE_LIST.append(str(table['display_name']))
         if self.parse_args() != 0:
             return
         if self._args.tail:
             start_time = UTCTimestampUsec() - 10*pow(10,6)
             while True:
                 self._start_time = start_time
                 self._end_time = UTCTimestampUsec()
                 start_time = self._end_time + 1
                 time.sleep(3)
                 result = self.query()
                 if result == -1:
                     return
                 self.display(result)
         else:
             start_time = self._args.start_time
             end_time = self._args.end_time
             if not self._args.start_time:
                 start_time = "now-10m"
             if not self._args.end_time:
                 end_time = "now"
             try:
                 self._start_time, self._end_time = \
                     OpServerUtils.parse_start_end_time(
                         start_time = start_time,
                         end_time = end_time,
                         last = self._args.last)
             except:
                 return -1
             result = self.query()
             if result == -1:
                 return
             self.display(result)
     except KeyboardInterrupt:
         return
Example #13
 def __init__(self, query_json, analytics_api_ip,
              analytics_api_port, user, password, logger):
     self.query_json = query_json
     self._analytics_api_ip = analytics_api_ip
     self._analytics_api_port = analytics_api_port
     self._user = user
     self._password = password
     self._logger = logger
     if self.query_json is not None:
         self._start_time = self.query_json['start_time']
         self._end_time = self.query_json['end_time']
         # If the start_time/end_time in the query is specified as
         # relative time, then the actual start_time/end_time for the
         # FlowRecordTable query and UFlowData query would be different.
         # Since the FlowRecordTable is queried first and the result of
         # which is used to query the UFlowData table, the result may
         # not be correct if the start_time/end_time is different for
         # FlowRecord and UFlowData queries. Therefore, convert the
         # relative start/end time to absolute time.
         if not str(self._start_time).isdigit():
             self._start_time = \
                 OpServerUtils.convert_to_utc_timestamp_usec(self._start_time)
         if not str(self._end_time).isdigit():
             self._end_time = \
                 OpServerUtils.convert_to_utc_timestamp_usec(self._end_time)
Example #14
 def run(self):
     try:
         index = 0
         analytics_api_ip = self._defaults['analytics_api_ip']
         analytics_api_port = self._defaults['analytics_api_port']
         username = self._defaults['username']
         password = self._defaults['password']
         for arg in sys.argv:
             index = index + 1
             if arg == "--analytics-api-ip":
                 analytics_api_ip = sys.argv[index]
             elif arg == "--analytics-api-port":
                 analytics_api_port = sys.argv[index]
             elif arg == "--admin-user":
                 username = sys.argv[index]
             elif arg == "--admin-password":
                 password = sys.argv[index]
         tab_url = "http://" + analytics_api_ip + ":" +\
             analytics_api_port + "/analytics/tables"
         tables = OpServerUtils.get_url_http(tab_url, username, password)
         if tables != {}:
             table_list = json.loads(tables.text)
             for table in table_list:
                 if table['type'] == 'OBJECT':
                     OBJECT_TYPE_LIST.append(str(table['display_name']))
         if self.parse_args() != 0:
             return
         if self._args.tail:
             start_time = UTCTimestampUsec() - 10 * pow(10, 6)
             while True:
                 self._start_time = start_time
                 self._end_time = UTCTimestampUsec()
                 start_time = self._end_time + 1
                 time.sleep(3)
                 result = self.query()
                 if result == -1:
                     return
                 self.display(result)
         else:
             start_time = self._args.start_time
             end_time = self._args.end_time
             if not self._args.start_time:
                 start_time = "now-10m"
             if not self._args.end_time:
                 end_time = "now"
             try:
                 self._start_time, self._end_time = \
                     OpServerUtils.parse_start_end_time(
                         start_time = start_time,
                         end_time = end_time,
                         last = self._args.last)
             except:
                 return -1
             result = self.query()
             if result == -1:
                 return
             self.display(result)
     except KeyboardInterrupt:
         return
Example #15
    def aggregate(self, key, flat):
        '''
        This function does parallel aggregation of this UVE's state.
        It aggregates across all sources and returns the global state of the UVE.
        '''
        result = {}
        try:
            for typ in self._state[key].keys():
                result[typ] = {}
                for objattr in self._state[key][typ].keys():
                    if self._is_sum(self._state[key][typ][objattr]):
                        sum_res = self._sum_agg(self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(sum_res)
                        else:
                            result[typ][objattr] = sum_res
                    elif self._is_union(self._state[key][typ][objattr]):
                        union_res = self._union_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(union_res)
                        else:
                            result[typ][objattr] = union_res
                    elif self._is_append(self._state[key][typ][objattr]):
                        result[typ][objattr] = self._append_agg(
                            self._state[key][typ][objattr])
                        append_res = ParallelAggregator.consolidate_list(
                            result, typ, objattr)

                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(append_res)
                        else:
                            result[typ][objattr] = append_res

                    else:
                        default_res = self._default_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            if len(default_res) == 1:
                                result[typ][objattr] = \
                                    OpServerUtils.uve_attr_flatten(
                                        default_res[0][0])
                            else:
                                nres = []
                                for idx in range(len(default_res)):
                                    nres.append(default_res[idx])
                                    nres[idx][0] = \
                                        OpServerUtils.uve_attr_flatten(
                                            default_res[idx][0])
                                result[typ][objattr] = nres
                        else:
                            result[typ][objattr] = default_res
        except KeyError:
            pass
        return result
Example #16
    def run(self):
        index = 0
        analytics_api_ip = self._defaults['analytics_api_ip']
        analytics_api_port = self._defaults['analytics_api_port']
        username = self._defaults['username']
        password = self._defaults['password']
        stat_table_list = [
            xx.stat_type + "." + xx.stat_attr
            for xx in VizConstants._STAT_TABLES
        ]
        stat_schema_files = []
        for arg in sys.argv:
            index = index + 1
            if arg == "--analytics-api-ip":
                analytics_api_ip = sys.argv[index]
            elif arg == "--analytics-api-port":
                analytics_api_port = sys.argv[index]
            elif arg == "--admin-user":
                username = sys.argv[index]
            elif arg == "--admin-password":
                password = sys.argv[index]
        tab_url = "http://" + analytics_api_ip + ":" +\
            analytics_api_port + "/analytics/tables"
        tables = OpServerUtils.get_url_http(tab_url, username, password)
        if tables != {}:
            table_list = json.loads(tables.text)
            for table in table_list:
                if table['type'] == 'STAT':
                    table_name = '.'.join(table['name'].split('.')[1:])
                    # append to stat_table_list only if not existing
                    if table_name not in stat_table_list:
                        stat_table_list.append(table_name)

        if self.parse_args(stat_table_list) != 0:
            return

        if len(self._args.select) == 0 and self._args.dtable is None:
            tab_url = "http://" + self._args.analytics_api_ip + ":" +\
                self._args.analytics_api_port +\
                "/analytics/table/StatTable." + self._args.table
            schematxt = OpServerUtils.get_url_http(tab_url + "/schema",
                                                   self._args.admin_user,
                                                   self._args.admin_password)
            schema = json.loads(schematxt.text)['columns']
            for pp in schema:
                if 'suffixes' in pp and pp['suffixes']:
                    des = "%s %s" % (pp['name'], str(pp['suffixes']))
                else:
                    des = "%s" % pp['name']
                if pp['index']:
                    valuetxt = OpServerUtils.get_url_http(
                        tab_url + "/column-values/" + pp['name'],
                        self._args.admin_user, self._args.admin_password)
                    print "%s : %s %s" % (des, pp['datatype'], valuetxt.text)
                else:
                    print "%s : %s" % (des, pp['datatype'])
        else:
            result = self.query()
            self.display(result)
Example #17
    def aggregate(self, key, flat):
        '''
        This function does parallel aggregation of this UVE's state.
        It aggregates across all sources and returns the global state of the UVE.
        '''
        result = {}
        try:
            for typ in self._state[key].keys():
                result[typ] = {}
                for objattr in self._state[key][typ].keys():
                    if self._is_sum(self._state[key][typ][objattr]):
                        sum_res = self._sum_agg(self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(sum_res)
                        else:
                            result[typ][objattr] = sum_res
                    elif self._is_union(self._state[key][typ][objattr]):
                        union_res = self._union_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(union_res)
                        else:
                            result[typ][objattr] = union_res
                    elif self._is_append(self._state[key][typ][objattr]):
                        result[typ][objattr] = self._append_agg(
                            self._state[key][typ][objattr])
                        append_res = ParallelAggregator.consolidate_list(
                            result, typ, objattr)

                        if flat:
                            result[typ][objattr] =\
                                OpServerUtils.uve_attr_flatten(append_res)
                        else:
                            result[typ][objattr] = append_res

                    else:
                        default_res = self._default_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            if (len(default_res) == 1):
                                result[typ][objattr] =\
                                    OpServerUtils.uve_attr_flatten(
                                        default_res[0][0])
                            else:
                                nres = []
                                for idx in range(len(default_res)):
                                    nres.append(default_res[idx])
                                    nres[idx][0] =\
                                        OpServerUtils.uve_attr_flatten(
                                            default_res[idx][0])
                                result[typ][objattr] = nres
                        else:
                            result[typ][objattr] = default_res
        except KeyError:
            pass
        return result
Example #18
 def _get_opserver_query(cls, job_execution_id, status):
     value = "%s:%s" % (job_execution_id, cls.JOB_STATUS.get(status.value))
     match = OpServerUtils.Match(name=cls.OBJECT_ID,
                                 value=value,
                                 op=OpServerUtils.MatchOp.EQUAL)
     return OpServerUtils.Query(cls.TABLE,
                                start_time=cls.START_TIME,
                                end_time=cls.END_TIME,
                                select_fields=cls.FIELDS,
                                where=[[match.__dict__]])
Example #19
    def query(self,
              table,
              start_time=None,
              end_time=None,
              select_fields=None,
              where_clause="",
              sort_fields=None,
              sort=None,
              limit=None,
              filter=None):
        """
        This function takes in the query parameters,
        formats them appropriately, and calls the
        REST API of the :mod:`opserver` to get data.

        :param table: table to do the query on
        :type table: str
        :param start_time: start_time of the query's timeperiod
        :type start_time: int
        :param end_time: end_time of the query's timeperiod
        :type end_time: int
        :param select_fields: list of columns to be returned in the
         final result
        :type select_fields: list of str
        :param where_clause: match conditions for the query
        :type where_clause: str -- match conditions joined with "AND"
        :returns: str -- json formatted result
        :raises: Error

        """

        flows_url = OpServerUtils.opserver_query_url(self._args.opserver_ip,
                                                     self._args.opserver_port)
        print flows_url

        query_dict = OpServerUtils.get_query_dict(table, start_time, end_time,
                                                  select_fields, where_clause,
                                                  sort_fields, sort, limit,
                                                  filter)

        print json.dumps(query_dict)
        resp = OpServerUtils.post_url_http(flows_url, json.dumps(query_dict))
        if resp is not None:
            resp = json.loads(resp)
            qid = resp['href'].rsplit('/', 1)[1]
            result = OpServerUtils.get_query_result(self._args.opserver_ip,
                                                    self._args.opserver_port,
                                                    qid)
            for item in result:
                print item

        return
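Given the documented parameters, an invocation might look like the following sketch; the table and field names are illustrative placeholders, not taken from the source:

# Hypothetical invocation of query() above (times in usec).
self.query(table="FlowRecordTable",
           start_time=1500000000000000,
           end_time=1500000600000000,
           select_fields=["vrouter", "sourcevn", "destvn"],
           where_clause="sourcevn=default-domain:demo:vn0")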
Example #20
    def _get_overlay_flow_data(self):
        """Fetch the overlay flow data from the FlowRecord Table.

        Convert the where clause in the OverlayToUnderlay query according
        to the schema defined for the FlowRecord Table. Get the overlay
        flow data [source vrouter, destination vrouter, flowtuple hash,
        encapsulation] from the FlowRecord Table required to query the
        underlay data.
        """
        # process where clause
        try:
            where_or_list = self.query_json['where']
        except KeyError:
            where_or_list = []
        flow_record_where = []
        for where_and_list in where_or_list:
            flow_record_where_and_list = []
            for match_term in where_and_list:
                fname = self._overlay_to_flowrecord_name(match_term['name'])
                match = OpServerUtils.Match(name=fname,
                                            value=match_term['value'],
                                            op=match_term['op'],
                                            value2=match_term.get('value2'))
                flow_record_where_and_list.append(match.__dict__)
                if match_term.get('suffix') is not None:
                    fname = self._overlay_to_flowrecord_name(
                        match_term['suffix']['name'])
                    match = OpServerUtils.Match(
                        name=fname,
                        value=match_term['suffix']['value'],
                        op=match_term['suffix']['op'],
                        value2=match_term['suffix'].get('value2'))
                    flow_record_where_and_list.append(match.__dict__)
            flow_record_where.append(flow_record_where_and_list)

        # populate the select list
        flow_record_select = [
            FlowRecordNames[FlowRecordFields.FLOWREC_VROUTER_IP],
            FlowRecordNames[FlowRecordFields.FLOWREC_OTHER_VROUTER_IP],
            FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_SPORT],
            FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_PROTO]
        ]

        flow_record_query = OpServerUtils.Query(
            table=FLOW_TABLE,
            start_time=self._start_time,
            end_time=self._end_time,
            select_fields=flow_record_select,
            where=flow_record_where,
            dir=1)
        return self._send_query(json.dumps(flow_record_query.__dict__))
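The variable names above (where_or_list, where_and_list) make the clause structure explicit: the outer list holds OR'ed branches and each inner list holds AND'ed match dicts. Illustratively (field values invented, and op=1 is assumed to be the EQUAL match-op code):

# Illustrative where-clause nesting: outer list = OR, inner list = AND.
# Each match dict mirrors OpServerUtils.Match(name, value, op, value2).
where = [
    [  # branch 1: vrouter == "vr1" AND protocol == 17
        {"name": "vrouter", "value": "vr1", "op": 1, "value2": None},
        {"name": "protocol", "value": 17, "op": 1, "value2": None},
    ],
    [  # branch 2, OR'ed with branch 1
        {"name": "vrouter", "value": "vr2", "op": 1, "value2": None},
    ],
]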
Example #21
    def run(self):
        topdir = "/usr/share/doc/contrail-docs/html/messages/"
        extn = ".json"
        stat_schema_files = []
        for dirpath, dirnames, files in os.walk(topdir):
            for name in files:
                if name.lower().endswith(extn):
                    stat_schema_files.append(os.path.join(dirpath, name))
        stat_tables = []
        for schema_file in stat_schema_files:
            with open(schema_file) as data_file:
                data = json.load(data_file)
            for _, tables in data.iteritems():
                for table in tables:
                    if table not in stat_tables:
                        stat_tables.append(table)
        stat_table_list = [xx.stat_type + "." + xx.stat_attr for xx in VizConstants._STAT_TABLES]
        stat_table_list.extend([xx["stat_type"] + "." + xx["stat_attr"] for xx in stat_tables])

        if self.parse_args(stat_table_list) != 0:
            return

        if len(self._args.select) == 0 and self._args.dtable is None:
            tab_url = (
                "http://"
                + self._args.analytics_api_ip
                + ":"
                + self._args.analytics_api_port
                + "/analytics/table/StatTable."
                + self._args.table
            )
            schematxt = OpServerUtils.get_url_http(
                tab_url + "/schema", self._args.admin_user, self._args.admin_password
            )
            schema = json.loads(schematxt.text)["columns"]
            for pp in schema:
                if "suffixes" in pp and pp["suffixes"]:
                    des = "%s %s" % (pp["name"], str(pp["suffixes"]))
                else:
                    des = "%s" % pp["name"]
                if pp["index"]:
                    valuetxt = OpServerUtils.get_url_http(
                        tab_url + "/column-values/" + pp["name"], self._args.admin_user, self._args.admin_password
                    )
                    print "%s : %s %s" % (des, pp["datatype"], valuetxt.text)
                else:
                    print "%s : %s" % (des, pp["datatype"])
        else:
            result = self.query()
            self.display(result)
Example #22
 def _send_query(self, query):
     """Post the query to the analytics-api server and return the
     response."""
     self._logger.debug('Sending query: %s' % (query))
     opserver_url = OpServerUtils.opserver_query_url(self._analytics_api_ip,
                        str(self._analytics_api_port))
     resp = OpServerUtils.post_url_http(opserver_url, query, True)
     try:
         resp = json.loads(resp)
         value = resp['value']
     except (TypeError, ValueError, KeyError):
         raise _QueryError(query)
     self._logger.debug('Query response: %s' % str(value))
     return value
Example #23
 def _send_query(self, query):
     """Post the query to the analytics-api server and return the
     response."""
     self._logger.debug('Sending query: %s' % (query))
     opserver_url = OpServerUtils.opserver_query_url(
         self._analytics_api_ip, str(self._analytics_api_port))
     resp = OpServerUtils.post_url_http(opserver_url, query, True)
     try:
         resp = json.loads(resp)
         value = resp['value']
     except (TypeError, ValueError, KeyError):
         raise _QueryError(query)
     self._logger.debug('Query response: %s' % str(value))
     return value
Example #24
    def run(self):
        topdir = '/usr/share/doc/contrail-docs/html/messages/'
        extn = '.json'
        stat_schema_files = []
        for dirpath, dirnames, files in os.walk(topdir):
            for name in files:
                if name.lower().endswith(extn):
                    stat_schema_files.append(os.path.join(dirpath, name))
        stat_tables = []
        for schema_file in stat_schema_files:
            with open(schema_file) as data_file:
                data = json.load(data_file)
            for _, tables in data.iteritems():
                for table in tables:
                    if table not in stat_tables:
                        stat_tables.append(table)
        stat_table_list = [
            xx.stat_type + "." + xx.stat_attr
            for xx in VizConstants._STAT_TABLES
        ]
        stat_table_list.extend(
            [xx["stat_type"] + "." + xx["stat_attr"] for xx in stat_tables])

        if self.parse_args(stat_table_list) != 0:
            return

        if len(self._args.select) == 0 and self._args.dtable is None:
            tab_url = "http://" + self._args.analytics_api_ip + ":" +\
                self._args.analytics_api_port +\
                "/analytics/table/StatTable." + self._args.table
            schematxt = OpServerUtils.get_url_http(tab_url + "/schema",
                                                   self._args.admin_user,
                                                   self._args.admin_password)
            schema = json.loads(schematxt.text)['columns']
            for pp in schema:
                if 'suffixes' in pp and pp['suffixes']:
                    des = "%s %s" % (pp['name'], str(pp['suffixes']))
                else:
                    des = "%s" % pp['name']
                if pp['index']:
                    valuetxt = OpServerUtils.get_url_http(
                        tab_url + "/column-values/" + pp['name'],
                        self._args.admin_user, self._args.admin_password)
                    print "%s : %s %s" % (des, pp['datatype'], valuetxt.text)
                else:
                    print "%s : %s" % (des, pp['datatype'])
        else:
            result = self.query()
            self.display(result)
Example #25
    def post_query(self,
                   table,
                   start_time=None,
                   end_time=None,
                   select_fields=None,
                   where_clause='',
                   sort_fields=None,
                   sort=None,
                   limit=None,
                   filter=None,
                   dir=None):
        res = None
        try:
            self._drv._auth()
            headers = self._drv._headers
        except Exception as e:
            headers = None  # vCenter case where OpenStack is not available

        try:
            flows_url = OpServerUtils.opserver_query_url(
                self._ip, str(self._port))
            print flows_url
            query_dict = OpServerUtils.get_query_dict(table, start_time,
                                                      end_time, select_fields,
                                                      where_clause,
                                                      sort_fields, sort, limit,
                                                      filter, dir)

            print json.dumps(query_dict)
            res = []
            resp = OpServerUtils.post_url_http(flows_url,
                                               json.dumps(query_dict), headers)
            if resp is not None:
                resp = json.loads(resp)
                try:
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = OpServerUtils.get_query_result(
                        self._ip, str(self._port), qid, headers)
                    for item in result:
                        res.append(item)
                except Exception as e:
                    if 'value' in resp:
                        for item in resp['value']:
                            res.append(item)
        except Exception as e:
            print str(e)
        finally:
            return res
Example #26
def main():
    try:
        querier = LogQuerier()
        if querier.parse_args() != 0:
            return
        if querier._args.f:
            start_time = UTCTimestampUsec() - 10 * pow(10, 6)
            while True:
                querier._start_time = start_time
                querier._end_time = UTCTimestampUsec()
                start_time = querier._end_time + 1
                time.sleep(3)
                result = querier.query()
                if result == -1:
                    return
                querier.display(result)
        else:
            start_time = querier._args.start_time
            end_time = querier._args.end_time
            if not querier._args.start_time:
                start_time = "now-10m"
            if not querier._args.end_time:
                end_time = "now"
            try:
                querier._start_time, querier._end_time = OpServerUtils.parse_start_end_time(
                    start_time=start_time, end_time=end_time, last=querier._args.last
                )
            except:
                return -1
            result = querier.query()
            if result == -1:
                return
            querier.display(result)
    except KeyboardInterrupt:
        return
Example #27
    def post_query(self, table, start_time=None, end_time=None,
                   select_fields=None,
                   where_clause='',
                   sort_fields=None, sort=None, limit=None, filter=None, dir=None,
                   session_type=None):
        res = None
        try:
            query_dict = OpServerUtils.get_query_dict(
                table, start_time, end_time,
                select_fields,
                where_clause,
                sort_fields, sort, limit, filter, dir,
                session_type)

            res = []
            resp = self.post(path='analytics/query', payload=query_dict)
            if resp is not None:
                try:
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = self.get_query_result(qid)
                    for item in result:
                        res.append(item)
                except Exception as e:
                    if 'value' in resp:
                        for item in resp['value']:
                            res.append(item)
        except Exception as e:
            self.log.debug("Got exception %s" % e)
        finally:
            return res
Example #28
    def post_query(self,
                   table,
                   start_time=None,
                   end_time=None,
                   select_fields=None,
                   where_clause='',
                   sort_fields=None,
                   sort=None,
                   limit=None,
                   filter=None,
                   dir=None,
                   session_type=None):
        res = None
        try:
            query_dict = OpServerUtils.get_query_dict(
                table, start_time, end_time, select_fields, where_clause,
                sort_fields, sort, limit, filter, dir, session_type)

            res = []
            resp = self.post(path='analytics/query', payload=query_dict)
            if resp is not None:
                try:
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = self.get_query_result(qid)
                    for item in result:
                        res.append(item)
                except Exception as e:
                    if 'value' in resp:
                        for item in resp['value']:
                            res.append(item)
        except Exception as e:
            self.log.debug("Got exception %s" % e)
        finally:
            return res
Example #29
    def query(self, table, start_time=None, end_time=None,
              select_fields=None,
              where_clause="",
              sort_fields=None, sort=None, limit=None, filter=None):
        """
        This function takes in the query parameters,
        formats them appropriately, and calls the
        REST API of the :mod:`opserver` to get data.

        :param table: table to do the query on
        :type table: str
        :param start_time: start_time of the query's timeperiod
        :type start_time: int
        :param end_time: end_time of the query's timeperiod
        :type end_time: int
        :param select_fields: list of columns to be returned in the
         final result
        :type select_fields: list of str
        :param where_clause: match conditions for the query
        :type where_clause: str -- match conditions joined with "AND"
        :returns: str -- json formatted result
        :raises: Error

        """

        flows_url = OpServerUtils.opserver_query_url(self._args.opserver_ip,
                                                     self._args.opserver_port)
        print flows_url

        query_dict = OpServerUtils.get_query_dict(table, start_time, end_time,
                                                  select_fields, where_clause,
                                                  sort_fields, sort, limit,
                                                  filter)

        print json.dumps(query_dict)
        resp = OpServerUtils.post_url_http(flows_url, json.dumps(query_dict))
        if resp is not None:
            resp = json.loads(resp)
            qid = resp['href'].rsplit('/', 1)[1]
            result = OpServerUtils.get_query_result(self._args.opserver_ip,
                                                    self._args.opserver_port,
                                                    qid)
            for item in result:
                print item

        return
Example #30
    def parse_args(self):
        """ 
        Eg. python stats.py --analytics-api-ip 127.0.0.1
                          --analytics-api-port 8081
                          --table AnalyticsCpuState.cpu_info
                          --where name=a6s40 cpu_info.module_id=Collector
                          --select "T=60 SUM(cpu_info.cpu_share)"
                          --sort "SUM(cpu_info.cpu_share)"
                          [--start-time now-10m --end-time now] | --last 10m

            python stats.py --table AnalyticsCpuState.cpu_info
        """
        defaults = {
            'analytics_api_ip': '127.0.0.1',
            'analytics_api_port': '8081',
            'start_time': 'now-10m',
            'end_time': 'now',
            'select': [],
            'where': [],
            'sort': []
        }

        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.set_defaults(**defaults)
        parser.add_argument("--analytics-api-ip",
                            help="IP address of Analytics API Server")
        parser.add_argument("--analytics-api-port",
                            help="Port of Analytics API Server")
        parser.add_argument("--start-time",
                            help="Logs start time (format now-10m, now-1h)")
        parser.add_argument("--end-time", help="Logs end time")
        parser.add_argument("--last",
                            help="Logs from last time period (format 10m, 1d)")
        parser.add_argument("--table",
                            help="StatTable to query",
                            choices=STAT_TABLE_LIST)
        parser.add_argument("--dtable", help="Dynamic StatTable to query")
        parser.add_argument("--select", help="List of Select Terms", nargs='+')
        parser.add_argument("--where",
                            help="List of Where Terms to be ANDed",
                            nargs='+')
        parser.add_argument("--sort", help="List of Sort Terms", nargs='+')
        self._args = parser.parse_args()

        if self._args.table is None and self._args.dtable is None:
            return -1

        try:
            self._start_time, self._end_time = \
                OpServerUtils.parse_start_end_time(
                    start_time = self._args.start_time,
                    end_time = self._args.end_time,
                    last = self._args.last)
        except:
            return -1

        return 0
Example #31
 def post_db_purge(self, purge_input):
     
     res = []
     json_body = OpServerUtils.get_json_body(purge_input = purge_input)
     print json.dumps(json_body)
     try:
         purge_url = OpServerUtils.opserver_db_purge_url(
             self._ip, str(self._port))
         print purge_url
         resp = OpServerUtils.post_url_http(
             purge_url, json.dumps(json_body))
         if resp is not None:
             resp = json.loads(resp)
             res.append(resp)
     except Exception as e:
         print str(e)
     finally:
         return res
Example #32
    def parse_args(self):
        """ 
        Eg. python stats.py --analytics-api-ip 127.0.0.1
                          --analytics-api-port 8081
                          --table AnalyticsCpuState.cpu_info
                          --where name=a6s40 cpu_info.module_id=Collector
                          --select "T=60 SUM(cpu_info.cpu_share)"
                          --sort "SUM(cpu_info.cpu_share)"
                          [--start-time now-10m --end-time now] | --last 10m

            python stats.py --table AnalyticsCpuState.cpu_info
        """
        defaults = {
            'analytics_api_ip': '127.0.0.1',
            'analytics_api_port': '8081',
            'start_time': 'now-10m',
            'end_time': 'now',
            'select': [],
            'where': ['Source=*'],
            'sort': []
        }

        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.set_defaults(**defaults)
        parser.add_argument("--analytics-api-ip", help="IP address of Analytics API Server")
        parser.add_argument("--analytics-api-port", help="Port of Analytics API Server")
        parser.add_argument(
            "--start-time", help="Logs start time (format now-10m, now-1h)")
        parser.add_argument("--end-time", help="Logs end time")
        parser.add_argument(
            "--last", help="Logs from last time period (format 10m, 1d)")
        parser.add_argument(
            "--table", help="StatTable to query", choices=STAT_TABLE_LIST)
        parser.add_argument(
            "--dtable", help="Dynamic StatTable to query")
        parser.add_argument(
            "--select", help="List of Select Terms", nargs='+')
        parser.add_argument(
            "--where", help="List of Where Terms to be ANDed", nargs='+')
        parser.add_argument(
            "--sort", help="List of Sort Terms", nargs='+')
        self._args = parser.parse_args()

        if self._args.table is None and self._args.dtable is None:
            return -1

        try:
            self._start_time, self._end_time = \
                OpServerUtils.parse_start_end_time(
                    start_time = self._args.start_time,
                    end_time = self._args.end_time,
                    last = self._args.last)
        except:
            return -1

        return 0
Example #33
    def post_db_purge(self, purge_input):

        res = []
        json_body = OpServerUtils.get_json_body(purge_input=purge_input)
        print json.dumps(json_body)
        try:
            purge_url = OpServerUtils.opserver_db_purge_url(
                self._ip, str(self._port))
            print purge_url
            resp = OpServerUtils.post_url_http(purge_url,
                                               json.dumps(json_body))
            if resp is not None:
                resp = json.loads(resp)
                res.append(resp)
        except Exception as e:
            print str(e)
        finally:
            return res
Example #34
    def post_query(self, table, start_time=None, end_time=None,
                   select_fields=None,
                   where_clause='',
                   sort_fields=None, sort=None, limit=None, filter=None, dir=None,
                   session_type=None):
        res = None
        try:
            self._drv._auth()
            headers = self._drv._headers
        except Exception as e:
            headers = None  # vCenter case where OpenStack is not available

        try:
            flows_url = OpServerUtils.opserver_query_url(
                self._ip, str(self._port))
            print flows_url
            query_dict = OpServerUtils.get_query_dict(
                table, start_time, end_time,
                select_fields,
                where_clause,
                sort_fields, sort, limit, filter, dir,
                session_type)

            print json.dumps(query_dict)
            res = []
            resp = OpServerUtils.post_url_http(
                flows_url, json.dumps(query_dict), headers)
            if resp is not None:
                resp = json.loads(resp)
                try:
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = OpServerUtils.get_query_result(
                        self._ip, str(self._port), qid, headers)
                    for item in result:
                        res.append(item)
                except Exception as e:
                    if 'value' in resp:
                        for item in resp['value']:
                            res.append(item)
        except Exception as e:
            print str(e)
        finally:
            return res
Example #35
    def post_query(self,
                   table,
                   start_time=None,
                   end_time=None,
                   select_fields=None,
                   where_clause=None,
                   sort_fields=None,
                   sort=None,
                   limit=None,
                   filter=None,
                   sync=True):
        res = None
        try:
            flows_url = OpServerUtils.opserver_query_url(
                self._ip, str(self._port))
            print flows_url
            query_dict = OpServerUtils.get_query_dict(table, start_time,
                                                      end_time, select_fields,
                                                      where_clause,
                                                      sort_fields, sort, limit,
                                                      filter)

            print json.dumps(query_dict)
            res = []
            resp = OpServerUtils.post_url_http(flows_url,
                                               json.dumps(query_dict), sync)
            if sync:
                if resp is not None:
                    res = json.loads(resp)
                    res = res['value']
            else:
                if resp is not None:
                    resp = json.loads(resp)
                    qid = resp['href'].rsplit('/', 1)[1]
                    result = OpServerUtils.get_query_result(
                        self._ip, str(self._port), qid, 30)
                    for item in result:
                        res.append(item)
        except Exception as e:
            print str(e)
        finally:
            return res
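The two post_query variants above differ mainly in how results come back: the synchronous path returns the response's 'value' list directly, while the asynchronous path extracts the query id (qid) from 'href' and polls for chunked results. A hedged sketch of driving the second variant; 'conn' and the table/field names are illustrative assumptions:

# 'conn' is a hypothetical object exposing the post_query() shown above.
rows = conn.post_query('MessageTable',
                       start_time='now-10m', end_time='now',
                       select_fields=['MessageTS', 'Source', 'Messagetype'],
                       sync=False)
for row in rows:
    print row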
Example #36
    def query(self):
        if not self._args.where:
            where = ''
        else:
            where = "AND".join(self._args.where)

        if not self._args.filter:
            filter = None
        else:
            filter = "AND".join(self._args.filter)

        query_url = OpServerUtils.opserver_query_url(
            self._args.analytics_api_ip, self._args.analytics_api_port)

        query_dict = OpServerUtils.get_query_dict(
            self._args.table,
            str(self._start_time),
            str(self._end_time),
            select_fields=self._args.select,
            where_clause=where,
            filter=filter,
            sort_fields=self._args.sort,
            limit=self._args.limit,
            session_type=self._args.session_type,
            is_service_instance=self._args.is_service_instance)

        print json.dumps(query_dict)
        resp = OpServerUtils.post_url_http(query_url, json.dumps(query_dict),
                                           self._args.admin_user,
                                           self._args.admin_password)

        result = {}
        if resp is not None:
            resp = json.loads(resp)
            print resp
            qid = resp['href'].rsplit('/', 1)[1]
            result = OpServerUtils.get_query_result(
                self._args.analytics_api_ip, self._args.analytics_api_port,
                qid, self._args.admin_user, self._args.admin_password)

        return result
Example #37
    def post_db_purge(self, purge_input):
        res = []
        json_body = OpServerUtils.get_json_body(purge_input=purge_input)
        try:
            resp = self.post(path='analytics/operation/database-purge',
                             payload=json_body)
            if resp is not None:
                res.append(resp)
        except Exception as e:
            self.log.debug("Got exception %s" % e)
        finally:
            return res
Example #38
    def post_db_purge(self, purge_input):
        res = []
        json_body = OpServerUtils.get_json_body(purge_input=purge_input)
        try:
            resp = self.post(path='analytics/operation/database-purge',
                             payload=json_body)
            if resp is not None:
                res.append(resp)
        except Exception as e:
            self.log.debug("Got exception %s" % e)
        finally:
            return res
Example #39
    def parse_args(self, stat_table_list):
        """ 
        Eg. python stats.py --analytics-api-ip 127.0.0.1
                          --analytics-api-port 8181
                          --table NodeStatus.process_mem_cpu_usage
                          --where name=a6s40 cpu_info.module_id=Collector
                          --select "T=60 SUM(cpu_info.cpu_share)"
                          --sort "SUM(cpu_info.cpu_share)"
                          [--start-time now-10m --end-time now] | --last 10m

            python stats.py --table NodeStatus.process_mem_cpu_usage
        """
        defaults = {
            "analytics_api_ip": "127.0.0.1",
            "analytics_api_port": "8181",
            "start_time": "now-10m",
            "end_time": "now",
            "select": [],
            "where": ["Source=*"],
            "sort": [],
        }

        parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.set_defaults(**defaults)
        parser.add_argument("--analytics-api-ip", help="IP address of Analytics API Server")
        parser.add_argument("--analytics-api-port", help="Port of Analytcis API Server")
        parser.add_argument("--start-time", help="Logs start time (format now-10m, now-1h)")
        parser.add_argument("--end-time", help="Logs end time")
        parser.add_argument("--last", help="Logs from last time period (format 10m, 1d)")
        parser.add_argument("--table", help="StatTable to query", choices=stat_table_list)
        parser.add_argument("--dtable", help="Dynamic StatTable to query")
        parser.add_argument("--select", help="List of Select Terms", nargs="+")
        parser.add_argument("--where", help="List of Where Terms to be ANDed", nargs="+")
        parser.add_argument("--sort", help="List of Sort Terms", nargs="+")
        parser.add_argument("--admin-user", help="Name of admin user", default="admin")
        parser.add_argument("--admin-password", help="Password of admin user", default="contrail123")
        self._args = parser.parse_args()

        if self._args.table is None and self._args.dtable is None:
            return -1

        try:
            self._start_time, self._end_time = OpServerUtils.parse_start_end_time(
                start_time=self._args.start_time, end_time=self._args.end_time, last=self._args.last
            )
        except:
            return -1

        return 0
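parse_args() above returns 0 on success and -1 when neither --table nor --dtable was supplied or the time window fails to parse, so a driver typically gates the query on that return value. A minimal sketch under that assumption, with a StatQuerier-style class providing query() and display():

import sys

querier = StatQuerier()              # hypothetical class wiring these methods
if querier.parse_args(stat_table_list=[]) != 0:
    sys.exit(1)                      # bad or missing arguments
querier.display(querier.query())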
Example #40
    def _check_job_status(self, url, job_execution_id, status):
        query = self._get_opserver_query(job_execution_id, status)
        username = self._analytics_config.get('username', None)
        password = self._analytics_config.get('password', None)
        resp = OpServerUtils.post_url_http(self._logger, url, query.__dict__,
                                           username, password)
        if resp is not None:
            resp = json.loads(resp)
            if resp and resp['value'] and len(resp['value']) > 0:
                return True
        else:
            self._logger.debug("job handler: invalid response for (%s, %s):" %
                               (self._device_list, str(self._job_type)))

        return False
Example #41
    def _wait(self, job_execution_id, timeout, max_retries):
        url = OpServerUtils.opserver_query_url(self._analytics_config['ip'],
                                               self._analytics_config['port'])

        retry_count = 1
        while not self.is_job_done():
            self._job_status = self._get_job_status(url, job_execution_id)
            if not self.is_job_done():
                if retry_count >= max_retries:
                    self._logger.error(
                        "job handler: timed out waiting for (%s, %s):",
                        self._device_list, str(self._job_type))
                    self._job_status = JobStatus.FAILED
                else:
                    retry_count += 1
                    gevent.sleep(timeout)
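Since _wait() sleeps timeout seconds between at most max_retries polls, the worst case is roughly max_retries * timeout seconds before the job is marked FAILED. A hedged invocation sketch; 'handler' and the numbers are illustrative:

# Poll every 30 seconds; give up after roughly 10 * 30 = 300 seconds.
handler._wait(job_execution_id='1234', timeout=30, max_retries=10)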
Example #42
    def run(self):
        try:
            if self.parse_args() != 0:
                return

            if self._args.tail:
                start_time = UTCTimestampUsec() - 10*pow(10,6)
                while True:
                    self._start_time = start_time
                    self._end_time = UTCTimestampUsec()
                    start_time = self._end_time + 1
                    time.sleep(3)
                    result = self.query()
                    if result == -1:
                        return
                    self.display(result)
            else:
                start_time = self._args.start_time
                end_time = self._args.end_time
                if not self._args.start_time:
                    start_time = "now-10m"
                if not self._args.end_time:
                    end_time = "now"
                try:
                    self._start_time, self._end_time = \
                        OpServerUtils.parse_start_end_time(
                            start_time = start_time,
                            end_time = end_time,
                            last = self._args.last)
                except:
                    return -1
                result = self.query()
                if result == -1:
                    return
                # Accumulate the result before processing it as the
                # formatting of result can be cpu intensive and hence would
                # affect the overall time taken to fetch the result from the
                # analytics-api. Since the query result ttl is set to 5 min
                # in redis, it is necessary to improve the read throughput.
                result_list = self.read_result(result)
                self.display(result_list)
        except KeyboardInterrupt:
            return
Example #43
def main():
    try:
        querier = LogQuerier()
        if querier.parse_args() != 0:
            return
        if querier._args.f:
            start_time = UTCTimestampUsec() - 10 * pow(10, 6)
            while True:
                querier._start_time = start_time
                querier._end_time = UTCTimestampUsec()
                start_time = querier._end_time + 1
                time.sleep(3)
                result = querier.query()
                if result == -1:
                    return
                querier.display(result)
        else:
            start_time = querier._args.start_time
            end_time = querier._args.end_time
            if not querier._args.start_time:
                start_time = "now-10m"
            if not querier._args.end_time:
                end_time = "now"
            try:
                querier._start_time, querier._end_time = \
                    OpServerUtils.parse_start_end_time(
                        start_time = start_time,
                        end_time = end_time,
                        last = querier._args.last)
            except:
                return -1
            result = querier.query()
            if result == -1:
                return
            querier.display(result)
    except KeyboardInterrupt:
        return
Example #44
    def run(self):
        try:
            if self.parse_args() != 0:
                return
            if self._args.tail:
                start_time = UTCTimestampUsec() - 10*pow(10,6)
                while True:
                    self._start_time = start_time
                    self._end_time = UTCTimestampUsec()
                    start_time = self._end_time + 1
                    time.sleep(3)
                    result = self.query()
                    if result == -1:
                        return
                    self.display(result)
            else:
                start_time = self._args.start_time
                end_time = self._args.end_time
                if not self._args.start_time:
                    start_time = "now-10m"
                if not self._args.end_time:
                    end_time = "now"
                try:
                    self._start_time, self._end_time = \
                        OpServerUtils.parse_start_end_time(
                            start_time = start_time,
                            end_time = end_time,
                            last = self._args.last)
                except:
                    return -1
                result = self.query()
                if result == -1:
                    return
                self.display(result)
        except KeyboardInterrupt:
            return
Example #45
    def run(self):
        try:
            if self.parse_args() != 0:
                return
            if self._args.tail:
                start_time = UTCTimestampUsec() - 10*pow(10,6)
                while True:
                    self._start_time = start_time
                    self._end_time = UTCTimestampUsec()
                    start_time = self._end_time + 1
                    time.sleep(3)
                    result = self.query()
                    if result == -1:
                        return
                    self.display(result)
            else:
                start_time = self._args.start_time
                end_time = self._args.end_time
                if not self._args.start_time:
                    start_time = "now-10m"
                if not self._args.end_time:
                    end_time = "now"
                try:
                    self._start_time, self._end_time = \
                        OpServerUtils.parse_start_end_time(
                            start_time = start_time,
                            end_time = end_time,
                            last = self._args.last)
                except:
                    return -1
                result = self.query()
                if result == -1:
                    return
                self.display(result)
        except KeyboardInterrupt:
            return
Example #46
    def get_query_result(self, qid):
        max_retries = 60
        retry = 0
        while True:
            status = self.dict_get('/analytics/query/%s' % qid)
            if not status:
                yield {}
                return
            if status['progress'] != 100:
                if retry < max_retries:
                    retry = retry + 1
                    time.sleep(5)
                    continue
                yield {}
                return
            else:
                for chunk in status['chunks']:
                    resp = self.dict_get(chunk['href'])
                    if not resp:
                        yield {}
                    else:
                        for result in OpServerUtils.parse_query_result(resp):
                            yield result
                return
Example #47
    def get_query_result(self, qid):
        max_retries = 60
        retry = 0
        while True:
            status = self.dict_get('/analytics/query/%s' % qid)
            if not status:
                yield {}
                return
            if status['progress'] != 100:
                if retry < max_retries:
                    retry = retry + 1
                    time.sleep(5)
                    continue
                yield {}
                return
            else:
                for chunk in status['chunks']:
                    resp = self.dict_get(chunk['href'])
                    if not resp:
                        yield {}
                    else:
                        for result in OpServerUtils.parse_query_result(resp):
                            yield result
                return
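Because get_query_result() is a generator that yields {} sentinels on timeout or empty chunks, callers usually drain it and drop the empties. A hedged sketch; 'client' stands in for the object defining the method above, and the qid value is illustrative:

qid = '1234'                         # query id from an earlier query POST
rows = [r for r in client.get_query_result(qid) if r]
print "fetched %d rows" % len(rows)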
Example #48
    def query(self):
        start_time, end_time = self._start_time, self._end_time
        flow_url = OpServerUtils.opserver_query_url(
            self._args.analytics_api_ip,
            self._args.analytics_api_port)
        where = []
        filter = []
        if self._args.vrouter is not None:
            vrouter_match = OpServerUtils.Match(
                name=self._VROUTER,
                value=self._args.vrouter,
                op=OpServerUtils.MatchOp.EQUAL)
            where.append(vrouter_match.__dict__)

        if self._args.source_vn is not None:
            source_vn_match = OpServerUtils.Match(
                name=self._SOURCE_VN,
                value=self._args.source_vn,
                op=OpServerUtils.MatchOp.EQUAL)
            where.append(source_vn_match.__dict__)

        if self._args.destination_vn is not None:
            dest_vn_match = OpServerUtils.Match(
                name=self._DESTINATION_VN,
                value=self._args.destination_vn,
                op=OpServerUtils.MatchOp.EQUAL)
            where.append(dest_vn_match.__dict__)

        if self._args.source_ip is not None:
            source_ip_match = OpServerUtils.Match(
                name=self._SOURCE_IP,
                value=self._args.source_ip,
                op=OpServerUtils.MatchOp.EQUAL)
            where.append(source_ip_match.__dict__)

        if self._args.destination_ip is not None:
            dest_ip_match = OpServerUtils.Match(
                name=self._DESTINATION_IP,
                value=self._args.destination_ip,
                op=OpServerUtils.MatchOp.EQUAL)
            where.append(dest_ip_match.__dict__)

        if self._args.protocol is not None:
            protocol_match = OpServerUtils.Match(
                name=self._PROTOCOL,
                value=self._args.protocol,
                op=OpServerUtils.MatchOp.EQUAL)
            where.append(protocol_match.__dict__)

        if self._args.source_port is not None:
            source_port_match = OpServerUtils.Match(
                name=self._SOURCE_PORT,
                value=self._args.source_port,
                op=OpServerUtils.MatchOp.EQUAL)
            where.append(source_port_match.__dict__)

        if self._args.destination_port is not None:
            dest_port_match = OpServerUtils.Match(
                name=self._DESTINATION_PORT,
                value=self._args.destination_port,
                op=OpServerUtils.MatchOp.EQUAL)
            where.append(dest_port_match.__dict__)

        if self._args.action is not None:
            action_match = OpServerUtils.Match(
                name=self._ACTION,
                value=self._args.action,
                op=OpServerUtils.MatchOp.EQUAL)
            filter.append(action_match.__dict__)

        if self._args.vrouter_ip is not None:
            vrouter_ip_match = OpServerUtils.Match(
                name=self._VROUTER_IP,
                value=self._args.vrouter_ip,
                op=OpServerUtils.MatchOp.EQUAL)
            filter.append(vrouter_ip_match.__dict__)

        if self._args.other_vrouter_ip is not None:
            other_vrouter_ip_match = OpServerUtils.Match(
                name=self._OTHER_VROUTER_IP,
                value=self._args.other_vrouter_ip,
                op=OpServerUtils.MatchOp.EQUAL)
            filter.append(other_vrouter_ip_match.__dict__)

        if self._args.vmi_uuid is not None:
            vmi_match = OpServerUtils.Match(
                name=self._VMI_UUID,
                value=uuid.UUID(self._args.vmi_uuid),
                op=OpServerUtils.MatchOp.EQUAL)
            filter.append(vmi_match.__dict__)

        # Flow Record Table Query
        table = VizConstants.FLOW_TABLE
        if len(where) == 0:
            where = None
        else:
            where = [where]

        select_list = [
            VizConstants.FLOW_TABLE_UUID,
            self._VROUTER,
            self._SETUP_TIME,
            self._TEARDOWN_TIME,
            self._SOURCE_VN,
            self._DESTINATION_VN,
            self._SOURCE_IP,
            self._DESTINATION_IP,
            self._PROTOCOL,
            self._SOURCE_PORT,
            self._DESTINATION_PORT,
            self._ACTION,
            self._DIRECTION,
            VizConstants.FLOW_TABLE_AGG_BYTES,
            VizConstants.FLOW_TABLE_AGG_PKTS,
            self._SG_RULE_UUID,
            self._NW_ACE_UUID,
            self._VROUTER_IP,
            self._OTHER_VROUTER_IP,
            self._VMI_UUID,
            self._DROP_REASON
        ]
        if self._args.tunnel_info:
            select_list.append(self._UNDERLAY_PROTO)
            select_list.append(self._UNDERLAY_SPORT)

        if len(filter) == 0:
            filter = None

        flow_query = OpServerUtils.Query(table,
                                         start_time=start_time,
                                         end_time=end_time,
                                         select_fields=select_list,
                                         where=where,
                                         filter=filter,
                                         dir=self._args.direction)
        if self._args.verbose:
            print 'Performing query: {0}'.format(
                json.dumps(flow_query.__dict__))
        print ''
        resp = OpServerUtils.post_url_http(
            flow_url, json.dumps(flow_query.__dict__))
        result = {}
        if resp is not None:
            resp = json.loads(resp)
            qid = resp['href'].rsplit('/', 1)[1]
            result = OpServerUtils.get_query_result(
                self._args.analytics_api_ip, self._args.analytics_api_port, qid)
        return result
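The query above assembles its where clause as a list of OR groups, each group a list of ANDed match dicts, which is why the single group gets wrapped as [where]. A small illustration using the same OpServerUtils.Match helper; the field names are illustrative:

# One OR group containing two ANDed terms.
src = OpServerUtils.Match(name='sourcevn', value='vn1',
                          op=OpServerUtils.MatchOp.EQUAL)
dst = OpServerUtils.Match(name='destvn', value='vn2',
                          op=OpServerUtils.MatchOp.EQUAL)
where = [[src.__dict__, dst.__dict__]]   # [[AND-terms], [AND-terms], ...]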
Example #49
    def aggregate(self, key, flat, base_url=None):
        '''
        This function does parallel aggregation of this UVE's state.
        It aggregates across all sources and returns the global state of the UVE.
        '''
        result = {}
        try:
            for typ in self._state[key].keys():
                result[typ] = {}
                for objattr in self._state[key][typ].keys():
                    if self._is_sum(self._state[key][typ][objattr]):
                        sum_res = self._sum_agg(self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(sum_res)
                        else:
                            result[typ][objattr] = sum_res
                    elif self._is_union(self._state[key][typ][objattr]):
                        union_res = self._union_agg(
                            self._state[key][typ][objattr])
                        conv_res = None
                        if union_res.has_key('@ulink') and base_url and \
                                union_res['list']['@type'] == 'string':
                            uterms = union_res['@ulink'].split(":",1)

                            # This is the linked UVE's table name
                            m_table = uterms[0]

                            if self._rev_map.has_key(m_table):
                                h_table = self._rev_map[m_table]
                                conv_res = []
                                sname = ParallelAggregator.get_list_name(union_res)
                                for el in union_res['list'][sname]:
                                    lobj = {}
                                    lobj['name'] = el
                                    lobj['href'] = base_url + '/analytics/uves/' + \
                                        h_table + '/' + el
                                    if len(uterms) == 2:
                                        lobj['href'] = lobj['href'] + '?cfilt=' + uterms[1]
                                    else:
                                        lobj['href'] = lobj['href'] + '?flat'
                                    conv_res.append(lobj)
                        if flat:
                            if not conv_res:
                                result[typ][objattr] = \
                                        OpServerUtils.uve_attr_flatten(union_res)
                            else:
                                result[typ][objattr] = conv_res
                        else:
                            result[typ][objattr] = union_res
                    elif self._is_append(self._state[key][typ][objattr]):
                        result[typ][objattr] = self._append_agg(
                            self._state[key][typ][objattr])
                        append_res = ParallelAggregator.consolidate_list(
                            result, typ, objattr)

                        if flat:
                            result[typ][objattr] =\
                                OpServerUtils.uve_attr_flatten(append_res)
                        else:
                            result[typ][objattr] = append_res

                    else:
                        default_res = self._default_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            if (len(default_res) == 1):
                                result[typ][objattr] =\
                                    OpServerUtils.uve_attr_flatten(
                                        default_res[0][0])
                            else:
                                nres = []
                                for idx in range(len(default_res)):
                                    nres.append(default_res[idx])
                                    nres[idx][0] =\
                                        OpServerUtils.uve_attr_flatten(
                                            default_res[idx][0])
                                result[typ][objattr] = nres
                        else:
                            result[typ][objattr] = default_res
        except KeyError:
            pass
        return result
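The trickiest branch above rewrites a string union list into link objects when the attribute carries a '@ulink' hint of the form '<uve-table>[:<cfilt>]': the table name is reverse-mapped to its REST name and an href is built per element. A sketch of that href construction, with every value illustrative:

base_url = 'http://127.0.0.1:8081'
uterms = 'ObjectVNTable:UveVirtualNetworkAgent'.split(':', 1)  # '@ulink' hint
h_table = 'virtual-network'          # assumed reverse mapping of uterms[0]
el = 'vn1'                           # one element of the union list
href = base_url + '/analytics/uves/' + h_table + '/' + el
if len(uterms) == 2:
    href += '?cfilt=' + uterms[1]
else:
    href += '?flat'
print href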
Example #50
    def display(self, result):
        if result == [] or result is None:
            return
        messages_dict_list = result
        # Setup logger and syslog handler
        if self._args.send_syslog:
            logger = logging.getLogger()
            logger.setLevel(logging.DEBUG)
            syslog_handler = logging.handlers.SysLogHandler(
                address = (self._args.syslog_server, self._args.syslog_port))
            contrail_formatter = logging.Formatter('contrail: %(message)s')
            syslog_handler.setFormatter(contrail_formatter)
            logger.addHandler(syslog_handler)
            self._logger = logger

        # For json we will be outputting list of dicts so open the list here
        if self._args.json:
            first = True
            self.output('[', SandeshLevel.INVALID)
        for messages_dict in messages_dict_list:

            if VizConstants.TIMESTAMP in messages_dict:
                message_dt = datetime.datetime.fromtimestamp(
                    int(messages_dict[VizConstants.TIMESTAMP]) /
                    OpServerUtils.USECS_IN_SEC)
                message_dt += datetime.timedelta(
                    microseconds=
                    (int(messages_dict[VizConstants.TIMESTAMP]) %
                     OpServerUtils.USECS_IN_SEC))
                message_ts = message_dt.strftime(OpServerUtils.TIME_FORMAT_STR)
            else:
                message_ts = 'Time: NA'
            messages_dict[VizConstants.TIMESTAMP] = message_ts
            if VizConstants.SOURCE in messages_dict:
                source = messages_dict[VizConstants.SOURCE]
            else:
                source = 'Source: NA'
            if VizConstants.NODE_TYPE in messages_dict:
                node_type = messages_dict[VizConstants.NODE_TYPE]
            else:
                node_type = ''
            if VizConstants.MODULE in messages_dict:
                module = messages_dict[VizConstants.MODULE]
            else:
                module = 'Module: NA'
            if VizConstants.INSTANCE_ID in messages_dict:
                instance_id = messages_dict[VizConstants.INSTANCE_ID]
            else:
                instance_id = ''
            if VizConstants.MESSAGE_TYPE in messages_dict:
                message_type = messages_dict[VizConstants.MESSAGE_TYPE]
            else:
                message_type = 'Message Type: NA'
            if VizConstants.SANDESH_TYPE in messages_dict:
                sandesh_type = messages_dict[VizConstants.SANDESH_TYPE]
            else:
                sandesh_type = SandeshType.INVALID
            # By default SYS_DEBUG
            sandesh_level = SandeshLevel.SYS_DEBUG
            if self._args.object_type is None:
                if VizConstants.CATEGORY in messages_dict:
                    category = messages_dict[VizConstants.CATEGORY]
                else:
                    category = 'Category: NA'
                if VizConstants.LEVEL in messages_dict:
                    sandesh_level = messages_dict[VizConstants.LEVEL]
                    level = SandeshLevel._VALUES_TO_NAMES[sandesh_level]
                else:
                    level = 'Level: NA'
                messages_dict[VizConstants.LEVEL] = level
                if VizConstants.SEQUENCE_NUM in messages_dict:
                    seq_num = messages_dict[VizConstants.SEQUENCE_NUM]
                else:
                    seq_num = 'Sequence Number: NA'
                if VizConstants.DATA in messages_dict:
                    # Convert XML data to dict
                    if self._args.raw:
                        data_str = messages_dict[VizConstants.DATA]
                    else:
                        OpServerUtils.messages_xml_data_to_dict(
                            messages_dict, VizConstants.DATA)
                        if isinstance(messages_dict[VizConstants.DATA], dict):
                            data_dict = messages_dict[VizConstants.DATA]
                            data_str = OpServerUtils.messages_data_dict_to_str(
                                data_dict, message_type, sandesh_type)
                        else:
                            data_str = messages_dict[VizConstants.DATA]
                else:
                    data_str = 'Data not present'
                if self._args.json:
                    if not first:
                        self.output(", ", sandesh_level)
                    else:
                        first = False
                    OpServerUtils.messages_dict_scrub(messages_dict)
                    self.output(messages_dict, sandesh_level)
                else:
                    if self._args.trace is not None:
                        trace_str = '{0} {1}:{2} {3}'.format(
                            message_ts, message_type, seq_num, data_str)
                        self.output(trace_str, sandesh_level)
                    else:
                        log_str = \
                            '{0} {1} [{2}:{3}:{4}:{5}][{6}] : {7}:{8} {9}'.format(
                            message_ts, source, node_type, module, instance_id,
                            category, level, message_type, seq_num, data_str)
                        self.output(log_str, sandesh_level)
            else:
                if self._args.object_values is True:
                    if OpServerUtils.OBJECT_ID in messages_dict:
                        obj_str = messages_dict[OpServerUtils.OBJECT_ID]
                        print obj_str
                        continue
                for obj_sel_field in self._args.object_select_field:
                    if obj_sel_field in messages_dict:
                        if self._args.raw:
                            data_str = messages_dict[obj_sel_field]
                        else:
                            # Convert XML data to dict
                            OpServerUtils.messages_xml_data_to_dict(
                                messages_dict, obj_sel_field)
                            if isinstance(messages_dict[obj_sel_field], dict):
                                data_dict = messages_dict[obj_sel_field]
                                data_str =\
                                    OpServerUtils.messages_data_dict_to_str(
                                        data_dict, message_type,
                                        sandesh_type)
                            else:
                                data_str = messages_dict[obj_sel_field]
                        if data_str:
                            obj_str = '{0} {1} [{2}:{3}:{4}] : {5}: {6}'.format(
                                message_ts, source, node_type, module,
                                instance_id, message_type, data_str)
                            if self._args.json:
                                if not first:
                                    self.output(", ", sandesh_level)
                                else:
                                    first = False
                                OpServerUtils.messages_dict_scrub(messages_dict)
                                self.output(messages_dict, sandesh_level)
                            else:
                                self.output(obj_str, sandesh_level)
        # For json we will be outputting list of dicts so close the list here
        if self._args.json:
            self.output(']', SandeshLevel.INVALID)
Example #51
    def query(self):
        if self._args.tail and (self._args.send_syslog or self._args.reverse or
                self._args.start_time or self._args.end_time):
            invalid_combination = " --tail"
            if self._args.send_syslog:
                invalid_combination += ", --send-syslog"
            if self._args.reverse:
                invalid_combination += ", --reverse"
            if self._args.start_time:
                invalid_combination += ", --start-time"
            if self._args.end_time:
                invalid_combination += ", --end-time"
            print "Combination of options" + invalid_combination + " is not valid."
            return -1
        global output_file_handle
        if self._args.output_file is not None:
            if output_file_handle is None:
                # Open the file for writing
                try:
                    if self._args.tail:
                        output_file_handle = open(self._args.output_file, "a")
                    else:
                        output_file_handle = open(self._args.output_file, "w")
                except Exception as e:
                    print e
                    print "Exception occurred when creating/opening file %s" % \
                        self._args.output_file
                    return -1

        start_time, end_time = self._start_time, self._end_time
        if self._args.message_types is True:
            command_str = ("contrail-stats --table FieldNames.fields" +
               " --where name=MessageTable:Messagetype --select name fields.value" +
               " --start-time " + str(start_time) +
               " --end-time " + str(end_time) +
               " --analytics-api-ip " + str(self._args.analytics_api_ip) +
               " --analytics-api-port " + str(self._args.analytics_api_port))
            res = commands.getoutput(command_str)
            res = res.splitlines()
            res = res[1:]
            for r in res:
                print ast.literal_eval(r)['fields.value']
            return None
        messages_url = OpServerUtils.opserver_query_url(
            self._args.analytics_api_ip,
            self._args.analytics_api_port)
        where_msg = []
        where_obj = []
        and_filter = []
        or_filter = []
        if self._args.source is not None:
            source_match = OpServerUtils.Match(name=VizConstants.SOURCE,
                                               value=self._args.source,
                                               op=OpServerUtils.MatchOp.EQUAL)
            where_msg.append(source_match.__dict__)

        if self._args.module is not None:
            module_match = OpServerUtils.Match(name=VizConstants.MODULE,
                                               value=self._args.module,
                                               op=OpServerUtils.MatchOp.EQUAL)
            where_msg.append(module_match.__dict__)

        if self._args.category is not None:
            category_match = OpServerUtils.Match(
                name=VizConstants.CATEGORY,
                value=self._args.category,
                op=OpServerUtils.MatchOp.EQUAL)
            where_msg.append(category_match.__dict__)

        if self._args.message_type is not None:
            message_type_match = OpServerUtils.Match(
                name=VizConstants.MESSAGE_TYPE,
                value=self._args.message_type,
                op=OpServerUtils.MatchOp.EQUAL)
            where_msg.append(message_type_match.__dict__)

        if self._args.level is not None:
            level_match = OpServerUtils.Match(
                name=VizConstants.LEVEL,
                value=SandeshLevel._NAMES_TO_VALUES[self._args.level],
                op=OpServerUtils.MatchOp.LEQ)
            and_filter.append(level_match.__dict__)

        if self._args.node_type is not None:
            node_type_match = OpServerUtils.Match(
                name=VizConstants.NODE_TYPE,
                value=self._args.node_type,
                op=OpServerUtils.MatchOp.EQUAL)
            and_filter.append(node_type_match.__dict__)

        if self._args.instance_id is not None:
            instance_id_match = OpServerUtils.Match(
                name=VizConstants.INSTANCE_ID,
                value=self._args.instance_id,
                op=OpServerUtils.MatchOp.EQUAL)
            and_filter.append(instance_id_match.__dict__)

        # Object logs :
        # --object-type <> : All logs for the particular object type
        # --object-type <> --object-values : Object-id values for the particular
        #     object type
        # --object-type <> --object-id <> : All logs matching object-id for
        #     particular object type
        if (self._args.object_type is not None or
           self._args.object_id is not None or
           self._args.object_select_field is not None or
           self._args.object_values is True):
            # Validate object-type
            if self._args.object_type is not None:
                if self._args.object_type in OBJECT_TYPE_LIST:
                    if self._args.object_type in OBJECT_TABLE_MAP:
                        table = OBJECT_TABLE_MAP[self._args.object_type]
                    else:
                        print 'Table not found for object-type [%s]' % \
                            (self._args.object_type)
                        return -1
                else:
                    print 'Unknown object-type [%s]' % (self._args.object_type)
                    return -1
            else:
                print 'Object-type required for query'
                return -1
            # Validate object-id and object-values
            if self._args.object_id is not None and \
               self._args.object_values is False:
                object_id = self._args.object_id
                if object_id.endswith("*"):
                    id_match = OpServerUtils.Match(
                        name=OpServerUtils.OBJECT_ID,
                        value=object_id[:-1],
                        op=OpServerUtils.MatchOp.PREFIX) 
                else:
                    id_match = OpServerUtils.Match(
                        name=OpServerUtils.OBJECT_ID,
                        value=object_id,
                        op=OpServerUtils.MatchOp.EQUAL)
                where_obj.append(id_match.__dict__)
            elif self._args.object_id is not None and \
               self._args.object_values is True:
                print 'Please specify either object-id or object-values but not both'
                return -1

            if self._args.object_values is False:
                if self._args.object_select_field is not None:
                    obj_sel_field = self._args.object_select_field
                    if not isinstance(self._args.object_select_field, list):
                        obj_sel_field = [self._args.object_select_field]
                    if VizConstants.OBJECT_LOG in obj_sel_field or \
                       VizConstants.SYSTEM_LOG in obj_sel_field:
                        self._args.object_select_field = obj_sel_field
                    else:
                        print 'Invalid object-select-field. '\
                            'Valid values are "%s" or "%s"' \
                            % (VizConstants.OBJECT_LOG,
                               VizConstants.SYSTEM_LOG)
                        return -1
                else:
                    self._args.object_select_field = obj_sel_field = [
                        VizConstants.OBJECT_LOG, VizConstants.SYSTEM_LOG]
                select_list = [
                    VizConstants.TIMESTAMP,
                    VizConstants.SOURCE,
                    VizConstants.MODULE,
                    VizConstants.MESSAGE_TYPE,
                ] + obj_sel_field
            else:
                if self._args.object_select_field:
                    print 'Please specify either object-id with ' + \
                        'object-select-field or only object-values'
                    return -1
                if len(where_msg):
                    options = [where['name'] for where in where_msg]
                    print 'Invalid/unsupported where-clause options %s for object-values query' % str(options)
                    return -1
                select_list = [
                    OpServerUtils.OBJECT_ID
                ]

            if len(where_obj) or len(where_msg):
                where = [where_obj + where_msg]
            else:
                where = None

        elif self._args.trace is not None:
            table = VizConstants.COLLECTOR_GLOBAL_TABLE
            if self._args.source is None:
                print 'Source is required for trace buffer dump'
                return -1
            if self._args.module is None:
                print 'Module is required for trace buffer dump'
                return -1
            trace_buf_match = OpServerUtils.Match(
                name=VizConstants.CATEGORY,
                value=self._args.trace,
                op=OpServerUtils.MatchOp.EQUAL)
            where_msg.append(trace_buf_match.__dict__)
            where = [where_msg]
            select_list = [
                VizConstants.TIMESTAMP,
                VizConstants.MESSAGE_TYPE,
                VizConstants.SEQUENCE_NUM,
                VizConstants.DATA,
                VizConstants.SANDESH_TYPE
            ]
            sandesh_type_filter = OpServerUtils.Match(
                name=VizConstants.SANDESH_TYPE,
                value=str(
                    SandeshType.TRACE),
                op=OpServerUtils.MatchOp.EQUAL)
            and_filter.append(sandesh_type_filter.__dict__)
        else:
            # Message Table Query
            table = VizConstants.COLLECTOR_GLOBAL_TABLE

            if len(where_msg):
                where = [where_msg]
            else:
                where = None

            select_list = [
                VizConstants.TIMESTAMP,
                VizConstants.SOURCE,
                VizConstants.MODULE,
                VizConstants.CATEGORY,
                VizConstants.MESSAGE_TYPE,
                VizConstants.SEQUENCE_NUM,
                VizConstants.DATA,
                VizConstants.SANDESH_TYPE,
                VizConstants.LEVEL,
                VizConstants.NODE_TYPE,
                VizConstants.INSTANCE_ID,
            ]

        filter = None
        if len(or_filter):
            filter = [and_filter+[filt] for filt in or_filter]
        elif len(and_filter):
            filter = [and_filter]

        if self._args.keywords is not None:
            p = re.compile(r'\s*,\s*|\s+')
            if where is None:
                where = [[]]
            for kwd in p.split(self._args.keywords):
                message_type_match = OpServerUtils.Match(
                    name=VizConstants.KEYWORD,
                    value=kwd,
                    op=OpServerUtils.MatchOp.EQUAL)
                for wc in where:
                    wc.append(message_type_match.__dict__)

        # Add sort by timestamp for non object value queries
        sort_op = None
        sort_fields = None
        if self._args.object_values is False:
            if self._args.reverse:
                sort_op = OpServerUtils.SortOp.DESCENDING
            else:
                sort_op = OpServerUtils.SortOp.ASCENDING
            sort_fields = [VizConstants.TIMESTAMP]

        if self._args.limit:
            limit = int(self._args.limit)
        else:
            limit = None

        messages_query = OpServerUtils.Query(table,
                                             start_time=start_time,
                                             end_time=end_time,
                                             select_fields=select_list,
                                             where=where,
                                             filter=filter,
                                             sort=sort_op,
                                             sort_fields=sort_fields,
                                             limit=limit)
        if self._args.verbose:
            print 'Performing query: {0}'.format(
                json.dumps(messages_query.__dict__))
        resp = OpServerUtils.post_url_http(
            messages_url, json.dumps(messages_query.__dict__))
        result = {}
        if resp is not None:
            resp = json.loads(resp)
            qid = resp['href'].rsplit('/', 1)[1]
            result = OpServerUtils.get_query_result(
                self._args.analytics_api_ip, self._args.analytics_api_port, qid)
        return result
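The keyword handling above fans each keyword out across every OR group of the where clause, splitting --keywords on commas and/or whitespace. A self-contained illustration of that split:

import re

# Same pattern the query uses to tokenize --keywords.
p = re.compile(r'\s*,\s*|\s+')
print p.split('error, warn timeout')   # -> ['error', 'warn', 'timeout']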
Example #52
    def aggregate(self, key, flat, base_url=None):
        '''
        This function does parallel aggregation of this UVE's state.
        It aggregates across all sources and returns the global state of the UVE.
        '''
        result = {}
        ltyp = None
        objattr = None
        try:
            for typ in self._state[key].keys():
                ltyp = typ
                result[typ] = {}
                for objattr in self._state[key][typ].keys():
                    if self._is_elem_sum(self._state[key][typ][objattr]):
                        sume_res = self._elem_sum_agg(self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(sume_res)
                        else:
                            result[typ][objattr] = sume_res
                    elif self._is_struct_sum(self._state[key][typ][objattr]):
                        sums_res = self._struct_sum_agg(self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(sums_res)
                        else:
                            result[typ][objattr] = sums_res
                    elif self._is_list_union(self._state[key][typ][objattr]):
                        unionl_res = self._list_union_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(unionl_res)
                        else:
                            result[typ][objattr] = unionl_res
                    elif self._is_map_union(self._state[key][typ][objattr]):
                        unionm_res = self._map_union_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(unionm_res)
                        else:
                            result[typ][objattr] = unionm_res
                    elif self._is_append(self._state[key][typ][objattr]):
                        result[typ][objattr] = self._append_agg(
                            self._state[key][typ][objattr])
                        append_res = ParallelAggregator.consolidate_list(
                            result, typ, objattr)

                        if flat:
                            result[typ][objattr] =\
                                OpServerUtils.uve_attr_flatten(append_res)
                        else:
                            result[typ][objattr] = append_res

                    else:
                        default_res = self._default_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            if (len(default_res) == 1):
                                result[typ][objattr] =\
                                    OpServerUtils.uve_attr_flatten(
                                        default_res[0][0])
                            else:
                                nres = []
                                for idx in range(len(default_res)):
                                    nres.append(default_res[idx])
                                    nres[idx][0] =\
                                        OpServerUtils.uve_attr_flatten(
                                            default_res[idx][0])
                                result[typ][objattr] = nres
                        else:
                            result[typ][objattr] = default_res
        except KeyError:
            pass
        except Exception as ex:
            print "Aggregation Error key %s type %s attr %s: %s" % \
                    (key, str(ltyp), str(objattr), str(ex))
        return result
Example #53
    def display(self, result):
        if result == [] or result is None:
            return
        flow_dict_list = result
    
        for flow_dict in flow_dict_list:
            # Setup time
            if self._SETUP_TIME in flow_dict and\
                flow_dict[self._SETUP_TIME] is not None:
                setup_time = int(flow_dict[self._SETUP_TIME])
                if setup_time != 0:
                    setup_dt = datetime.datetime.fromtimestamp(
                        setup_time /
                        OpServerUtils.USECS_IN_SEC)
                    setup_dt += datetime.timedelta(
                        microseconds=
                        (setup_time %
                         OpServerUtils.USECS_IN_SEC))
                    setup_ts = setup_dt.strftime(
                        OpServerUtils.TIME_FORMAT_STR)
                else:
                    setup_ts = 'Setup Time: NA'
            else:
                setup_ts = 'Setup Time: NA'
            # Teardown time
            if self._TEARDOWN_TIME in flow_dict and\
                flow_dict[self._TEARDOWN_TIME] is not None:
                teardown_time = int(flow_dict[self._TEARDOWN_TIME])
                if teardown_time != 0:
                    teardown_dt = datetime.datetime.fromtimestamp(
                        teardown_time /
                        OpServerUtils.USECS_IN_SEC)
                    teardown_dt += datetime.timedelta(
                        microseconds=
                        (teardown_time %
                         OpServerUtils.USECS_IN_SEC))
                    teardown_ts = teardown_dt.strftime(
                        OpServerUtils.TIME_FORMAT_STR)
                else:
                    teardown_ts = 'Active'
            else:
                teardown_ts = 'Active'
            # VRouter
            if self._VROUTER in flow_dict and\
                flow_dict[self._VROUTER] is not None:
                vrouter = flow_dict[self._VROUTER]
            else:
                vrouter = 'VRouter: NA'
            # Direction 
            if self._DIRECTION in flow_dict and\
                flow_dict[self._DIRECTION] is not None:
                direction = int(flow_dict[self._DIRECTION])
                if direction == 1:
                    direction = 'ingress'
                elif direction == 0:
                    direction = 'egress'
                else:
                    direction = 'Direction: Invalid'
            else:
                direction = 'Direction: NA'
            # Flow UUID 
            if VizConstants.FLOW_TABLE_UUID in flow_dict and\
                flow_dict[VizConstants.FLOW_TABLE_UUID] is not None:
                flow_uuid = flow_dict[VizConstants.FLOW_TABLE_UUID]
            else:
                flow_uuid = 'UUID: NA'
            # Source VN
            if self._SOURCE_VN in flow_dict and\
                flow_dict[self._SOURCE_VN] is not None:
                source_vn = flow_dict[self._SOURCE_VN]
            else:
                source_vn = 'Source VN: NA'
            # Destination VN
            if self._DESTINATION_VN in flow_dict and\
                flow_dict[self._DESTINATION_VN] is not None:
                destination_vn = flow_dict[self._DESTINATION_VN]
            else:
                destination_vn = 'Destination VN: NA'
            # Source IP 
            if self._SOURCE_IP in flow_dict and\
                flow_dict[self._SOURCE_IP] is not None:
                source_ip = flow_dict[self._SOURCE_IP]
            else:
                source_ip = 'Source IP: NA'
            # Destination IP 
            if self._DESTINATION_IP in flow_dict and\
                flow_dict[self._DESTINATION_IP] is not None:
                destination_ip = flow_dict[self._DESTINATION_IP]
            else:
                destination_ip = 'Destination IP: NA'
            # Source port 
            if self._SOURCE_PORT in flow_dict and\
                flow_dict[self._SOURCE_PORT] is not None:
                source_port = flow_dict[self._SOURCE_PORT]
            else:
                source_port = 'Source Port: NA'
            # Destination port 
            if self._DESTINATION_PORT in flow_dict and\
                flow_dict[self._DESTINATION_PORT] is not None:
                destination_port = flow_dict[self._DESTINATION_PORT]
            else:
                destination_port = 'Destination Port: NA'
            # Protocol
            if self._PROTOCOL in flow_dict and\
                flow_dict[self._PROTOCOL] is not None:
                protocol = OpServerUtils.ip_protocol_to_str(
                    int(flow_dict[self._PROTOCOL]))
            else:
                protocol = 'Protocol: NA'
            # Action 
            if self._ACTION in flow_dict and\
                flow_dict[self._ACTION] is not None:
                action = flow_dict[self._ACTION]
            else:
                action = ''
            # Agg packets and bytes
            if VizConstants.FLOW_TABLE_AGG_BYTES in flow_dict and\
                flow_dict[VizConstants.FLOW_TABLE_AGG_BYTES] is not None:
                agg_bytes = int(flow_dict[VizConstants.FLOW_TABLE_AGG_BYTES])
            else:
                agg_bytes = 'Agg Bytes: NA'
            if VizConstants.FLOW_TABLE_AGG_PKTS in flow_dict and\
                flow_dict[VizConstants.FLOW_TABLE_AGG_PKTS] is not None:
                agg_pkts = int(flow_dict[VizConstants.FLOW_TABLE_AGG_PKTS])
            else:
                agg_pkts = 'Agg Packets: NA'
            # SG rule UUID
            if self._SG_RULE_UUID in flow_dict and\
                flow_dict[self._SG_RULE_UUID] is not None:
                sg_rule_uuid = flow_dict[self._SG_RULE_UUID]
            else:
                sg_rule_uuid = None
            # NW ACE UUID
            if self._NW_ACE_UUID in flow_dict and\
                flow_dict[self._NW_ACE_UUID] is not None:
                nw_ace_uuid = flow_dict[self._NW_ACE_UUID]
            else:
                nw_ace_uuid = None
            # VRouter IP
            if self._VROUTER_IP in flow_dict and\
                flow_dict[self._VROUTER_IP] is not None:
                vrouter_ip = '/' + flow_dict[self._VROUTER_IP]
            else:
                vrouter_ip = ''
            # Other VRouter IP
            if self._OTHER_VROUTER_IP in flow_dict and\
                flow_dict[self._OTHER_VROUTER_IP] is not None:
                other_vrouter_ip = ' [DST-VR:' + flow_dict[self._OTHER_VROUTER_IP] + ']'
            else:
                other_vrouter_ip = ''

            if self._VMI_UUID in flow_dict and (
                    flow_dict[self._VMI_UUID] is not None):
                src_vmi_uuid = ' [SRC VMI UUID:' + flow_dict[self._VMI_UUID] + ']'
            else:
                src_vmi_uuid = ' [SRC VMI UUID: None]'

            # Underlay info
            if self._UNDERLAY_PROTO in flow_dict and\
                flow_dict[self._UNDERLAY_PROTO] is not None:
                tunnel_proto = 'T:' + OpServerUtils.tunnel_type_to_str(flow_dict[self._UNDERLAY_PROTO])
            else:
                tunnel_proto = None
            if self._UNDERLAY_SPORT in flow_dict and\
                flow_dict[self._UNDERLAY_SPORT] is not None:
                tunnel_sport = 'Src Port:' + str(flow_dict[self._UNDERLAY_SPORT]) + ' '
                if tunnel_proto:
                    tunnel_info = tunnel_proto + '/' + tunnel_sport
                else:
                    tunnel_info = tunnel_sport
            else:
                tunnel_sport = None
                if tunnel_proto:
                    tunnel_info = tunnel_proto
                else:
                    tunnel_info = ''
            # Drop Reason
            if self._DROP_REASON in flow_dict and\
                flow_dict[self._DROP_REASON] is not None:
                drop_reason = flow_dict[self._DROP_REASON]
            else:
                drop_reason = ''

            output_dict = {}
            output_dict['vrouter'] = vrouter
            output_dict['vrouter_ip'] = vrouter_ip
            output_dict['direction'] = direction
            output_dict['action'] = action
            output_dict['drop_reason'] = drop_reason
            output_dict['setup_ts'] = setup_ts
            output_dict['teardown_ts'] = teardown_ts
            output_dict['protocol'] = protocol
            output_dict['source_vn'] = source_vn
            output_dict['source_ip'] = source_ip
            output_dict['source_port'] = source_port
            output_dict['src_vmi_uuid'] = src_vmi_uuid
            output_dict['destination_vn'] = destination_vn
            output_dict['destination_ip'] = destination_ip
            output_dict['destination_port'] = destination_port
            output_dict['other_vrouter_ip'] = other_vrouter_ip
            output_dict['agg_pkts'] = agg_pkts
            output_dict['agg_bytes'] = agg_bytes
            output_dict['sg_rule_uuid'] = sg_rule_uuid
            output_dict['nw_ace_uuid'] = nw_ace_uuid
            output_dict['tunnel_info'] = tunnel_info
            output_dict['flow_uuid'] = flow_uuid
            self.output(output_dict)
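
The field extraction above repeats a single pattern: use the value only when the key is present and not None, otherwise fall back to a default. A minimal refactoring sketch (not part of the original script; the helper name is illustrative):

def get_field(flow_dict, key, default=None, fmt=None):
    # Return flow_dict[key], optionally formatted, or default when the
    # key is absent or maps to None (sketch only; helper is hypothetical)
    val = flow_dict.get(key)
    if val is None:
        return default
    return fmt(val) if fmt else val

# e.g. the VRouter IP block above would become:
# vrouter_ip = get_field(flow_dict, self._VROUTER_IP, '', lambda v: '/' + v)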
예제 #54
0
    def parse_args(self):
        """
        Eg. python flow.py --analytics-api-ip 127.0.0.1
                          --analytics-api-port 8081
                          --vrouter a6s23
                          --source-vn default-domain:default-project:vn1
                          --destination-vn default-domain:default-project:vn2
                          --source-ip 1.1.1.1
                          --destination-ip 2.2.2.2
                          --protocol TCP
                          --source-port 32678
                          --destination-port 80
                          --action drop
                          --direction ingress
                          --vrouter-ip 172.16.0.1
                          --other-vrouter-ip 172.32.0.1
                          --tunnel-info
                          [--start-time now-10m --end-time now] | --last 10m
        """
        defaults = {
            'analytics_api_ip': '127.0.0.1',
            'analytics_api_port': '8081',
            'start_time': 'now-10m',
            'end_time': 'now',
            'direction' : 'ingress',
        }

        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.set_defaults(**defaults)
        parser.add_argument("--analytics-api-ip",
            help="IP address of Analytics API Server")
        parser.add_argument("--analytics-api-port",
            help="Port of Analytics API Server")
        parser.add_argument(
            "--start-time", help="Flow record start time (format now-10m, now-1h)")
        parser.add_argument("--end-time", help="Flow record end time")
        parser.add_argument(
            "--last", help="Flow records from last time period (format 10m, 1d)")
        parser.add_argument("--vrouter", help="Flow records from vrouter")
        parser.add_argument("--source-vn",
            help="Flow records with source virtual network")
        parser.add_argument("--destination-vn",
            help="Flow records with destination virtual network")
        parser.add_argument("--source-ip",
            help="Flow records with source IP address")
        parser.add_argument("--destination-ip",
            help="Flow records with destination IP address")
        parser.add_argument("--protocol", help="Flow records with protocol")
        parser.add_argument("--source-port",
            help="Flow records with source port", type=int)
        parser.add_argument("--destination-port",
            help="Flow records with destination port", type=int)
        parser.add_argument("--action", help="Flow records with action")
        parser.add_argument("--direction", help="Flow direction",
            choices=['ingress', 'egress'])
        parser.add_argument("--vrouter-ip",
            help="Flow records from vrouter IP address")
        parser.add_argument("--other-vrouter-ip",
            help="Flow records to vrouter IP address")
        parser.add_argument("--tunnel-info", action="store_true",
            help="Show flow tunnel information")
	parser.add_argument("--vmi-uuid",
            help="Show vmi uuid information")
        parser.add_argument(
            "--verbose", action="store_true", help="Show internal information")        
        self._args = parser.parse_args()

        try:
            self._start_time, self._end_time = \
                OpServerUtils.parse_start_end_time(
                    start_time = self._args.start_time,
                    end_time = self._args.end_time,
                    last = self._args.last)
        except Exception:
            # invalid --start-time/--end-time/--last combination
            return -1

        # Validate flow arguments
        if self._args.source_ip is not None and self._args.source_vn is None:
            print 'Please provide source virtual network in addition to '\
                'source IP address'
            return -1
        if self._args.destination_ip is not None and \
                self._args.destination_vn is None:
            print 'Please provide destination virtual network in addition to '\
                'destination IP address'
            return -1
        if self._args.source_port is not None and self._args.protocol is None:
            print 'Please provide protocol in addition to source port'
            return -1
        if self._args.destination_port is not None and \
                self._args.protocol is None:
            print 'Please provide protocol in addition to '\
                'destination port'
            return -1

        # Convert direction
        if self._args.direction.lower() == "ingress":
            self._args.direction = 1
        elif self._args.direction.lower() == "egress":
            self._args.direction = 0
        else:
            print 'Direction should be ingress or egress'
            return -1

        # Protocol
        if self._args.protocol is not None:
            if self._args.protocol.isalpha():
                protocol = OpServerUtils.str_to_ip_protocol(
                    self._args.protocol)
                if protocol == -1:
                    print 'Please provide valid protocol'
                    return -1
                self._args.protocol = protocol

        return 0
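
For reference, the time-window logic above accepts either an explicit window or a relative one, matching the docstring's `[--start-time now-10m --end-time now] | --last 10m`. A minimal sketch of the two call shapes, assuming the OpServerUtils.parse_start_end_time keyword signature used in parse_args:

# Sketch: two equivalent ways to ask for the last ten minutes
# (assumes the keyword signature used in parse_args above)
start, end = OpServerUtils.parse_start_end_time(
    start_time='now-10m', end_time='now', last=None)
start, end = OpServerUtils.parse_start_end_time(
    start_time=None, end_time=None, last='10m')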
예제 #55
0
    def aggregate(self, key, flat, base_url = None):
        '''
        This function does parallel aggregation of this UVE's state.
        It aggregates across all sources and returns the global state of
        the UVE. (A sketch of the expected state shape follows this example.)
        '''
        result = {}
        ltyp = None
        objattr = None
        try:
            for typ in self._state[key].keys():
                ltyp = typ
                result[typ] = {}
                for objattr in self._state[key][typ].keys():
                    if self._is_elem_sum(self._state[key][typ][objattr]):
                        sume_res = self._elem_sum_agg(self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(sume_res)
                        else:
                            result[typ][objattr] = sume_res
                    elif self._is_struct_sum(self._state[key][typ][objattr]):
                        sums_res = self._struct_sum_agg(self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(sums_res)
                        else:
                            result[typ][objattr] = sums_res
                    elif self._is_list_union(self._state[key][typ][objattr]):
                        unionl_res = self._list_union_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(unionl_res)
                        else:
                            result[typ][objattr] = unionl_res
                    elif self._is_map_union(self._state[key][typ][objattr]):
                        unionm_res = self._map_union_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(unionm_res)
                        else:
                            result[typ][objattr] = unionm_res
                    elif self._is_append(self._state[key][typ][objattr]):
                        result[typ][objattr] = self._append_agg(
                            self._state[key][typ][objattr])
                        append_res = ParallelAggregator.consolidate_list(
                            result, typ, objattr)

                        if flat:
                            result[typ][objattr] =\
                                OpServerUtils.uve_attr_flatten(append_res)
                        else:
                            result[typ][objattr] = append_res

                    else:
                        default_res = self._default_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            if (len(default_res) == 1):
                                result[typ][objattr] =\
                                    OpServerUtils.uve_attr_flatten(
                                        default_res[0][0])
                            else:
                                nres = []
                                for idx in range(len(default_res)):
                                    nres.append(default_res[idx])
                                    nres[idx][0] =\
                                        OpServerUtils.uve_attr_flatten(
                                            default_res[idx][0])
                                result[typ][objattr] = nres
                        else:
                            result[typ][objattr] = default_res
        except KeyError:
            # no state recorded for this key; fall through with partial result
            pass
        except Exception as ex:
            print "Aggregation Error key %s type %s attr %s in %s: %s" % \
                    (key, str(ltyp), str(objattr),
                     str(self._state[key][ltyp][objattr]), str(ex))
        return result
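
For orientation, the nested structure this aggregator walks is state[key][type][attribute][origin]. An illustrative sketch (UVE, type, and attribute names are hypothetical; the leaf values mirror what xmltodict produces):

# Illustrative only: each "source:module" origin holds that origin's
# parsed value, and aggregate() folds them into one global value.
state = {
    'ObjectVNTable:vn1': {                          # key (UVE name)
        'UveVirtualNetworkAgent': {                 # typ (struct type)
            'in_tpkts': {                           # objattr
                'host1:contrail-vrouter-agent':     # source:module
                    {'@type': 'i64', '#text': '42'},
                'host2:contrail-vrouter-agent':
                    {'@type': 'i64', '#text': '17'},
            },
        },
    },
}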
예제 #56
0
    def get_uve(self, key, flat, sfilter=None, mfilter=None, tfilter=None, multi=False):
        state = {}
        state[key] = {}
        statdict = {}
        rsp = {}  # ensure rsp is defined even if no redis instance responds
        for redis_uve in self._redis_uve_list:
            redish = redis.StrictRedis(host=redis_uve[0],
                                       port=redis_uve[1], db=1)
            try:
                qmap = {}
                for origs in redish.smembers("ORIGINS:" + key):
                    info = origs.rsplit(":", 1)
                    sm = info[0].split(":", 1)
                    source = sm[0]
                    if sfilter is not None:
                        if sfilter != source:
                            continue
                    mdule = sm[1]
                    if mfilter is not None:
                        if mfilter != mdule:
                            continue
                    dsource = source + ":" + mdule

                    typ = info[1]
                    if tfilter is not None:
                        if typ not in tfilter:
                            continue

                    odict = redish.hgetall("VALUES:" + key + ":" + origs)

                    afilter_list = set()
                    if tfilter is not None:
                        afilter_list = tfilter[typ]
                    for attr, value in odict.iteritems():
                        if len(afilter_list):
                            if attr not in afilter_list:
                                continue

                        if typ not in state[key]:
                            state[key][typ] = {}

                        if value[0] == '<':
                            snhdict = xmltodict.parse(value)
                            if snhdict[attr]['@type'] == 'list':
                                if snhdict[attr]['list']['@size'] == '0':
                                    continue
                                elif snhdict[attr]['list']['@size'] == '1':
                                    sname = ParallelAggregator.get_list_name(
                                        snhdict[attr])
                                    if not isinstance(
                                        snhdict[attr]['list'][sname], list):
                                        snhdict[attr]['list'][sname] = [
                                            snhdict[attr]['list'][sname]]
                        else:
                            if not flat:
                                continue
                            if typ not in statdict:
                                statdict[typ] = {}
                            statdict[typ][attr] = []
                            statsattr = json.loads(value)
                            for elem in statsattr:
                                edict = {}
                                if elem["rtype"] == "list":
                                    elist = redish.lrange(elem["href"], 0, -1)
                                    for eelem in elist:
                                        jj = json.loads(eelem).items()
                                        edict[jj[0][0]] = jj[0][1]
                                elif elem["rtype"] == "zset":
                                    elist = redish.zrange(
                                        elem["href"], 0, -1, withscores=True)
                                    for eelem in elist:
                                        tdict = json.loads(eelem[0])
                                        tval = long(tdict["ts"])
                                        dt = datetime.datetime.utcfromtimestamp(
                                            float(tval) / 1000000)
                                        tms = (tval % 1000000) / 1000
                                        tstr = dt.strftime('%Y %b %d %H:%M:%S')
                                        edict[tstr + "." + str(tms)] = eelem[1]
                                elif elem["rtype"] == "hash":
                                    elist = redish.hgetall(elem["href"])
                                    edict = elist
                                elif elem["rtype"] == "query":
                                    if sfilter is None and mfilter is None and not multi:
                                        qdict = {}
                                        qdict["table"] = elem["aggtype"]
                                        qdict["select_fields"] = elem["select"]
                                        qdict["where"] =[[{"name":"name",
                                            "value":key.split(":",1)[1],
                                            "op":1}]]
                                        qmap[elem["aggtype"]] = {"query":qdict,
                                            "type":typ, "attr":attr}
                                    # For the stats query case, defer processing
                                    continue

                                statdict[typ][attr].append(
                                    {elem["aggtype"]: edict})
                            continue

                        # print "Attr %s Value %s" % (attr, snhdict)
                        if attr not in state[key][typ]:
                            state[key][typ][attr] = {}
                        if dsource in state[key][typ][attr]:
                            print "Found Dup %s:%s:%s:%s:%s = %s" % \
                                (key, typ, attr, source, mdule, state[
                                key][typ][attr][dsource])
                        state[key][typ][attr][dsource] = snhdict[attr]
                
                if len(qmap):
                    url = OpServerUtils.opserver_query_url(
                        self._local_redis_uve[0],
                        str(8081))
                    for t,q in qmap.iteritems():
                        try:
                            q["query"]["end_time"] = OpServerUtils.utc_timestamp_usec()
                            q["query"]["start_time"] = qdict["end_time"] - (3600 * 1000000)
                            json_str = json.dumps(q["query"])
                            resp = OpServerUtils.post_url_http(url, json_str, True)
                            if resp is not None:
                                edict = json.loads(resp)
                                edict = edict['value']
                                statdict[q["type"]][q["attr"]].append(
                                    {t: edict})
                        except Exception as e:
                            print "Stats Query Exception:" + str(e)
                        
                if sfilter is None and mfilter is None:
                    for ptyp in redish.smembers("PTYPES:" + key):
                        afilter = None
                        if tfilter is not None:
                            if ptyp not in tfilter:
                                continue
                            afilter = tfilter[ptyp]
                        existing = redish.hgetall("PREVIOUS:" + key + ":" + ptyp)
                        nstate = UVEServer.convert_previous(
                            existing, state, key, ptyp, afilter)
                        state = copy.deepcopy(nstate)

                pa = ParallelAggregator(state)
                rsp = pa.aggregate(key, flat)
            except redis.exceptions.ConnectionError:
                self._logger.error("Failed to connect to redis-uve: %s:%d" \
                                   % (redis_uve[0], redis_uve[1]))
            except Exception as e:
                self._logger.error("Exception: %s" % e)
                return {}
            else:
                self._logger.debug("Computed %s" % key)

        for k, v in statdict.iteritems():
            if k in rsp:
                mp = dict(v.items() + rsp[k].items())
                statdict[k] = mp

        return dict(rsp.items() + statdict.items())
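
The deferred stats queries collected in qmap are posted as JSON. A sketch of one request body, built from the fields assembled above (the table and select values are hypothetical):

# Sketch of one deferred stats-query body; the where clause and the
# one-hour time window mirror the construction in get_uve above.
qdict = {
    'table': 'StatTable.SomeStruct.some_field',    # elem["aggtype"] (hypothetical)
    'select_fields': ['T', 'some_field.count'],    # elem["select"] (hypothetical)
    'where': [[{'name': 'name', 'value': key.split(':', 1)[1], 'op': 1}]],
    'end_time': OpServerUtils.utc_timestamp_usec(),
}
qdict['start_time'] = qdict['end_time'] - (3600 * 1000000)
resp = OpServerUtils.post_url_http(url, json.dumps(qdict), True)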
예제 #57
0
    def _get_underlay_flow_data(self, flow_record_data):
        """Fetch the underlay data from the UFlowData table.

        Construct the Where clause for the UFlowData query from the
        FlowRecord query response. Convert the select clause, sort_fields,
        filter clause in the OverlayToUnderlay query according to the schema
        defined for the UFlowData table.
        """
        if not len(flow_record_data):
            return []

        # populate where clause for Underlay Flow query
        uflow_data_where = []
        for row in flow_record_data:
            # if any of the column value is None, then skip the row
            if any(col is None for col in row.values()):
                continue
            uflow_data_where_and_list = []
            ufname = self._flowrecord_to_uflowdata_name(
                FlowRecordNames[FlowRecordFields.FLOWREC_VROUTER_IP])
            val = row[FlowRecordNames[FlowRecordFields.FLOWREC_VROUTER_IP]]
            sip = OpServerUtils.Match(name=ufname,
                                      value=val,
                                      op=OpServerUtils.MatchOp.EQUAL)
            uflow_data_where_and_list.append(sip.__dict__)
            ufname = self._flowrecord_to_uflowdata_name(
                FlowRecordNames[FlowRecordFields.FLOWREC_OTHER_VROUTER_IP])
            val = \
                row[FlowRecordNames[FlowRecordFields.FLOWREC_OTHER_VROUTER_IP]]
            dip = OpServerUtils.Match(name=ufname,
                                      value=val,
                                      op=OpServerUtils.MatchOp.EQUAL)
            uflow_data_where_and_list.append(dip.__dict__)
            ufname = self._flowrecord_to_uflowdata_name(
                FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_SPORT])
            val = row[FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_SPORT]]
            sport = OpServerUtils.Match(name=ufname,
                                        value=val,
                                        op=OpServerUtils.MatchOp.EQUAL)
            ufname = self._flowrecord_to_uflowdata_name(
                FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_PROTO])
            val = row[FlowRecordNames[FlowRecordFields.FLOWREC_UNDERLAY_PROTO]]
            # get the protocol from tunnel_type
            val = OpServerUtils.tunnel_type_to_protocol(val)
            protocol = OpServerUtils.Match(name=ufname,
                                           value=val,
                                           op=OpServerUtils.MatchOp.EQUAL,
                                           suffix=sport)
            uflow_data_where_and_list.append(protocol.__dict__)
            uflow_data_where.append(uflow_data_where_and_list)

        # if the where clause is empty, then no need to send
        # the UFlowData query
        if not len(uflow_data_where):
            return []

        # populate UFlowData select
        uflow_data_select = []
        for select in self.query_json['select_fields']:
            uflow_data_select.append(self._underlay_to_uflowdata_name(select))

        # sort_fields specified in the query?
        uflow_data_sort_fields = None
        if self.query_json.get('sort_fields'):
            uflow_data_sort_fields = []
            for field in self.query_json['sort_fields']:
                uflow_data_sort_fields.append(
                    self._underlay_to_uflowdata_name(field))
        uflow_data_sort_type = self.query_json.get('sort')

        # does the query contain limit attribute?
        uflow_data_limit = self.query_json.get('limit')

        # add filter if specified
        uflow_data_filter = None
        if self.query_json.get('filter') is not None:
            uflow_data_filter = list(self.query_json['filter'])
            if len(uflow_data_filter):
                if not isinstance(uflow_data_filter[0], list):
                    uflow_data_filter = [uflow_data_filter]
            for filter_and in uflow_data_filter:
                for match_term in filter_and:
                    match_term['name'] = self._underlay_to_uflowdata_name(
                        match_term['name'])

        uflow_data_query = OpServerUtils.Query(
            table='StatTable.UFlowData.flow',
            start_time=self._start_time,
            end_time=self._end_time,
            select_fields=uflow_data_select,
            where=uflow_data_where,
            sort=uflow_data_sort_type,
            sort_fields=uflow_data_sort_fields,
            limit=uflow_data_limit,
            filter=uflow_data_filter)
        return self._send_query(json.dumps(uflow_data_query.__dict__))
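
Each FlowRecord row above contributes one AND-list of match terms (vrouter IP, other-vrouter IP, and protocol carrying the underlay source port as a suffix), and the per-row lists are OR'ed together. A sketch of the resulting shape; the UFlowData column names produced by _flowrecord_to_uflowdata_name are hypothetical here:

# Sketch: uflow_data_where is a list of OR'ed AND-lists of Match dicts
sport = OpServerUtils.Match(name='sport', value=4789,
                            op=OpServerUtils.MatchOp.EQUAL)
uflow_data_where = [[
    OpServerUtils.Match(name='vrouter_ip', value='172.16.0.1',
                        op=OpServerUtils.MatchOp.EQUAL).__dict__,
    OpServerUtils.Match(name='other_vrouter_ip', value='172.32.0.1',
                        op=OpServerUtils.MatchOp.EQUAL).__dict__,
    OpServerUtils.Match(name='protocol', value=17,
                        op=OpServerUtils.MatchOp.EQUAL,
                        suffix=sport).__dict__,
]]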
예제 #58
0
    def aggregate(self, key, flat, base_url=None):
        '''
        This function does parallel aggregation of this UVE's state.
        It aggregates across all sources and returns the global state of
        the UVE. (A sketch of the '@ulink' href rewriting follows this example.)
        '''
        result = {}
        try:
            for typ in self._state[key].keys():
                result[typ] = {}
                for objattr in self._state[key][typ].keys():
                    if self._is_sum(self._state[key][typ][objattr]):
                        sum_res = self._sum_agg(self._state[key][typ][objattr])
                        if flat:
                            result[typ][objattr] = \
                                OpServerUtils.uve_attr_flatten(sum_res)
                        else:
                            result[typ][objattr] = sum_res
                    elif self._is_union(self._state[key][typ][objattr]):
                        union_res = self._union_agg(
                            self._state[key][typ][objattr])
                        conv_res = None
                        if union_res.has_key('@ulink') and base_url and \
                                union_res['list']['@type'] == 'string':
                            uterms = union_res['@ulink'].split(":", 1)

                            # This is the linked UVE's table name
                            m_table = uterms[0]

                            if self._rev_map.has_key(m_table):
                                h_table = self._rev_map[m_table]
                                conv_res = []
                                sname = ParallelAggregator.get_list_name(
                                    union_res)
                                for el in union_res['list'][sname]:
                                    lobj = {}
                                    lobj['name'] = el
                                    lobj['href'] = base_url + '/analytics/uves/' + \
                                        h_table + '/' + el
                                    if len(uterms) == 2:
                                        lobj['href'] = lobj[
                                            'href'] + '?cfilt=' + uterms[1]
                                    else:
                                        lobj['href'] = lobj['href'] + '?flat'
                                    conv_res.append(lobj)
                        if flat:
                            if not conv_res:
                                result[typ][objattr] = \
                                        OpServerUtils.uve_attr_flatten(union_res)
                            else:
                                result[typ][objattr] = conv_res
                        else:
                            result[typ][objattr] = union_res
                    elif self._is_append(self._state[key][typ][objattr]):
                        result[typ][objattr] = self._append_agg(
                            self._state[key][typ][objattr])
                        append_res = ParallelAggregator.consolidate_list(
                            result, typ, objattr)

                        if flat:
                            result[typ][objattr] =\
                                OpServerUtils.uve_attr_flatten(append_res)
                        else:
                            result[typ][objattr] = append_res

                    else:
                        default_res = self._default_agg(
                            self._state[key][typ][objattr])
                        if flat:
                            if (len(default_res) == 1):
                                result[typ][objattr] =\
                                    OpServerUtils.uve_attr_flatten(
                                        default_res[0][0])
                            else:
                                nres = []
                                for idx in range(len(default_res)):
                                    nres.append(default_res[idx])
                                    nres[idx][0] =\
                                        OpServerUtils.uve_attr_flatten(
                                            default_res[idx][0])
                                result[typ][objattr] = nres
                        else:
                            result[typ][objattr] = default_res
        except KeyError:
            # no state recorded for this key; return what was aggregated so far
            pass
        return result
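
In the '@ulink' branch above, a string-list union annotated with '<table>[:<cfilt>]' is rewritten into href objects. An illustrative before/after (the table name and rev_map entry are hypothetical):

# Illustrative input to the '@ulink' rewriting above
union_res = {
    '@ulink': 'ObjectVNTable:UveVirtualNetworkConfig',
    'list': {'@type': 'string', '@size': '1', 'element': ['vn1']},
}
# With self._rev_map['ObjectVNTable'] == 'virtual-network' (hypothetical),
# each element becomes an href object:
# {'name': 'vn1',
#  'href': base_url + '/analytics/uves/virtual-network/vn1'
#          '?cfilt=UveVirtualNetworkConfig'}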
예제 #59
0
    def get_uve(self, key, flat, filters=None, base_url=None):

        filters = filters or {}
        sfilter = filters.get('sfilt')
        mfilter = filters.get('mfilt')
        tfilter = filters.get('cfilt')
        ackfilter = filters.get('ackfilt')

        if flat and not sfilter and not mfilter and self._usecache:
            return self._uvedbcache.get_uve(key, filters)

        is_alarm = False
        if tfilter == "UVEAlarms":
            is_alarm = True

        state = {}
        state[key] = {}
        rsp = {}
        failures = False

        tab = key.split(":", 1)[0]

        for r_inst in self._redis_uve_map.keys():
            try:
                redish = self._redis_inst_get(r_inst)
                qmap = {}

                ppe = redish.pipeline()
                ppe.smembers("ALARM_ORIGINS:" + key)
                if not is_alarm:
                    ppe.smembers("ORIGINS:" + key)
                pperes = ppe.execute()
                origins = set()
                for origset in pperes:
                    for smt in origset:
                        tt = smt.rsplit(":", 1)[1]
                        sm = smt.rsplit(":", 1)[0]
                        source = sm.split(":", 1)[0]
                        mdule = sm.split(":", 1)[1]
                        if tfilter is not None:
                            if tt not in tfilter:
                                continue
                        if sfilter is not None:
                            if sfilter != source:
                                continue
                        if mfilter is not None:
                            if mfilter != mdule:
                                continue
                        origins.add(smt)

                ppeval = redish.pipeline()
                for origs in origins:
                    ppeval.hgetall("VALUES:" + key + ":" + origs)
                odictlist = ppeval.execute()

                idx = 0
                for origs in origins:

                    odict = odictlist[idx]
                    idx = idx + 1

                    info = origs.rsplit(":", 1)
                    dsource = info[0]
                    typ = info[1]

                    afilter_list = set()
                    if tfilter is not None:
                        afilter_list = tfilter[typ]

                    for attr, value in odict.iteritems():
                        if len(afilter_list):
                            if attr not in afilter_list:
                                continue

                        if value[0] == '<':
                            snhdict = xmltodict.parse(value)
                            # TODO: This is a hack for separating external
                            # bgp routers from control-nodes
                            if snhdict[attr]['@type'] == 'map':
                                if typ == 'ContrailConfig' and \
                                        tab == 'ObjectBgpRouter' and \
                                        attr == 'elements':
                                    try:
                                        elem = OpServerUtils.uve_attr_flatten(\
                                            snhdict[attr])
                                        vendor = json.loads(\
                                            elem['bgp_router_parameters'])["vendor"]
                                        if vendor != "contrail":
                                            continue
                                    except Exception:
                                        # malformed ContrailConfig element;
                                        # keep the record rather than drop it
                                        pass
                            elif snhdict[attr]['@type'] == 'list':
                                sname = ParallelAggregator.get_list_name(
                                    snhdict[attr])
                                if snhdict[attr]['list']['@size'] == '0':
                                    continue
                                elif snhdict[attr]['list']['@size'] == '1':
                                    if not isinstance(
                                            snhdict[attr]['list'][sname],
                                            list):
                                        snhdict[attr]['list'][sname] = [
                                            snhdict[attr]['list'][sname]
                                        ]
                                if typ == 'UVEAlarms' and attr == 'alarms' and \
                                        ackfilter is not None:
                                    alarms = []
                                    for alarm in snhdict[attr]['list'][sname]:
                                        ack_attr = alarm.get('ack')
                                        if ack_attr:
                                            ack = ack_attr['#text']
                                        else:
                                            ack = 'false'
                                        if ack == ackfilter:
                                            alarms.append(alarm)
                                    if not len(alarms):
                                        continue
                                    snhdict[attr]['list'][sname] = alarms
                                    snhdict[attr]['list']['@size'] = \
                                        str(len(alarms))
                        else:
                            continue

                        # print "Attr %s Value %s" % (attr, snhdict)
                        if typ not in state[key]:
                            state[key][typ] = {}
                        if attr not in state[key][typ]:
                            state[key][typ][attr] = {}
                        if dsource in state[key][typ][attr]:
                            print "Found Dup %s:%s:%s:%s = %s" % \
                                (key, typ, attr, dsource,
                                 state[key][typ][attr][dsource])
                        state[key][typ][attr][dsource] = snhdict[attr]

                pa = ParallelAggregator(state, self._uve_reverse_map)
                rsp = pa.aggregate(key, flat, base_url)
            except Exception as e:
                self._logger.error("redis-uve failed %s for : %s tb %s" \
                               % (str(e), str(r_inst), traceback.format_exc()))
                self._redis_inst_down(r_inst)
                failures = True
            else:
                self._redis_inst_up(r_inst, redish)
                self._logger.debug("Computed %s as %s" % (key, rsp.keys()))

        return failures, rsp
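
The two pipelines above batch the origin-set reads and the per-origin hash reads into one round trip each. A minimal standalone sketch of the second pipeline, assuming a reachable redis instance (host, port, db, and key names are illustrative):

import redis

# Sketch: batch N HGETALL calls into a single round trip, mirroring
# the ppeval usage above
redish = redis.StrictRedis(host='127.0.0.1', port=6379, db=1)
ppeval = redish.pipeline()
for origs in origins:
    ppeval.hgetall("VALUES:" + key + ":" + origs)
odictlist = ppeval.execute()  # results arrive in the order queued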