def verify_uflow(self, vizd_obj, flow_type, exp_res):
    logging.info('verify <%s> data' % flow_type)
    vns = VerificationOpsSrv('127.0.0.1', vizd_obj.get_opserver_port())
    res = vns.post_query('StatTable.UFlowData.flow',
        start_time='-1m', end_time='now',
        select_fields=['name', 'flow.flowtype', 'flow.sip', 'flow.sport'],
        where_clause='name=*')
    # Normalize both sides to sorted OrderedDicts so the comparison is
    # independent of row and key order.
    res = sorted([OrderedDict(sorted(r.items()))
                  for r in res if r['flow.flowtype'] == flow_type])
    exp_res = sorted([OrderedDict(sorted(r.items())) for r in exp_res])
    logging.info('Expected Result: ' + str(exp_res))
    logging.info('Result: ' + str(res))
    return res == exp_res
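A call site passes the expected rows as plain dicts keyed by the selected fields. A hypothetical example, reusing the row shape from the IPFIX tests below (the values are illustrative):

exp_res = [{'name': '127.0.0.1', 'flow.flowtype': 'IPFIX',
            'flow.sip': '10.84.45.254', 'flow.sport': 49152}]
assert self.verify_uflow(vizd_obj, 'IPFIX', exp_res)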
    def test_03_ipfix(self):
        '''
        This test starts redis, vizd, opserver and qed.
        It uses the test class' cassandra instance
        Then it feeds IPFIX packets to the collector
        and queries for them
        '''
        logging.info("*** test_03_ipfix ***")
        if StatsTest._check_skip_test():
            return True

        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
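        # Bind to port 0 so the kernel picks a free ephemeral UDP port,
        # then close the socket and pass the port number to the fixture.
        # Note this is racy: another process could grab the port in between.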
        sock.bind(("", 0))
        ipfix_port = sock.getsockname()[1]
        sock.close()

        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging,
                             builddir,
                             self.__class__.cassandra_port,
                             ipfix_port))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        collectors = [vizd_obj.get_collector()]
        
        UDP_IP = "127.0.0.1"
        # Replay captured IPFIX template and data packets to the collector.
        # Open in binary mode so the raw packet bytes go out unmodified.
        with open(builddir + '/opserver/test/data/ipfix_t.data', 'rb') as f1:
            sock.sendto(f1.read(), (UDP_IP, ipfix_port))
        with open(builddir + '/opserver/test/data/ipfix_d.data', 'rb') as f2:
            sock.sendto(f2.read(), (UDP_IP, ipfix_port))
        sock.close()

        logging.info("Verifying IPFIX data")
        vns = VerificationOpsSrv('127.0.0.1', vizd_obj.get_opserver_port())
        res = vns.post_query("StatTable.UFlowData.flow",
            start_time="-5m", end_time="now",
            select_fields=["name", "flow.flowtype", "flow.sip", "flow.sport"],
            where_clause='name=*')
        logging.info("Result: " + str(res))
        assert self.verify_ipfix(res)

        return True
Example #4
    def test_03_ipfix(self):
        '''
        This test starts redis, vizd, opserver and qed.
        It uses the test class' cassandra instance
        Then it feeds IPFIX packets to the collector
        and queries for them
        '''
        logging.info("*** test_03_ipfix ***")
        if StatsTest._check_skip_test():
            return True

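        # ipfix_port=True asks the fixture to allocate the collector's IPFIX
        # port itself (read back below via get_ipfix_port()), which avoids
        # the bind-and-release race of the previous variant.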
        vizd_obj = self.useFixture(
            AnalyticsFixture(logging,
                             builddir,
                             self.__class__.redis_port,
                             self.__class__.cassandra_port,
                             ipfix_port=True))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        ipfix_port = vizd_obj.collectors[0].get_ipfix_port()

        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        UDP_IP = "127.0.0.1"
        with open(builddir + '/opserver/test/data/ipfix_t.data', 'rb') as f1:
            sock.sendto(f1.read(), (UDP_IP, ipfix_port))
        with open(builddir + '/opserver/test/data/ipfix_d.data', 'rb') as f2:
            sock.sendto(f2.read(), (UDP_IP, ipfix_port))
        sock.close()

        logging.info("Verifying IPFIX data sent to port %s" % ipfix_port)
        uexp = {
            u'name': u'127.0.0.1',
            u'flow.sport': 49152,
            u'flow.sip': u'10.84.45.254',
            u'flow.flowtype': u'IPFIX'
        }
        vns = VerificationOpsSrv('127.0.0.1', vizd_obj.get_opserver_port())
        assert self.verify_ipfix(vns, uexp)

        return True
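verify_ipfix itself is not shown in these examples. A minimal sketch of what it could look like for the (vns, uexp) calling convention above, assuming the same StatTable.UFlowData.flow query used by verify_uflow and the time module already imported; the retry count and backoff are assumptions:

def verify_ipfix(self, vns, uexp, tries=10):
    # Hypothetical helper: poll the UFlowData stat table until the
    # expected row shows up or the attempts run out.
    for _ in range(tries):
        res = vns.post_query(
            'StatTable.UFlowData.flow',
            start_time='-5m', end_time='now',
            select_fields=['name', 'flow.flowtype', 'flow.sip', 'flow.sport'],
            where_clause='name=*')
        if res and any(all(r.get(k) == v for k, v in uexp.items())
                       for r in res):
            return True
        time.sleep(1)
    return False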
    def test_00_startup(self):
        '''
        This test loads pre-existing data into cassandra and issues
        queries to opserver.
        The idea is to use this to monitor/improve qed performance
        '''
        logging.info("*** test_00_startup ***")
        if AnalyticsTest._check_skip_test():
            return True

        vizd_obj = self.useFixture(AnalyticsFixture(
            logging, builddir, self.__class__.cassandra_port, True))
        assert vizd_obj.verify_on_setup()

        assert AnalyticsTest._load_data_into_cassandra(self.__class__.cassandra_port)

        # 'SystemObjectTable' does not carry the updated timestamp, so
        # analytics_start_time is hardcoded below. The lookup would
        # otherwise be:
        #
        #pool = ConnectionPool(COLLECTOR_KEYSPACE, ['127.0.0.1:%s'
        #                      % (self.__class__.cassandra_port)])
        #col_family = ColumnFamily(pool, SYSTEM_OBJECT_TABLE)
        #analytics_info_row = col_family.get(SYSTEM_OBJECT_ANALYTICS)
        #if analytics_info_row[SYSTEM_OBJECT_START_TIME]:
        #    analytics_start_time = analytics_info_row[SYSTEM_OBJECT_START_TIME]
        #else:
        #    assert False

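        # Timestamps in this API are in microseconds since the UNIX epoch;
        # the hardcoded value below matches the preloaded data set.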
        analytics_start_time = 1387254916542720

        vizd_obj.query_engine.start(analytics_start_time)

        opserver_port = vizd_obj.get_opserver_port()

        vns = VerificationOpsSrv('127.0.0.1', opserver_port)
        
        # message table
        a_query = Query(table="MessageTable",
                start_time=analytics_start_time+5*60*1000000,
                end_time=analytics_start_time+10*60*1000000,
                select_fields=["MessageTS", "Source", "ModuleId", "Messagetype", "Xmlmessage"],
                sort_fields=["MessageTS"],
                sort=1)
        json_qstr = json.dumps(a_query.__dict__)
        t1 = time.time()
        res = vns.post_query_json(json_qstr)
        t2 = time.time()
        logging.info("Result Length: " + str(len(res)))
        logging.info("Query: " + json_qstr)
        logging.info("Time(s): " + str(t2 - t1))

        # flow series table aggregation on a tuple
        a_query = Query(table="FlowSeriesTable",
                start_time=analytics_start_time+40*60*1000000,
                end_time=analytics_start_time+100*60*1000000,
                select_fields=["sourcevn", "sourceip", "destvn", "destip", "sum(bytes)"])
        json_qstr = json.dumps(a_query.__dict__)
        t1 = time.time()
        res = vns.post_query_json(json_qstr)
        t2 = time.time()
        logging.info("Result Length: " + str(len(res)))
        logging.info("Query: " + json_qstr)
        logging.info("Time(s): " + str(t2 - t1))

        # flow series table port distribution table
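        # 'where' is a list of OR'ed groups, each a list of AND'ed
        # {name, value, op} terms: flows from vn0 with protocol 1 (ICMP),
        # 6 (TCP) or 17 (UDP).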
        a_query = Query(table="FlowSeriesTable",
                start_time=analytics_start_time+40*60*1000000,
                end_time=analytics_start_time+100*60*1000000,
                select_fields=["dport", "protocol", "flow_count", "sum(bytes)"],
                sort=2,
                sort_fields=["sum(bytes)"],
                where=[[{"name": "protocol", "value": 1, "op": 1},
                        {"name": "sourcevn", "value": "default-domain:demo:vn0", "op": 1}],
                       [{"name": "protocol", "value": 6, "op": 1},
                        {"name": "sourcevn", "value": "default-domain:demo:vn0", "op": 1}],
                       [{"name": "protocol", "value": 17, "op": 1},
                        {"name": "sourcevn", "value": "default-domain:demo:vn0", "op": 1}]])
        json_qstr = json.dumps(a_query.__dict__)
        t1 = time.time()
        res = vns.post_query_json(json_qstr)
        t2 = time.time()
        logging.info("Result Length: " + str(len(res)))
        logging.info("Query: " + json_qstr)
        logging.info("Time(s): " + str(t2 - t1))

        # flow series map
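        # 'T=7' in select_fields requests time-bucketed samples (granularity
        # 7, presumably seconds) rather than one aggregate for the interval.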
        a_query = Query(table="FlowSeriesTable",
                start_time=analytics_start_time+40*60*1000000,
                end_time=analytics_start_time+100*60*1000000,
                select_fields=["sum(bytes)", "sum(packets)", "T=7", "sourcevn", "flow_count"],
                where=[[{"name": "sourcevn", "value": "default-domain:demo:vn0", "op": 1}]])
        json_qstr = json.dumps(a_query.__dict__)
        t1 = time.time()
        res = vns.post_query_json(json_qstr)
        t2 = time.time()
        logging.info("Result Length: " + str(len(res)))
        logging.info("Query: " + json_qstr)
        logging.info("Time(s): " + str(t2 - t1))

        return True
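Query here only needs to be a plain attribute holder, since each test serializes it with json.dumps(a_query.__dict__) and POSTs the result. A minimal sketch under that assumption (the real class may add defaults and validation not shown here):

class Query(object):
    # json.dumps(q.__dict__) yields the request body the tests POST.
    def __init__(self, table, start_time, end_time, select_fields,
                 where=None, sort_fields=None, sort=None):
        self.table = table
        self.start_time = start_time
        self.end_time = end_time
        self.select_fields = select_fields
        # Set optional fields only when supplied so the serialized JSON
        # carries exactly the keys the caller passed.
        if where is not None:
            self.where = where
        if sort_fields is not None:
            self.sort_fields = sort_fields
        if sort is not None:
            self.sort = sort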