Ejemplo n.º 1
0
 def run(self):
     """Command-line entry point: query analytics logs and display them.

     Pre-scans sys.argv (before argparse runs) for analytics-api
     endpoint/credential overrides, fetches the table list to populate
     OBJECT_TYPE_LIST, then either tails results in a loop (--tail) or
     runs a single bounded query.

     Returns:
         None normally; -1 if start/end time parsing fails.
     """
     try:
         index = 0
         # Endpoint/credential defaults, optionally overridden below.
         analytics_api_ip = self._defaults['analytics_api_ip']
         analytics_api_port = self._defaults['analytics_api_port']
         username = self._defaults['username']
         password = self._defaults['password']
         # Manual argv pre-scan: each flag's value is the next token.
         # NOTE(review): a flag passed as the very last argument would
         # raise IndexError here -- confirm callers always pass a value.
         for arg in sys.argv:
             index = index + 1
             if arg == "--analytics-api-ip":
                 analytics_api_ip = sys.argv[index]
             elif arg == "--analytics-api-port":
                 analytics_api_port = sys.argv[index]
             elif arg == "--admin-user":
                 username = sys.argv[index]
             elif arg == "--admin-password":
                 password = sys.argv[index]
         # Discover OBJECT tables so they can be offered as query targets.
         tab_url = "http://" + analytics_api_ip + ":" +\
             analytics_api_port + "/analytics/tables"
         tables = OpServerUtils.get_url_http(tab_url, username, password)
         if tables != {}:
             table_list = json.loads(tables.text)
             for table in table_list:
                 if table['type'] == 'OBJECT':
                     OBJECT_TYPE_LIST.append(str(table['display_name']))
         if self.parse_args() != 0:
             return
         if self._args.tail:
             # Tail mode: poll every 3s; the first window starts 10s in
             # the past, later windows cover time since the last poll.
             start_time = UTCTimestampUsec() - 10 * pow(10, 6)
             while True:
                 self._start_time = start_time
                 self._end_time = UTCTimestampUsec()
                 start_time = self._end_time + 1
                 time.sleep(3)
                 result = self.query()
                 if result == -1:
                     return
                 self.display(result)
         else:
             # One-shot mode: default window is the last 10 minutes.
             start_time = self._args.start_time
             end_time = self._args.end_time
             if not self._args.start_time:
                 start_time = "now-10m"
             if not self._args.end_time:
                 end_time = "now"
             try:
                 self._start_time, self._end_time = \
                     OpServerUtils.parse_start_end_time(
                         start_time = start_time,
                         end_time = end_time,
                         last = self._args.last)
             except:
                 return -1
             result = self.query()
             if result == -1:
                 return
             self.display(result)
     except KeyboardInterrupt:
         return
Ejemplo n.º 2
0
 def __init__(self,
              brokers,
              partition,
              uve_topic,
              logger,
              callback,
              host_ip,
              rsc,
              aginst,
              rport,
              disc=None):
     """Consumer for a single UVE partition topic.

     Initializes the underlying kafka handler (consumer group
     "workers") and records the bookkeeping state for this partition.
     """
     super(UveStreamProc, self).__init__(brokers, "workers", uve_topic,
                                         logger, False)
     # UVE caches: database plus per-UVE in/out counters.
     self._uvedb = {}
     self._uvein = {}
     self._uveout = {}
     # Partition identity and change-notification hook.
     self._partno = partition
     self._callback = callback
     # Encode the host IP as a big-endian 32-bit integer.
     self._host_ip = host_ip
     self._ip_code, = struct.unpack(
         '>I', socket.inet_pton(socket.AF_INET, host_ip))
     self.disc_rset = set()
     self._resource_cb = rsc
     self._aginst = aginst
     self._disc = disc
     self._rport = rport
     # Timestamp at which this partition was acquired.
     self._acq_time = UTCTimestampUsec()
 def __init__(self,
              brokers,
              partition,
              uve_topic,
              logger,
              callback,
              host_ip,
              rsc,
              aginst,
              rport,
              kafka_use_ssl,
              kafka_ssl_params,
              group="-workers"):
     """Consumer for a single UVE partition topic (SSL-capable kafka).

     Initializes the underlying kafka handler with the given consumer
     group and SSL settings, then records the bookkeeping state for
     this partition.
     """
     super(UveStreamProc,
           self).__init__(brokers, group, uve_topic, logger, False,
                          kafka_use_ssl, kafka_ssl_params)
     # UVE caches: database plus per-UVE input counters.
     self._uvedb = {}
     self._uvein = {}
     # Partition identity and change-notification hook.
     self._partno = partition
     self._callback = callback
     # Encode the host IP as a big-endian 32-bit integer.
     self._host_ip = host_ip
     self._ip_code, = struct.unpack(
         '>I', socket.inet_pton(socket.AF_INET, host_ip))
     self.disc_rset = set()
     self._resource_cb = rsc
     self._aginst = aginst
     self._rport = rport
     self._up = True
     # Timestamp at which this partition was acquired.
     self._acq_time = UTCTimestampUsec()
Ejemplo n.º 4
0
 def wait(self, stdin=sys.stdin, stdout=sys.stdout):
     """Block until supervisord announces an event on stdin.

     Sends the READY token, waits for an event header line, then reads
     the payload of the advertised length (supervisord event-listener
     protocol).

     Returns:
         (headers, payload) tuple for the received supervisord event.
     """
     # NOTE: the stdin/stdout defaults are bound once at import time.
     self.ready(stdout)
     while 1:
         if select.select([stdin], [], [])[0]:
             line = stdin.readline()
             # NOTE(review): readline() returns '' (never None) at EOF,
             # so this branch is always taken and the error counters in
             # the else branch are dead code -- confirm intended check.
             if line is not None:
                 self.supervisor_events_ctr += 1
                 self.supervisor_events_timestamp = str(UTCTimestampUsec())
                 break
             else:
                 self.supervisor_events_error_ctr += 1
                 self.supervisor_events_error_timestamp = str(
                     UTCTimestampUsec())
     # Parse "key:value" header tokens and read 'len' bytes of payload.
     headers = childutils.get_headers(line)
     payload = stdin.read(int(headers['len']))
     return headers, payload
Ejemplo n.º 5
0
 def set_analytics_db_purge_status(self, purge_id, purge_input):
     """Mark an analytics DB purge as running in redis.

     Writes the purge id, purge input and start time into the
     ANALYTICS_DB_PURGE hash on the local redis-server.

     Returns:
         None on success, or a {'status': 'failed', 'reason': ...}
         dict when redis is unreachable or authentication fails.
     """
     purge_key = 'ANALYTICS_DB_PURGE'
     try:
         redish = redis.StrictRedis(host='127.0.0.1',
                                    port=self._redis_query_port,
                                    password=self._redis_password,
                                    db=0)
         redish.hset(purge_key, 'status', 'running')
         redish.hset(purge_key, 'purge_input', purge_input)
         redish.hset(purge_key, 'purge_start_time',
                     UTCTimestampUsec())
         redish.hset(purge_key, 'purge_id', purge_id)
     except redis.exceptions.ConnectionError:
         self._logger.error("Exception: "
                            "Failure in connection to redis-server")
         return {
             'status': 'failed',
             'reason': 'Failure in connection to redis-server'
         }
     except redis.exceptions.ResponseError:
         self._logger.error("Exception: " "Redis authentication failed")
         return {
             'status': 'failed',
             'reason': 'Redis authentication failed'
         }
     return None
Ejemplo n.º 6
0
 def _create_uve_alarm_info(self):
     """Build a dummy ProcessStatus UVEAlarmInfo for test use."""
     info = UVEAlarmInfo()
     info.type = 'ProcessStatus'
     info.description = [AlarmElement('rule1', 'value1')]
     info.severity = 1
     info.ack = False
     info.timestamp = UTCTimestampUsec()
     return info
Ejemplo n.º 7
0
 def test_close_sm_session(self):
     """Exercise SandeshClient._do_close_sm_session backoff logic.

     Verifies the (close?, next-interval) decision across the backoff
     regimes: first close, close too soon after the previous one,
     interval doubling, interval holding, interval reset, and clamping
     at the maximum close interval.
     """
     initial_close_interval_msec = \
         SandeshClient._INITIAL_SM_SESSION_CLOSE_INTERVAL_MSEC
     close_time_usec = UTCTimestampUsec()
     # First time close event
     (close, close_interval_msec) = SandeshClient._do_close_sm_session(
         close_time_usec, 0, 0)
     self.assertTrue(close)
     self.assertEqual(initial_close_interval_msec, close_interval_msec)
     # Close event time same as last close time
     last_close_interval_usec = initial_close_interval_msec * 1000
     (close, close_interval_msec) = SandeshClient._do_close_sm_session(
         close_time_usec, close_time_usec, last_close_interval_usec)
     self.assertFalse(close)
     self.assertEqual(0, close_interval_msec)
     # Close event time is less than last close interval
     (close, close_interval_msec) = SandeshClient._do_close_sm_session(
         close_time_usec, close_time_usec - last_close_interval_usec,
         last_close_interval_usec)
     self.assertFalse(close)
     self.assertEqual(0, close_interval_msec)
     # Close event time is between close interval and 2 * last close
     # interval -- interval doubles
     last_close_interval_usec = (initial_close_interval_msec * 2) * 1000
     (close, close_interval_msec) = SandeshClient._do_close_sm_session(
         close_time_usec,
         close_time_usec - (1.5 * last_close_interval_usec),
         last_close_interval_usec)
     self.assertTrue(close)
     self.assertEqual(old_div((last_close_interval_usec * 2), 1000),
                      close_interval_msec)
     # Close event time is between 2 * last close interval and 4 * last
     # close interval -- interval held
     last_close_interval_usec = (initial_close_interval_msec * 3) * 1000
     (close, close_interval_msec) = SandeshClient._do_close_sm_session(
         close_time_usec, close_time_usec - (3 * last_close_interval_usec),
         last_close_interval_usec)
     self.assertTrue(close)
     self.assertEqual(old_div(last_close_interval_usec, 1000),
                      close_interval_msec)
     # Close event time is beyond 4 * last close interval -- interval
     # resets to the initial value
     last_close_interval_usec = (initial_close_interval_msec * 2) * 1000
     (close, close_interval_msec) = SandeshClient._do_close_sm_session(
         close_time_usec, close_time_usec - (5 * last_close_interval_usec),
         last_close_interval_usec)
     self.assertTrue(close)
     self.assertEqual(initial_close_interval_msec, close_interval_msec)
     # Maximum close interval -- doubling is clamped at the maximum
     last_close_interval_usec = old_div(
         (SandeshClient._MAX_SM_SESSION_CLOSE_INTERVAL_MSEC * 1000), 2)
     (close, close_interval_msec) = SandeshClient._do_close_sm_session(
         close_time_usec,
         close_time_usec - (1.5 * last_close_interval_usec),
         last_close_interval_usec)
     self.assertTrue(close)
     self.assertEqual(SandeshClient._MAX_SM_SESSION_CLOSE_INTERVAL_MSEC,
                      close_interval_msec)
Ejemplo n.º 8
0
    def run(self):
        """Entry point: parse args, then query and display log results.

        In --tail mode, polls every 3 seconds with a sliding time
        window; otherwise runs one bounded query (default window: the
        last 10 minutes) and displays the accumulated result.

        Returns:
            None normally; -1 if start/end time parsing fails.
        """
        try:
            if self.parse_args() != 0:
                return

            if self._args.tail:
                # Tail mode: first window starts 10s in the past; each
                # later window covers the time since the previous poll.
                start_time = UTCTimestampUsec() - 10*pow(10,6)
                while True:
                    self._start_time = start_time
                    self._end_time = UTCTimestampUsec()
                    start_time = self._end_time + 1
                    time.sleep(3)
                    result = self.query()
                    if result == -1:
                        return
                    self.display(result)
            else:
                # One-shot mode: default window is the last 10 minutes.
                start_time = self._args.start_time
                end_time = self._args.end_time
                if not self._args.start_time:
                    start_time = "now-10m"
                if not self._args.end_time:
                    end_time = "now"
                try:
                    self._start_time, self._end_time = \
                        OpServerUtils.parse_start_end_time(
                            start_time = start_time,
                            end_time = end_time,
                            last = self._args.last)
                except:
                    return -1
                result = self.query()
                if result == -1:
                    return
                # Accumulate the result before processing it as the
                # formatting of result can be cpu intensive and hence would
                # affect the overall time taken to fetch the result from the
                # analytics-api. Since the query result ttl is set to 5 min
                # in redis, it is necessary to improve the read throughput.
                result_list = self.read_result(result)
                self.display(result_list)
        except KeyboardInterrupt:
            return
Ejemplo n.º 9
0
    def _send_flow_sandesh(self):
        """Continuously emit synthetic FlowDataIpv4 messages.

        On first pass, lazily builds one flow per (vn, vm, flow)
        combination with random endpoints and counters.  Then loops
        forever bumping each flow's packet/byte counters and sending
        it, sleeping _FLOW_MSG_INTVL_IN_SEC after every
        _NUM_FLOWS_IN_ITERATION sends (yielding to gevent otherwise).
        """
        flows = []
        while True:
            # Populate flows if not done
            if len(flows) == 0:
                other_vn = self._other_vn
                for vn in range(self._start_vn, self._end_vn):
                    for nvm in range(self._vm_iterations):
                        for nflow in range(self._num_flows_per_vm):
                            # Seed counters with random initial traffic.
                            init_packets = random.randint(1, \
                                self._FLOW_PKTS_PER_SEC)
                            init_bytes = init_packets * \
                                random.randint(1, self._BYTES_PER_PACKET)
                            # Endpoints are offsets into each VN's
                            # address block.
                            sourceip = int(self._ip_vns[vn] + \
                                self._ip_start_index + nvm)
                            destip = int(self._ip_vns[other_vn] + \
                                self._ip_start_index + nvm)
                            flows.append(
                                FlowDataIpv4(
                                    flowuuid=str(uuid.uuid1()),
                                    direction_ing=random.randint(0, 1),
                                    sourcevn=self._VN_PREFIX + str(vn),
                                    destvn=self._VN_PREFIX + str(other_vn),
                                    sourceip=sourceip,
                                    destip=destip,
                                    sport=random.randint(0, 65535),
                                    dport=random.randint(0, 65535),
                                    protocol=random.choice([6, 17, 1]),
                                    setup_time=UTCTimestampUsec(),
                                    packets=init_packets,
                                    bytes=init_bytes,
                                    diff_packets=init_packets,
                                    diff_bytes=init_bytes))
                    other_vn = (other_vn + 1) % self._num_vns

            # Send the flows periodically
            flow_cnt = 0
            for flow_data in flows:
                # Accumulate new traffic on top of the running totals.
                new_packets = random.randint(1, self._FLOW_PKTS_PER_SEC)
                new_bytes = new_packets * \
                    random.randint(1, self._BYTES_PER_PACKET)
                flow_data.packets += new_packets
                flow_data.bytes += new_bytes
                flow_data.diff_packets = new_packets
                flow_data.diff_bytes = new_bytes
                flow_object = FlowDataIpv4Object(
                    flowdata=flow_data, sandesh=self._sandesh_instance)
                flow_object.send(sandesh=self._sandesh_instance)
                flow_cnt += 1
                if flow_cnt == self._NUM_FLOWS_IN_ITERATION:
                    flow_cnt = 0
                    gevent.sleep(self._FLOW_MSG_INTVL_IN_SEC)
                else:
                    gevent.sleep(0)
Ejemplo n.º 10
0
 def writeSandeshEnd(self):
     """Close the current sandesh JSON object.

     Appends the closing '}' of the body, a top-level TIMESTAMP field
     and the final '}' to the output buffer, then pops the per-sandesh
     context stacks.

     Returns:
         0 on success (matches the other writer methods).
     """
     # Build the terminator in one formatting pass instead of the
     # previous chain of '+=' concatenations; output is identical:
     # '},"TIMESTAMP":<usec>}'
     sandesh_end = '},"TIMESTAMP":%s}' % str(UTCTimestampUsec())
     self.writeBuffer(sandesh_end)
     self.current_sandesh_context_.pop()
     self.is_first_element_list_.pop()
     self.is_primitive_element_list_.pop()
     return 0
Ejemplo n.º 11
0
 def create_test_alarm_info(alarm_type):
     """Return a dummy UVEAlarmInfo of the given type for tests."""
     template = AlarmTemplate(oper="!=",
                              operand1=Operand1(keys=["dummytoken"]),
                              operand2=Operand2(json_value=json.dumps('UP')))
     element = AlarmElement(rule=template,
                            json_operand1_value=json.dumps('DOWN'))
     or_list = [[AllOf(all_of=[element])]]
     return UVEAlarmInfo(type=alarm_type, severity=1,
                         timestamp=UTCTimestampUsec(),
                         token="dummytoken",
                         any_of=or_list, ack=False)
Ejemplo n.º 12
0
 def db_purge(self, purge_input, purge_id):
     """Purge analytics data older than the requested percentage.

     Args:
         purge_input: percentage (0-100) of the elapsed retention
             window to purge, measured from the analytics start time.
         purge_id: identifier used to track this purge operation.

     Returns:
         Total number of rows deleted (0 when purge_input is None or
         nothing was deleted).
     """
     total_rows_deleted = 0  # total number of rows deleted
     # PEP 8: compare against None with 'is', not equality.
     if purge_input is not None:
         current_time = UTCTimestampUsec()
         analytics_start_time = float(self._get_analytics_start_time())
         # Purge cutoff = start_time + purge_input% of the elapsed window.
         purge_time = analytics_start_time + (float(
             (purge_input) *
             (float(current_time) - analytics_start_time))) / 100
         total_rows_deleted = self.purge_old_data(purge_time, purge_id)
         if total_rows_deleted != 0:
             # Advance the analytics start time past the purged data.
             self._update_analytics_start_time(int(purge_time))
     return total_rows_deleted
Ejemplo n.º 13
0
 def create_test_alarm_info(alarm_type):
     """Return a dummy UVEAlarmInfo (rule-based form) for tests."""
     rule = AlarmRule(oper="!=",
                      operand1=AlarmOperand(name="state",
                                            json_value="'DOWN'"),
                      operand2=AlarmOperand(name=None, json_value="'UP'"))
     return UVEAlarmInfo(type=alarm_type,
                         severity=1,
                         timestamp=UTCTimestampUsec(),
                         token="dummytoken",
                         rules=[rule],
                         ack=False)
Ejemplo n.º 14
0
def main():
    """CLI entry point: build a LogQuerier and run it.

    In follow mode (-f), polls every 3 seconds with a sliding time
    window; otherwise runs one bounded query (default window: the last
    10 minutes) and displays the result.

    Returns:
        None normally; -1 if start/end time parsing fails.
    """
    try:
        querier = LogQuerier()
        if querier.parse_args() != 0:
            return
        if querier._args.f:
            # Follow mode: first window starts 10s in the past; each
            # later window covers the time since the previous poll.
            start_time = UTCTimestampUsec() - 10 * pow(10, 6)
            while True:
                querier._start_time = start_time
                querier._end_time = UTCTimestampUsec()
                start_time = querier._end_time + 1
                time.sleep(3)
                result = querier.query()
                if result == -1:
                    return
                querier.display(result)
        else:
            # One-shot mode: default window is the last 10 minutes.
            start_time = querier._args.start_time
            end_time = querier._args.end_time
            if not querier._args.start_time:
                start_time = "now-10m"
            if not querier._args.end_time:
                end_time = "now"
            try:
                querier._start_time, querier._end_time = \
                    OpServerUtils.parse_start_end_time(
                        start_time = start_time,
                        end_time = end_time,
                        last = querier._args.last)
            except:
                return -1
            result = querier.query()
            if result == -1:
                return
            querier.display(result)
    except KeyboardInterrupt:
        return
Ejemplo n.º 15
0
 def _create_uve_alarm_info(self):
     """Build a dummy ProcessStatus alarm with one condition/match rule."""
     condition = AlarmCondition(
         operation='==',
         operand1='NodeStatus.process_info.process_state',
         operand2=AlarmOperand2(json_value=json.dumps('null')))
     match1 = AlarmMatch(json_operand1_value=json.dumps('null'))
     and_list = AlarmAndList(
         and_list=[AlarmConditionMatch(condition, [match1])])
     info = UVEAlarmInfo()
     info.type = 'ProcessStatus'
     info.alarm_rules = [AlarmRules(or_list=[and_list])]
     info.ack = False
     info.timestamp = UTCTimestampUsec()
     info.severity = 1
     return info
Ejemplo n.º 16
0
 def run(self):
     """Entry point: parse args, then query and display log results.

     In --tail mode, polls every 3 seconds with a sliding time window;
     otherwise runs one bounded query (default window: the last 10
     minutes) and displays the result.

     Returns:
         None normally; -1 if start/end time parsing fails.
     """
     try:
         if self.parse_args() != 0:
             return
         if self._args.tail:
             # Tail mode: first window starts 10s in the past; each
             # later window covers the time since the previous poll.
             start_time = UTCTimestampUsec() - 10*pow(10,6)
             while True:
                 self._start_time = start_time
                 self._end_time = UTCTimestampUsec()
                 start_time = self._end_time + 1
                 time.sleep(3)
                 result = self.query()
                 if result == -1:
                     return
                 self.display(result)
         else:
             # One-shot mode: default window is the last 10 minutes.
             start_time = self._args.start_time
             end_time = self._args.end_time
             if not self._args.start_time:
                 start_time = "now-10m"
             if not self._args.end_time:
                 end_time = "now"
             try:
                 self._start_time, self._end_time = \
                     OpServerUtils.parse_start_end_time(
                         start_time = start_time,
                         end_time = end_time,
                         last = self._args.last)
             except:
                 return -1
             result = self.query()
             if result == -1:
                 return
             self.display(result)
     except KeyboardInterrupt:
         return
Ejemplo n.º 17
0
 def db_purge(self, purge_input, purge_id):
     """Purge analytics data older than the requested percentage.

     Args:
         purge_input: percentage (0-100) of the elapsed retention
             window to purge, measured from the analytics start time.
         purge_id: identifier used to track this purge operation.

     Returns:
         Rows deleted, 0 when purge_input is None, or -1 when the
         analytics start time could not be read.
     """
     total_rows_deleted = 0  # total number of rows deleted
     # PEP 8: compare against None with 'is', not equality.
     if purge_input is not None:
         current_time = UTCTimestampUsec()
         analytics_start_time = self._get_analytics_start_time()
         if analytics_start_time is None:
             self._logger.error("Failed to get the analytics start time")
             return -1
         # Purge cutoff = start_time + purge_input% of the elapsed window.
         purge_time = analytics_start_time + (float(
             (purge_input) *
             (float(current_time) - float(analytics_start_time)))) / 100
         total_rows_deleted = self.purge_old_data(purge_time, purge_id)
         if total_rows_deleted != -1:
             # Advance the analytics start time past the purged data.
             self._update_analytics_start_time(int(purge_time))
     return total_rows_deleted
Ejemplo n.º 18
0
   def create_test_alarm_info(self, table, name, alarm_type):
       """Build an AlarmStateMachine seeded with a dummy alarm for tests."""
       template = AlarmTemplate(oper="!=",
                                operand1=Operand1(keys=["dummytoken"]),
                                operand2=Operand2(json_value=json.dumps('UP')))
       or_list = [[AllOf(all_of=[AlarmElement(
           rule=template, json_operand1_value=json.dumps('DOWN'))])]]
       uai = UVEAlarmInfo(type=alarm_type, severity=1,
                          timestamp=UTCTimestampUsec(), token="dummytoken",
                          any_of=or_list, ack=False)
       conf = UVEAlarmConfig()
       state = UVEAlarmOperState(state=UVEAlarmState.Active,
                                 head_timestamp=0, alarm_timestamp=[])
       # State machine with all timers/frequency checks disabled.
       alarm_info = AlarmStateMachine(
           tab=table, uv=name, nm=alarm_type, activeTimer=0, idleTimer=0,
           freqCheck_Times=0, freqCheck_Seconds=0, freqExceededCheck=False,
           sandesh=self._ag._sandesh)
       alarm_info.set_uai(uai)
       return alarm_info
Ejemplo n.º 19
0
   def create_test_alarm_info(self, table, name, alarm_type):
       """Build an AlarmStateMachine seeded with a dummy alarm (rule form)."""
       condition_match = AlarmConditionMatch(
           condition=AlarmCondition(operation="!=",
                                    operand1="dummytoken",
                                    operand2=json.dumps("UP")),
           match=[AlarmMatch(json_operand1_value=json.dumps("DOWN"))])
       or_list = [AlarmAndList([condition_match])]
       uai = UVEAlarmInfo(type=alarm_type, severity=1,
                          timestamp=UTCTimestampUsec(), token="dummytoken",
                          alarm_rules=AlarmRules(or_list), ack=False)
       conf = UVEAlarmConfig()
       state = UVEAlarmOperState(state=UVEAlarmState.Active,
                                 head_timestamp=0, alarm_timestamp=[])
       uv = table + ':' + name
       # State machine with all timers/frequency checks disabled.
       alarm_info = AlarmStateMachine(
           tab=table, uv=uv, nm=alarm_type, activeTimer=0, idleTimer=0,
           freqCheck_Times=0, freqCheck_Seconds=0, freqExceededCheck=False,
           sandesh=self._ag._sandesh)
       alarm_info.set_uai(uai)
       return alarm_info
    def test_00_database_purge_query(self):
        '''
        This test starts redis, vizd, opserver and qed.
        It uses the test class' cassandra instance
        and checks that the database purge functionality
        is working properly.
        '''
        logging.info("*** test_00_database_purge_query ***")
        if AnalyticsDbTest._check_skip_test() is True:
            return True

        vizd_obj = self.useFixture(
            AnalyticsFixture(logging, builddir, self.__class__.cassandra_port))
        assert vizd_obj.verify_on_setup()
        assert vizd_obj.verify_collector_obj_count()
        # Query window: the last hour.
        end_time = UTCTimestampUsec()
        start_time = end_time - 3600 * pow(10, 6)
        # Object logs must be queryable before the purge and the purge
        # query itself must succeed; the after-purge check verifies the
        # purged data is gone.
        assert vizd_obj.verify_collector_object_log(start_time, end_time)
        assert vizd_obj.verify_collector_object_log_before_purge(
            start_time, end_time)
        assert vizd_obj.verify_database_purge_query(start_time, end_time)
        assert vizd_obj.verify_collector_object_log_after_purge(
            start_time, end_time)
        return True
Ejemplo n.º 21
0
    def handle_uve_notif(self, uves, remove=False):
        """Re-evaluate alarm rules for a set of changed UVEs.

        For every UVE in ``uves``, the alarm handlers registered for
        its table are invoked against the UVE's current content, and
        the resulting alarm set is compared with the cached one; an
        AlarmTrace is sent only when alarms were added, updated or
        deleted.

        Args:
            uves: iterable of UVE keys of the form "<table>:<name>".
            remove: when True, treat each UVE as deleted instead of
                fetching its current content.
        """
        self._logger.debug("Changed UVEs : %s" % str(uves))
        no_handlers = set()
        for uv in uves:
            tab, uve_name = uv.split(':', 1)
            # Py2-only dict.has_key() replaced with 'in' (valid on both
            # Python 2 and 3), here and below.
            if tab not in self.mgrs:
                no_handlers.add(tab)
                continue
            if remove:
                uve_data = []
            else:
                filters = {'kfilt': [uve_name]}
                itr = self._us.multi_uve_get(tab, True, filters)
                # next(itr) instead of py2-only itr.next()
                uve_data = next(itr)['value']
            if len(uve_data) == 0:
                # UVE is gone; withdraw any alarms raised against it.
                self._logger.info("UVE %s deleted" % uv)
                if uv in self.tab_alarms[tab]:
                    del self.tab_alarms[tab][uv]
                    ustruct = UVEAlarms(name=uve_name, deleted=True)
                    alarm_msg = AlarmTrace(data=ustruct, table=tab)
                    self._logger.info('send del alarm: %s' % (alarm_msg.log()))
                    alarm_msg.send()
                continue
            # Run every alarm plugin registered for this table.
            results = self.mgrs[tab].map_method("__call__", uv, uve_data)
            new_uve_alarms = {}
            for res in results:
                nm, sev, errs = res
                self._logger.debug("Alarm[%s] %s: %s" % (tab, nm, str(errs)))
                elems = []
                for ae in errs:
                    rule, val = ae
                    rv = AlarmElement(rule, val)
                    elems.append(rv)
                if len(elems):
                    new_uve_alarms[nm] = UVEAlarmInfo(type=nm,
                                                      severity=sev,
                                                      timestamp=0,
                                                      description=elems,
                                                      ack=False)
            del_types = []
            if uv in self.tab_alarms[tab]:
                for nm, uai in self.tab_alarms[tab][uv].items():
                    # Compare with timestamp zeroed so only alarm
                    # content (not raise time) decides "changed".
                    uai2 = copy.deepcopy(uai)
                    uai2.timestamp = 0
                    # This type was present earlier, but is now gone
                    if nm not in new_uve_alarms:
                        del_types.append(nm)
                    else:
                        # This type has no new information
                        if pprint.pformat(uai2) == \
                                pprint.pformat(new_uve_alarms[nm]):
                            del new_uve_alarms[nm]
            if len(del_types) != 0  or \
                    len(new_uve_alarms) != 0:
                self._logger.debug("Alarm[%s] Deleted %s" % \
                        (tab, str(del_types)))
                self._logger.debug("Alarm[%s] Updated %s" % \
                        (tab, str(new_uve_alarms)))
                # These alarm types are new or updated
                for nm, uai2 in new_uve_alarms.items():
                    uai = copy.deepcopy(uai2)
                    uai.timestamp = UTCTimestampUsec()
                    if uv not in self.tab_alarms[tab]:
                        self.tab_alarms[tab][uv] = {}
                    self.tab_alarms[tab][uv][nm] = uai
                # These alarm types are now gone
                for dnm in del_types:
                    del self.tab_alarms[tab][uv][dnm]

                ustruct = None
                if len(self.tab_alarms[tab][uv]) == 0:
                    ustruct = UVEAlarms(name=uve_name, deleted=True)
                    del self.tab_alarms[tab][uv]
                else:
                    # list() so the payload is a list on Python 3 too.
                    ustruct = UVEAlarms(
                        name=uve_name,
                        alarms=list(self.tab_alarms[tab][uv].values()),
                        deleted=False)
                alarm_msg = AlarmTrace(data=ustruct, table=tab)
                self._logger.info('send alarm: %s' % (alarm_msg.log()))
                alarm_msg.send()

        if len(no_handlers):
            self._logger.debug('No Alarm Handlers for %s' % str(no_handlers))
    def generate_flow_samples(self):
        """Generate ingress and egress flow samples for query tests.

        Builds self.flow_cnt ingress flows and the same number of
        egress flows, then emits flow-stat samples for each according
        to a fixed per-flow template, recording the start/end
        timestamps and sample counts used later for verification.

        NOTE(review): the '/' divisions below ('i / 2', duration/tdiff)
        rely on Python 2 integer division -- under Python 3 they yield
        floats and range() would fail; confirm before porting.
        """
        self.flows = []
        self.egress_flows = []
        self.flow_cnt = 5
        self.num_flow_samples = 0
        self.egress_num_flow_samples = 0
        self.flow_start_time = None
        self.flow_end_time = None
        self.egress_flow_start_time = None
        self.egress_flow_end_time = None
        # Ingress flows: vn1 -> vn2; destvn deliberately contains the
        # special characters '&>' (exercises encoding in queries).
        for i in range(self.flow_cnt):
            self.flows.append(
                FlowLogData(flowuuid=str(uuid.uuid1()),
                            direction_ing=1,
                            sourcevn='domain1:admin:vn1',
                            destvn='domain1:admin:vn2&>',
                            sourceip=netaddr.IPAddress('10.10.10.1'),
                            destip=netaddr.IPAddress('2001:db8::2:1'),
                            sport=i * 10 + 32747,
                            dport=i + 100,
                            protocol=i / 2,
                            action='pass',
                            sg_rule_uuid=str(uuid.uuid1()),
                            nw_ace_uuid=str(uuid.uuid1()),
                            vmi_uuid=self.flow_vmi_uuid))
            self.flows[i].samples = []
            self._logger.info(str(self.flows[i]))

        # Egress flows: mirrored endpoints, action 'drop' with a
        # per-flow drop_reason.
        for i in range(self.flow_cnt):
            self.egress_flows.append(
                FlowLogData(flowuuid=str(uuid.uuid1()),
                            direction_ing=0,
                            destvn='domain1:admin:vn1',
                            sourcevn='domain1:admin:vn2',
                            destip=netaddr.IPAddress('10.10.10.1'),
                            sourceip=netaddr.IPAddress('2001:db8::1:2'),
                            dport=i * 10 + 32747,
                            sport=i + 100,
                            protocol=i / 2,
                            action='drop',
                            sg_rule_uuid=str(uuid.uuid1()),
                            nw_ace_uuid=str(uuid.uuid1()),
                            vmi_uuid=self.flow_vmi_uuid,
                            drop_reason='Reason' + str(i)))
            self.egress_flows[i].samples = []
            self._logger.info(str(self.egress_flows[i]))

        # 'duration' - lifetime of the flow in seconds
        # 'tdiff'    - time difference between consecutive flow samples
        # 'pdiff'    - packet increment factor
        # 'psize'    - packet size
        flow_template = [{
            'duration': 60,
            'tdiff': 5,
            'pdiff': 1,
            'psize': 50
        }, {
            'duration': 30,
            'tdiff': 4,
            'pdiff': 2,
            'psize': 100
        }, {
            'duration': 20,
            'tdiff': 3,
            'pdiff': 3,
            'psize': 25
        }, {
            'duration': 10,
            'tdiff': 2,
            'pdiff': 4,
            'psize': 75
        }, {
            'duration': 5,
            'tdiff': 1,
            'pdiff': 5,
            'psize': 120
        }]
        assert (len(flow_template) == self.flow_cnt)

        # set the flow_end_time to _start_time + (max duration in
        # flow_template)
        max_duration = 0
        for fd in flow_template:
            if max_duration < fd['duration']:
                max_duration = fd['duration']
        assert (self._start_time is not None)
        self.flow_start_time = self._start_time
        self.flow_end_time = self.flow_start_time + \
            (max_duration * self._KSECINMSEC)
        assert (self.flow_end_time <= UTCTimestampUsec())

        # generate flows based on the flow template defined above
        cnt = 0
        for fd in flow_template:
            # Samples per flow: one every tdiff seconds, rounded up.
            num_samples = (fd['duration'] / fd['tdiff']) +\
                bool((fd['duration'] % fd['tdiff']))
            for i in range(num_samples):
                # Jitter each sample timestamp by up to 10ms.
                ts = self.flow_start_time + \
                    (i * fd['tdiff'] * self._KSECINMSEC) + \
                    random.randint(1, 10000)
                pkts = (i + 1) * fd['pdiff']
                bytes = pkts * fd['psize']
                self.num_flow_samples += 1
                self.send_flow_stat(self.flows[cnt], bytes, pkts, ts)
            cnt += 1

        # set the egress_flow_start_time to flow_end_time + (max duration
        # in flow template)
        # set the egress_flow_end_time to egress_flow_start_time + (max
        # duration in flow_template)
        self.egress_flow_start_time = self.flow_end_time + \
                (max_duration * self._KSECINMSEC)
        self.egress_flow_end_time = self.egress_flow_start_time + \
                (max_duration * self._KSECINMSEC)
        assert (self.egress_flow_end_time <= UTCTimestampUsec())

        # generate egress_flows based on the flow template defined above
        cnt = 0
        for fd in flow_template:
            num_samples = (fd['duration'] / fd['tdiff']) +\
                bool((fd['duration'] % fd['tdiff']))
            for i in range(num_samples):
                ts = self.egress_flow_start_time + \
                    (i * fd['tdiff'] * self._KSECINMSEC) + \
                    random.randint(1, 10000)
                pkts = (i + 1) * fd['pdiff']
                bytes = pkts * fd['psize']
                self.egress_num_flow_samples += 1
                self.send_flow_stat(self.egress_flows[cnt], bytes, pkts, ts)
            cnt += 1
Ejemplo n.º 23
0
 def _send_flow_sandesh(self):
     """Continuously generate and send FlowLogData messages via sandesh.

     Runs forever: on the first iteration it builds a fixed set of flows
     (self._num_flows_per_network flows for each VN in
     [self._start_vn, self._end_vn)), then repeatedly re-sends every flow
     with freshly randomized packet/byte increments, throttling itself to
     roughly self._num_flows_per_sec flows per second.
     """
     flows = []
     flow_cnt = 0    # flows sent in the current one-second window
     diff_time = 0   # wall-clock seconds spent sending in the current window
     while True:
         # Populate flows if not done
         if len(flows) == 0:
             other_vn = self._other_vn
             for vn in range(self._start_vn, self._end_vn):
                 for nflow in range(self._num_flows_per_network):
                     init_packets = random.randint(1, \
                         self._FLOW_PKTS_PER_SEC)
                     init_bytes = init_packets * \
                         random.randint(1, self._BYTES_PER_PACKET)
                     # Endpoints: the nflow-th host in the source and
                     # destination VN subnets.
                     sourceip = int(self._ip_vns[vn] + \
                         self._ip_start_index + nflow)
                     destip = int(self._ip_vns[other_vn] + \
                         self._ip_start_index + nflow)
                     flows.append(
                         FlowLogData(flowuuid=str(uuid.uuid1()),
                                     direction_ing=random.randint(0, 1),
                                     sourcevn=self._VN_PREFIX + str(vn),
                                     destvn=self._VN_PREFIX + str(other_vn),
                                     sourceip=netaddr.IPAddress(sourceip),
                                     destip=netaddr.IPAddress(destip),
                                     sport=random.randint(0, 65535),
                                     dport=random.randint(0, 65535),
                                     protocol=random.choice([6, 17, 1]),
                                     setup_time=UTCTimestampUsec(),
                                     packets=init_packets,
                                     bytes=init_bytes,
                                     diff_packets=init_packets,
                                     diff_bytes=init_bytes))
                 # Pair each source VN with a rotating destination VN.
                 other_vn = (other_vn + 1) % self._num_vns
             self._logger.info("Total flows: %d" % len(flows))
         # Send the flows periodically
         for flow_data in flows:
             stime = time.time()
             new_packets = random.randint(1, self._FLOW_PKTS_PER_SEC)
             new_bytes = new_packets * \
                 random.randint(1, self._BYTES_PER_PACKET)
             # Cumulative counters grow; diff_* carry only this sample's
             # increment.
             flow_data.packets += new_packets
             flow_data.bytes += new_bytes
             flow_data.diff_packets = new_packets
             flow_data.diff_bytes = new_bytes
             flow_object = FlowLogDataObject(flowdata=[flow_data],
                                             sandesh=self._sandesh_instance)
             flow_object.send(sandesh=self._sandesh_instance)
             flow_cnt += 1
             diff_time += time.time() - stime
             if diff_time >= 1.0:
                 # A full second of send time elapsed; complain if we fell
                 # short of the target rate, then start a new window.
                 if flow_cnt < self._num_flows_per_sec:
                     self._logger.error(
                         "Unable to send %d flows per second, "
                         "only sent %d" %
                         (self._num_flows_per_sec, flow_cnt))
                 flow_cnt = 0
                 gevent.sleep(0)  # yield to other greenlets without delay
                 diff_time = 0
             else:
                 if flow_cnt == self._num_flows_per_sec:
                     # Target rate reached early; sleep off the remainder
                     # of the one-second window.
                     self._logger.info("Sent %d flows in %f secs" %
                                       (flow_cnt, diff_time))
                     flow_cnt = 0
                     gevent.sleep(1.0 - diff_time)
                     diff_time = 0
Ejemplo n.º 24
0
    def handle_uve_notif(self, part, uves):
        """
        Call this function when a UVE has changed. This can also
        happen when taking ownership of a partition, or when a
        generator is deleted.
        Args:
            part   : Partition Number
            uves   : dict, where the key is the UVE Name.
                     The value is either a dict of UVE struct types, or
                     None, which means that all UVE structs should be
                     processed.

        Returns:
            dict of changed UVE structs (keyed by UVE name) on success,
            None if any UVE fetch reported failures
        """
        self._logger.debug("Changed part %d UVEs : %s" % (part, str(uves)))
        success = True
        output = {}
        for uv, types in uves.iteritems():
            # UVE key format is "<table>:<uve name>"
            tab = uv.split(':', 1)[0]
            if tab not in self.tab_perf:
                self.tab_perf[tab] = AGTabStats()

            uve_name = uv.split(':', 1)[1]
            prevt = UTCTimestampUsec()
            # Restrict the UVE fetch to the changed struct types, if given.
            filters = {}
            if types:
                filters["cfilt"] = {}
                for typ in types.keys():
                    filters["cfilt"][typ] = set()

            failures, uve_data = self._us.get_uve(uv, True, filters)

            if failures:
                success = False
            self.tab_perf[tab].record_get(UTCTimestampUsec() - prevt)
            # Handling Agg UVEs
            if not part in self.ptab_info:
                self._logger.error("Creating UVE table for part %s" %
                                   str(part))
                self.ptab_info[part] = {}

            if not tab in self.ptab_info[part]:
                self.ptab_info[part][tab] = {}

            if uve_name not in self.ptab_info[part][tab]:
                self.ptab_info[part][tab][uve_name] = AGKeyInfo(part)
            prevt = UTCTimestampUsec()
            output[uv] = {}
            touched = False
            if not types:
                # No type filter: replace the whole UVE and report every
                # removed/changed/added struct in output[uv].
                self.ptab_info[part][tab][uve_name].update(uve_data)
                if len(self.ptab_info[part][tab][uve_name].removed()):
                    touched = True
                    self._logger.info("UVE %s removed structs %s" % (uve_name, \
                            self.ptab_info[part][tab][uve_name].removed()))
                    for rems in self.ptab_info[part][tab][uve_name].removed():
                        output[uv][rems] = None
                if len(self.ptab_info[part][tab][uve_name].changed()):
                    touched = True
                    self._logger.debug("UVE %s changed structs %s" % (uve_name, \
                            self.ptab_info[part][tab][uve_name].changed()))
                    for chgs in self.ptab_info[part][tab][uve_name].changed():
                        output[uv][chgs] = \
                                self.ptab_info[part][tab][uve_name].values()[chgs]
                if len(self.ptab_info[part][tab][uve_name].added()):
                    touched = True
                    self._logger.debug("UVE %s added structs %s" % (uve_name, \
                            self.ptab_info[part][tab][uve_name].added()))
                    for adds in self.ptab_info[part][tab][uve_name].added():
                        output[uv][adds] = \
                                self.ptab_info[part][tab][uve_name].values()[adds]
            else:
                # Type filter present: update each struct type individually.
                for typ in types:
                    val = None
                    if typ in uve_data:
                        val = uve_data[typ]
                    self.ptab_info[part][tab][uve_name].update_single(typ, val)
                    if len(self.ptab_info[part][tab][uve_name].removed()):
                        touched = True
                        self._logger.info("UVE %s removed structs %s" % (uve_name, \
                                self.ptab_info[part][tab][uve_name].removed()))
                        for rems in self.ptab_info[part][tab][
                                uve_name].removed():
                            output[uv][rems] = None
                    if len(self.ptab_info[part][tab][uve_name].changed()):
                        touched = True
                        self._logger.debug("UVE %s changed structs %s" % (uve_name, \
                                self.ptab_info[part][tab][uve_name].changed()))
                        for chgs in self.ptab_info[part][tab][
                                uve_name].changed():
                            output[uv][chgs] = \
                                    self.ptab_info[part][tab][uve_name].values()[chgs]
                    if len(self.ptab_info[part][tab][uve_name].added()):
                        touched = True
                        self._logger.debug("UVE %s added structs %s" % (uve_name, \
                                self.ptab_info[part][tab][uve_name].added()))
                        for adds in self.ptab_info[part][tab][uve_name].added(
                        ):
                            output[uv][adds] = \
                                    self.ptab_info[part][tab][uve_name].values()[adds]
            # Nothing actually changed for this UVE; drop it from output.
            if not touched:
                del output[uv]
            local_uve = self.ptab_info[part][tab][uve_name].values()

            self.tab_perf[tab].record_pub(UTCTimestampUsec() - prevt)

            if len(local_uve.keys()) == 0:
                self._logger.info("UVE %s deleted in proc" % (uv))
                del self.ptab_info[part][tab][uve_name]
                output[uv] = None

                # Both alarm and non-alarm contents are gone.
                # We do not need to do alarm evaluation
                continue

            # Withdraw the alarm if the UVE has no non-alarm structs
            if len(local_uve.keys()) == 1 and "UVEAlarms" in local_uve:
                if tab in self.tab_alarms:
                    if uv in self.tab_alarms[tab]:
                        del self.tab_alarms[tab][uv]
                        ustruct = UVEAlarms(name=uve_name, deleted=True)
                        alarm_msg = AlarmTrace(data=ustruct, table=tab, \
                                sandesh=self._sandesh)
                        self._logger.info('send del alarm: %s' %
                                          (alarm_msg.log()))
                        alarm_msg.send(sandesh=self._sandesh)
                continue

            # Handling Alarms
            if not self.mgrs.has_key(tab):
                continue
            prevt = UTCTimestampUsec()

            #TODO: We may need to remove alarm from local_uve before
            #      alarm evaluation
            # if "UVEAlarms" in uve_data:
            #     del uve_data["UVEAlarms"]

            # Run every alarm plugin registered for this table against the
            # aggregated UVE contents.
            results = self.mgrs[tab].map_method("__call__", uv, local_uve)
            self.tab_perf[tab].record_call(UTCTimestampUsec() - prevt)
            new_uve_alarms = {}
            for res in results:
                nm, sev, errs = res
                self._logger.debug("Alarm[%s] %s: %s" % (tab, nm, str(errs)))
                elems = []
                for ae in errs:
                    rule, val = ae
                    rv = AlarmElement(rule, val)
                    elems.append(rv)
                if len(elems):
                    new_uve_alarms[nm] = UVEAlarmInfo(type=nm,
                                                      severity=sev,
                                                      timestamp=0,
                                                      token="",
                                                      description=elems,
                                                      ack=False)
            del_types = []
            if self.tab_alarms[tab].has_key(uv):
                for nm, uai in self.tab_alarms[tab][uv].iteritems():
                    # Compare ignoring timestamp/token, which vary per send.
                    uai2 = copy.deepcopy(uai)
                    uai2.timestamp = 0
                    uai2.token = ""
                    # This type was present earlier, but is now gone
                    if not new_uve_alarms.has_key(nm):
                        del_types.append(nm)
                    else:
                        # This type has no new information
                        if uai2 == new_uve_alarms[nm]:
                            del new_uve_alarms[nm]
            if len(del_types) != 0  or \
                    len(new_uve_alarms) != 0:
                self._logger.debug("Alarm[%s] Deleted %s" % \
                        (tab, str(del_types)))
                self._logger.debug("Alarm[%s] Updated %s" % \
                        (tab, str(new_uve_alarms)))
                # These alarm types are new or updated
                for nm, uai2 in new_uve_alarms.iteritems():
                    uai = copy.deepcopy(uai2)
                    uai.timestamp = UTCTimestampUsec()
                    uai.token = Controller.token(self._sandesh, uai.timestamp)
                    if not self.tab_alarms[tab].has_key(uv):
                        self.tab_alarms[tab][uv] = {}
                    self.tab_alarms[tab][uv][nm] = uai
                # These alarm types are now gone
                for dnm in del_types:
                    del self.tab_alarms[tab][uv][dnm]

                # Send the consolidated alarm state for this UVE: a delete
                # marker if no alarm types remain, else the full alarm list.
                ustruct = None
                if len(self.tab_alarms[tab][uv]) == 0:
                    ustruct = UVEAlarms(name=uve_name, deleted=True)
                    del self.tab_alarms[tab][uv]
                else:
                    alm_copy = copy.deepcopy(self.tab_alarms[tab][uv])
                    ustruct = UVEAlarms(name=uve_name,
                                        alarms=alm_copy.values(),
                                        deleted=False)
                alarm_msg = AlarmTrace(data=ustruct, table=tab, \
                        sandesh=self._sandesh)
                self._logger.info('send alarm: %s' % (alarm_msg.log()))
                alarm_msg.send(sandesh=self._sandesh)
        if success:
            return output
        else:
            return None
Ejemplo n.º 25
0
    def test_03_alarm_ack_callback(self, MockAlarmTrace):
        """Exercise alarm_ack_callback() against a table of ack requests.

        Seeds two alarm types (type1/type2) under table1:name1, then drives
        the callback with invalid table/name/type/timestamp requests
        (expected to be rejected) and with valid and duplicate acks
        (expected to succeed). For each case it verifies the returned
        response code and, when the ack changes state, that an AlarmTrace
        message carrying the updated ack flags is sent exactly once.
        """
        self._ag.tab_alarms = {}
        self.add_test_alarm('table1', 'name1', 'type1')
        self.add_test_alarm('table1', 'name1', 'type2')
        # Keep a pristine copy to compute the expected alarm contents.
        tab_alarms_copy = copy.deepcopy(self._ag.tab_alarms)

        TestCase = namedtuple('TestCase', ['name', 'input', 'output'])
        TestInput = namedtuple('TestInput', ['alarm_ack_req'])
        TestOutput = namedtuple('TestOutput',
                                ['return_code', 'alarm_send', 'ack_values'])

        tests = [
            TestCase(
                name='case 1: Invalid "table"',
                input=TestInput(
                    alarm_ack_req=SandeshAlarmAckRequest(table='invalid_table',
                        name='name1', type='type1',
                        timestamp=UTCTimestampUsec())),
                output=TestOutput(
                    return_code=SandeshAlarmAckResponseCode.ALARM_NOT_PRESENT,
                    alarm_send=False, ack_values=None)
            ),
            TestCase(
                name='case 2: Invalid "name"',
                input=TestInput(
                    alarm_ack_req=SandeshAlarmAckRequest(table='table1',
                        name='invalid_name', type='type1',
                        timestamp=UTCTimestampUsec())),
                output=TestOutput(
                    return_code=SandeshAlarmAckResponseCode.ALARM_NOT_PRESENT,
                    alarm_send=False, ack_values=None)
            ),
            TestCase(
                name='case 3: Invalid "type"',
                input=TestInput(
                    alarm_ack_req=SandeshAlarmAckRequest(table='table1',
                        name='name1', type='invalid_type',
                        timestamp=UTCTimestampUsec())),
                output=TestOutput(
                    return_code=SandeshAlarmAckResponseCode.ALARM_NOT_PRESENT,
                    alarm_send=False, ack_values=None)
            ),
            TestCase(
                # Valid table/name/type, but a timestamp that does not match
                # the stored alarm's timestamp.
                name='case 4: Invalid "timestamp"',
                input=TestInput(
                    alarm_ack_req=SandeshAlarmAckRequest(table='table1',
                        name='name1', type='type1',
                        timestamp=UTCTimestampUsec())),
                output=TestOutput(
                    return_code=\
                        SandeshAlarmAckResponseCode.INVALID_ALARM_REQUEST,
                    alarm_send=False, ack_values=None)
            ),
            TestCase(
                name='case 5: Valid ack request',
                input=TestInput(
                    alarm_ack_req=SandeshAlarmAckRequest(table='table1',
                        name='name1', type='type2',
                        timestamp=self.get_test_alarm(
                            'table1', 'name1', 'type2').timestamp)),
                output=TestOutput(
                    return_code=SandeshAlarmAckResponseCode.SUCCESS,
                    alarm_send=True, ack_values={'type1':False, 'type2':True})
            ),
            TestCase(
                # Re-acking an already-acked alarm succeeds but sends nothing.
                name='case 6: Duplicate ack request',
                input=TestInput(
                    alarm_ack_req=SandeshAlarmAckRequest(table='table1',
                        name='name1', type='type2',
                        timestamp=self.get_test_alarm(
                            'table1', 'name1', 'type2').timestamp)),
                output=TestOutput(
                    return_code=SandeshAlarmAckResponseCode.SUCCESS,
                    alarm_send=False, ack_values=None)
            ),
            TestCase(
                name='case 7: Valid ack request - different alarm type',
                input=TestInput(
                    alarm_ack_req=SandeshAlarmAckRequest(table='table1',
                        name='name1', type='type1',
                        timestamp=self.get_test_alarm(
                            'table1', 'name1', 'type1').timestamp)),
                output=TestOutput(
                    return_code=SandeshAlarmAckResponseCode.SUCCESS,
                    alarm_send=True, ack_values={'type1':True, 'type2':True})
            )
        ]

        for case in tests:
            logging.info('=== Test %s ===' % (case.name))
            return_code = self._ag.alarm_ack_callback(case.input.alarm_ack_req)
            # verify return code
            self.assertEqual(case.output.return_code, return_code)
            table = case.input.alarm_ack_req.table
            name = case.input.alarm_ack_req.name
            if case.output.alarm_send is True:
                # verify alarm ack message is sent
                uvekey = table + ':' + name
                # Apply the expected ack flags to the pristine copy and
                # compare the sent AlarmTrace against it.
                for atype, alarm in tab_alarms_copy[table][uvekey].iteritems():
                    if atype in case.output.ack_values:
                        alarm.ack = case.output.ack_values[atype]
                alarms = copy.deepcopy(tab_alarms_copy[table][uvekey])
                alarm_data = UVEAlarms(name=name, alarms=alarms.values())
                MockAlarmTrace.assert_called_once_with(
                    data=alarm_data, table=table, sandesh=self._ag._sandesh)
                MockAlarmTrace().send.assert_called_once_with(
                    sandesh=self._ag._sandesh)
                MockAlarmTrace.reset_mock()
Ejemplo n.º 26
0
    def generate_flow_samples(self):
        """Generate a fixed set of flows and send their statistics samples.

        Creates self.flow_cnt FlowDataIpv4 flows between vn1 and vn2 and
        replays each one according to its template entry:
          'duration' - lifetime of the flow in seconds
          'tdiff'    - time difference between consecutive flow samples
          'pdiff'    - packet increment factor
          'psize'    - packet size
        Side effects: populates self.flows, self.num_flow_samples,
        self.flow_start_time and self.flow_end_time, and emits one
        send_flow_stat() call per generated sample.
        """
        self.flows = []
        self.flow_cnt = 5
        self.num_flow_samples = 0
        self.flow_start_time = None
        self.flow_end_time = None
        for i in range(self.flow_cnt):
            self.flows.append(
                FlowDataIpv4(flowuuid=str(uuid.uuid1()),
                             direction_ing=1,
                             sourcevn='domain1:admin:vn1',
                             destvn='domain1:admin:vn2',
                             sourceip=0x0A0A0A01,
                             destip=0x0A0A0A02,
                             sport=i + 10,
                             dport=i + 100,
                             # floor division keeps protocol an int on
                             # both Python 2 and Python 3
                             protocol=i // 2))
            self._logger.info(str(self.flows[i]))

        flow_template = [{
            'duration': 60,
            'tdiff': 5,
            'pdiff': 1,
            'psize': 50
        }, {
            'duration': 30,
            'tdiff': 4,
            'pdiff': 2,
            'psize': 100
        }, {
            'duration': 20,
            'tdiff': 3,
            'pdiff': 3,
            'psize': 25
        }, {
            'duration': 10,
            'tdiff': 2,
            'pdiff': 4,
            'psize': 75
        }, {
            'duration': 5,
            'tdiff': 1,
            'pdiff': 5,
            'psize': 120
        }]
        assert (len(flow_template) == self.flow_cnt)

        # set the flow_end_time to _start_time + (max duration in
        # flow_template)
        max_duration = max(fd['duration'] for fd in flow_template)
        assert (self._start_time is not None)
        self.flow_start_time = self._start_time
        self.flow_end_time = self.flow_start_time + \
            (max_duration * self._KSECINMSEC)
        assert (self.flow_end_time <= UTCTimestampUsec())

        # generate flows based on the flow template defined above
        for cnt, fd in enumerate(flow_template):
            # number of samples = ceil(duration / tdiff); '//' avoids the
            # float result that '/' would yield under Python 3
            num_samples = (fd['duration'] // fd['tdiff']) + \
                bool(fd['duration'] % fd['tdiff'])
            for i in range(num_samples):
                # jitter each sample timestamp by up to 10 msec
                ts = self.flow_start_time + \
                    (i * fd['tdiff'] * self._KSECINMSEC) + \
                    random.randint(1, 10000)
                pkts = (i + 1) * fd['pdiff']
                # renamed from 'bytes' to avoid shadowing the builtin
                num_bytes = pkts * fd['psize']
                self.num_flow_samples += 1
                self.send_flow_stat(self.flows[cnt], num_bytes, pkts, ts)
 def reset_acq_time(self):
     """Reset the acquisition timestamp to the current UTC time in usec."""
     self._acq_time = UTCTimestampUsec()
Ejemplo n.º 28
0
 def run(self):
     """Fetch object tables from the analytics API, parse CLI args and
     run the query, either once or in tail (follow) mode.

     Returns None on normal completion or user interrupt, and -1 when the
     start/end time specification cannot be parsed.
     """
     try:
         analytics_api_ip = self._defaults['analytics_api_ip']
         analytics_api_port = self._defaults['analytics_api_port']
         username = self._defaults['username']
         password = self._defaults['password']
         # Pre-scan argv for connection options needed to fetch the table
         # list; full argument parsing happens in parse_args() below.
         # Bounds-checked so a trailing flag without a value no longer
         # raises IndexError here (parse_args will report it).
         for index, arg in enumerate(sys.argv):
             if index + 1 >= len(sys.argv):
                 break
             value = sys.argv[index + 1]
             if arg == "--analytics-api-ip":
                 analytics_api_ip = value
             elif arg == "--analytics-api-port":
                 analytics_api_port = value
             elif arg == "--admin-user":
                 username = value
             elif arg == "--admin-password":
                 password = value
         tab_url = "http://" + analytics_api_ip + ":" +\
             analytics_api_port + "/analytics/tables"
         tables = OpServerUtils.get_url_http(tab_url, username, password)
         if tables != {}:
             table_list = json.loads(tables.text)
             for table in table_list:
                 if table['type'] == 'OBJECT':
                     # append to OBJECT_TYPE_LIST only if not existing
                     if table['name'] not in OBJECT_TABLE_MAP.values():
                         OBJECT_TYPE_LIST.append(str(table['name']))
                         # For object tables the actual table name and the
                         # name used in the help message are the same.
                         OBJECT_TABLE_MAP[table['name']] = table['name']
         if self.parse_args() != 0:
             return
         if self._args.tail:
             # Tail mode: poll every 3 seconds, starting 10 seconds back.
             start_time = UTCTimestampUsec() - 10 * pow(10, 6)
             while True:
                 self._start_time = start_time
                 self._end_time = UTCTimestampUsec()
                 start_time = self._end_time + 1
                 time.sleep(3)
                 result = self.query()
                 if result == -1:
                     return
                 self.display(result)
         else:
             # Default to the last 10 minutes when no range is given.
             start_time = self._args.start_time or "now-10m"
             end_time = self._args.end_time or "now"
             try:
                 self._start_time, self._end_time = \
                     OpServerUtils.parse_start_end_time(
                         start_time=start_time,
                         end_time=end_time,
                         last=self._args.last)
             except Exception:
                 # Narrowed from a bare 'except:' so SystemExit and
                 # KeyboardInterrupt are no longer swallowed here.
                 return -1
             result = self.query()
             if result == -1:
                 return
             # Accumulate the result before processing it as the
             # formatting of result can be cpu intensive and hence would
             # affect the overall time taken to fetch the result from the
             # analytics-api. Since the query result ttl is set to 5 min
             # in redis, it is necessary to improve the read throughput.
             result_list = self.read_result(result)
             self.display(result_list)
     except KeyboardInterrupt:
         return