def events_listener():
    warnings_queue = Communicator(queue_name='web_rcv',
                                  exchange='to_master',
                                  routing_key='#',
                                  exchange_type='topic')
    base_url = "http://127.0.0.1:5000"

    for method, properties, msg in warnings_queue.consume():
        data = {
            'method': method.routing_key,
            'msg': msg.decode()
        }
        try:
            requests.post(base_url + "/events", json.dumps(data))
        except Exception as e:
            print("Error sending the event to webmanager: %s" % str(e)[:100])
class Dummy(object):

    def __init__(self, name):
        self.name = name
        self.count_sent = [0 for i in range(count_dummy)]
        self.count_recv = [0 for i in range(count_dummy)]
        self.filename_sent = ('./../../test/connection_dur_test/dummy_log/' +
                              name + '_sent.txt')
        self.filename_recv = ('./../../test/connection_dur_test/dummy_log/' +
                              name + '_recv.txt')
        self.file_sent = open(self.filename_sent, 'w')
        self.file_recv = open(self.filename_recv, 'w')

    def init_comm_agents(self, core=False):
        self.comm_agents = Communicator(core)

    def deinit_comm_agents(self):
        self.comm_agents.close()

    def perceive(self):
        message = self.comm_agents.read()
        if message:
            t = datetime.now()
            print("Got message / Got Time: " + str(t) + " From\t" + message,
                  file=self.file_recv)
            idx = ''.join(x for x in message.split('/')[0] if x.isdigit())
            if idx.isdigit():
                self.count_recv[int(idx) - 1] += 1

    def tell(self, statement):
        t = datetime.now()
        msg = self.name + '/ Sent Time: ' + str(t) + '/ Msg: ' + statement
        print(self.name + "\tis telling to everyone " + msg,
              file=self.file_sent)
        for i in range(count_dummy):
            self.count_sent[i] += 1
        self.comm_agents.send(msg)

    def print_res(self):
        global recv_msg
        print(self.name)
        for i in range(count_dummy):
            print('\t' + self.name + ' sent msg to \t\t%5d #: %d'
                  % (i + 1, self.count_sent[i]))
            print('\t' + self.name + ' recv msg from \t%5d #: %d'
                  % (i + 1, self.count_recv[i]))
            recv_msg += self.count_recv[i]
        self.file_sent.close()
        self.file_recv.close()
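# A minimal sketch of how the Dummy agents above might be exercised
# (assumption: this driver is not part of the original test script). The
# count_dummy and recv_msg globals that the class reads are set here only so
# the sketch is self-contained, and the dummy_log directory referenced in
# __init__ is assumed to exist as in the original test layout.
count_dummy = 2
recv_msg = 0

if __name__ == '__main__':
    dummies = [Dummy('dummy%d' % (i + 1)) for i in range(count_dummy)]
    for d in dummies:
        d.init_comm_agents()
    for d in dummies:
        d.tell('hello')          # every dummy broadcasts one message
    for d in dummies:
        d.perceive()             # read back whatever arrived
    for d in dummies:
        d.print_res()            # dump counters and close the log files
        d.deinit_comm_agents()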
def get_status_info_comm():
    STATUS_INFO_EXCHANGE_HOSTADDRESS = \
        config.get('STATUS_INFO_EXCHANGE_HOSTADDRESS')
    STATUS_INFO_EXCHANGE_NAME = \
        config.get('STATUS_INFO_EXCHANGE_NAME')
    STATUS_INFO_EXPIRATION_TIME = \
        config.getint('STATUS_INFO_EXPIRATION_TIME')

    comm_info = Communicator(host_address=STATUS_INFO_EXCHANGE_HOSTADDRESS,
                             exchange=STATUS_INFO_EXCHANGE_NAME,
                             exchange_type='topic',
                             expiration_time=STATUS_INFO_EXPIRATION_TIME)
    return comm_info
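# A minimal usage sketch (assumption: not part of the original sources).
# Elsewhere in this codebase the status channel returned above is used to
# publish JSON payloads with routing_key='info', e.g. when a capture source
# is opened; the identifier and source below are purely illustrative.
if __name__ == '__main__':
    import json

    comm_info = get_status_info_comm()
    comm_info.send_message(
        json.dumps(dict(info_id="OPEN",
                        id="example-identifier",
                        content="Opening source: %s." % "/tmp/example.avi")),
        routing_key='info')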
class GorasLogging(Thread):

    def __init__(self):
        Thread.__init__(self)
        self.logs = []
        self.listener = Communicator(topic='logging')
        self.is_alive = True
        self.log_file = open('/tmp/latest_simulation.txt', 'w')

    def close(self):
        # Stop the loop and wait for the thread before closing the file,
        # so run() never writes to a closed file handle.
        self.is_alive = False
        self.join()
        self.log_file.close()

    def run(self):
        while self.is_alive:
            message = self.listener.read()
            if message == '':
                time.sleep(0.1)
                continue
            log_message = '{}\t{}\t\r\n'.format(
                datetime.datetime.now().isoformat(timespec='microseconds'),
                message)
            # Where to store?
            self.log_file.write(log_message)
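# A minimal sketch of how this logger thread could be driven (assumption:
# not taken from the original sources). Messages published on the 'logging'
# topic end up in /tmp/latest_simulation.txt until close() is called.
if __name__ == '__main__':
    logger_thread = GorasLogging()
    logger_thread.start()      # run() starts polling the 'logging' topic
    try:
        time.sleep(5)          # let it collect messages for a while
    finally:
        logger_thread.close()  # stops the loop, joins, then closes the file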
class PatternRecognition(object):

    def __init__(self, identifier, custom_config=None):
        self.globals_last_notification_datetime = datetime.now()
        self.globals_last_notification_number = 0
        self.tracklets_info = {}  # Collection of Tracklets
        self.identifier = identifier
        self.resolution_multiplier = 1
        self.config = CustomConfig(custom_config) if custom_config \
            else read_conf()
        self.movement_change_rules = load_system_rules(self.config)
        self.min_angle_rotation = self.config.getint('MIN_ANGLE_ROTATION')
        self.min_walking_speed = self.config.getint('MIN_WALKING_SPEED')
        self.min_running_speed = self.config.getint('MIN_RUNNING_SPEED')
        self.WEIGHT_FOR_NEW_DIRECTION_ANGLE = \
            self.config.getfloat('WEIGHT_NEW_DIRECTION_ANGLE')
        self.MIN_EVENTS_SPEED_AMOUNT = \
            self.config.getint('MIN_EVENTS_SPEED_AMOUNT')
        self.MIN_EVENTS_SPEED_TIME = \
            self.config.getint('MIN_EVENTS_SPEED_TIME')
        self.MIN_EVENTS_DIR_AMOUNT = \
            self.config.getint('MIN_EVENTS_DIR_AMOUNT')
        self.MIN_EVENTS_DIR_TIME = self.config.getint('MIN_EVENTS_DIR_TIME')
        self.AGGLOMERATION_MIN_DISTANCE = \
            self.config.getint('AGGLOMERATION_MIN_DISTANCE')
        self.GLOBAL_EVENTS_LIVES_TIME = \
            self.config.getint('GLOBAL_EVENTS_LIVES_TIME')
        self.TRACKLETS_LIVES_TIME = self.config.getint('TRACKLETS_LIVES_TIME')
        self.communicator = \
            Communicator(
                expiration_time=self.config.getint('WARNINGS_EXPIRATION_TIME'),
                host_address=self.config.get('WARNINGS_COMM_HOSTADDRESS'),
                exchange=self.config.get('WARNINGS_EXCHANGE_NAME'),
                exchange_type='topic')
        self.global_events = []

    def set_config(self, data, resolution_mult):
        self.resolution_multiplier = resolution_mult
        self.config = CustomConfig(data) if data \
            else read_conf()

    def apply(self, tracklet_raw_info):
        """
        This method is executed every time that data arrives from the first
        phase with new tracking information.

        :param tracklet_raw_info:
        :return:
        """
        trackled_id = tracklet_raw_info['id']
        tracklet_info = self.tracklets_info.get(trackled_id, None)
        if not tracklet_info:
            # It's a new tracklet ;) we need to create a Tracklet instance
            self.tracklets_info[trackled_id] = Tracklet(trackled_id)
            self.tracklets_info[trackled_id].last_position = \
                tracklet_raw_info['last_position']
            last_update_datetime = \
                datetime.strptime(tracklet_raw_info['last_update_timestamp'],
                                  "%Y-%m-%dT%H:%M:%S.%f")
            self.tracklets_info[trackled_id].last_position_time = \
                last_update_datetime
        else:
            # It's new data for an existent Tracklet info
            last_update_datetime = \
                datetime.strptime(tracklet_raw_info['last_update_timestamp'],
                                  "%Y-%m-%dT%H:%M:%S.%f")
            time_lapse, distance, angle = self.calc_movements_info(
                tracklet_info, tracklet_raw_info['last_position'],
                last_update_datetime)

            if time_lapse > 0:  # If there is a time lapse to process
                last_position = tracklet_raw_info['last_position']

                # Look for new events
                current_local_events, current_global_events = \
                    self.calc_events(tracklet_info, last_update_datetime,
                                     time_lapse, distance, angle,
                                     last_position)

                # print("TRACKLET ID:: %s" % trackled_id)
                # print("LAST POSITION:: %s" % last_position)
                # print("LAST UPDATED DATETIME:: %s " % last_update_datetime)
                # print("DISTANCE/TIMELAPSE:: %s / %s" %
                #       (distance, time_lapse))
                # print("CURRENT EVENTS:: %s" % current_local_events)

                # Add found local events to the current tracklet
                tracklet_info.add_new_events(current_local_events)

                # Add found global events to the global information
                self.add_global_events(current_global_events)

                # Update the last_updated_time for the current tracklet
                tracklet_info.last_position_time = last_update_datetime
                tracklet_info.last_position = last_position

                # Considering the new events and the recent events' history,
                # check if any rule matches
                found_local_rules, found_global_rules = \
                    self.calc_rules(tracklet_info)

                for r in found_local_rules:
                    r[1].tracklet_owner = trackled_id

                # Update the last tracklet's image
                tracklet_info.img = tracklet_raw_info['img']

                # If Rules were matched, warn about it
                # TODO: Review the validity of this comparison
                if [x[1] for x in tracklet_info.last_found_rules] != \
                        [x[1] for x in found_local_rules]:
                    if found_local_rules:
                        found_local_rules.sort(key=lambda x: x[2],
                                               reverse=True)
                        tracklet_info.last_found_rules = found_local_rules
                        tracklet_info.last_time_found_rules = \
                            last_update_datetime
                        self.fire_alarms(tracklet_info)

                if found_global_rules and self.global_events and \
                        not self.global_events[-1].notified and (
                        (last_update_datetime -
                         self.globals_last_notification_datetime).seconds > 5
                        or self.globals_last_notification_number <
                        self.global_events[-1].type_):
                    self.fire_global_alarms(found_global_rules, tracklet_info)

        # Remove abandoned tracklets from lists
        self.remove_abandoned_tracklets(last_update_datetime)

        # Remove old global events from list
        self.remove_old_global_events(last_update_datetime)

    def remove_abandoned_tracklets(self, last_update):
        tracklet_to_delete = \
            [t.id for t in self.tracklets_info.values()
             if diff_in_milliseconds(t.last_position_time, last_update) >
             self.TRACKLETS_LIVES_TIME]
        for id_ in tracklet_to_delete:
            del self.tracklets_info[id_]

    def remove_old_global_events(self, last_update):
        self.global_events = list(
            dropwhile(
                lambda e: diff_in_milliseconds(e.last_update, last_update) >
                self.GLOBAL_EVENTS_LIVES_TIME, self.global_events))

    def add_global_events(self, current_global_events):
        for event in current_global_events:
            if self.global_events and \
                    event.is_glueable(self.global_events[-1]):
                self.global_events[-1].glue(event)
            else:
                self.global_events.append(event)

    def calc_movements_info(self, tracklet_info, new_position,
                            new_position_time):
        time = diff_in_milliseconds(tracklet_info.last_position_time,
                                    new_position_time)
        (distance, angle) = self.calc_distance_and_angle_between_points(
            tracklet_info.last_position, new_position)

        return time, distance, angle

    @staticmethod
    def calc_distance_and_angle_between_points(point1, point2):
        distance = euclidean_distance(point1, point2)
        # sin(angle) = opposite / hypotenuse
        if distance:
            sin_of_angle = abs(point2[1] - point1[1]) / distance
            angle = np.degrees(np.arcsin(sin_of_angle))
        else:
            angle = None

        return distance, angle

    def calc_events(self, tracklet_info, last_update, time_lapse, distance,
                    angle, last_position):
        current_local_events = \
            self.calc_direction_events(tracklet_info, angle, last_update,
                                       time_lapse)
        current_local_events.extend(
            self.calc_speed_events(distance, last_update, time_lapse))
        current_global_event = \
            self.calc_global_events(last_update, last_position, time_lapse)

        return current_local_events, current_global_event

    def calc_direction_events(self, tracklet_info, angle, last_update,
                              time_lapse):
        current_events = []
        if not tracklet_info.average_direction:
            tracklet_info.average_direction = angle
        else:
            # calculate the difference between the current direction angle
            # and the new direction angle
            if angle:
                min_diff_signed = tracklet_info.average_direction - angle
            else:
                min_diff_signed = 0
            min_diff_signed = (min_diff_signed + 180) % 360 - 180
            min_diff = abs(min_diff_signed)

            if min_diff > self.min_angle_rotation:
                # Append ROTATION event
                current_events.append(
                    EventDirection(EventInfoType.ANGLE, Quantifiers.AX,
                                   round(min_diff), time_end=last_update,
                                   duration=time_lapse))

            # the new direction is added to average_direction, but with less
            # weight to reduce noise
            tracklet_info.average_direction += \
                min_diff_signed * self.WEIGHT_FOR_NEW_DIRECTION_ANGLE

        return current_events

    def calc_speed_events(self, distance, last_update, time_lapse):
        current_events = []
        speed = distance / (time_lapse / 1000.0)  # Measured in Pixels/Second

        if speed < self.min_walking_speed:
            # Append 'STOPPED' event
            current_events.append(
                EventSpeed(SpeedEventTypes.STOPPED, Quantifiers.EQ,
                           value=time_lapse, time_end=last_update,
                           duration=time_lapse))
        elif speed < self.min_running_speed:
            # Append 'WALKING' event
            current_events.append(
                EventSpeed(SpeedEventTypes.WALKING, Quantifiers.EQ,
                           value=time_lapse, time_end=last_update,
                           duration=time_lapse))
        else:
            # Append 'RUNNING' event
            current_events.append(
                EventSpeed(SpeedEventTypes.RUNNING, Quantifiers.EQ,
                           value=time_lapse, time_end=last_update,
                           duration=time_lapse))

        return current_events

    def calc_global_events(self, last_update, last_position, time_lapse):
        counter = 0
        current_global_events = []

        # ## Look for AGGLOMERATION events ## #
        # Calculate the distance to each tracklet and check if it is close
        # enough
        for tracklet in self.tracklets_info.values():
            if diff_in_milliseconds(
                    tracklet.last_position_time, last_update) < 1250 and \
                    euclidean_distance(last_position,
                                       tracklet.last_position) < \
                    self.AGGLOMERATION_MIN_DISTANCE:
                counter += 1
        else:
            if counter > 1:
                current_global_events.append(
                    EventAgglomeration(type_=counter, value=time_lapse,
                                       time_end=last_update,
                                       duration=time_lapse))

        # ## Place to verify future global events ## #
        # ... ... ...

        return current_global_events

    def calc_rules(self, tracklet_info):
        """
        :param tracklet_info:
        :return: a list of tuples with:
            0- The distance (trust measurement)
            1- The rule that was satisfied
            2- The time that the rule has taken to be satisfied
        """
        found_local_rules = []
        found_global_rules = []
        last_update = tracklet_info.last_position_time

        # Take the latest events or a minimum
        last_speed_events = \
            list(map(lambda x: x[1], takewhile(
                lambda i_e:
                diff_in_milliseconds(i_e[1].last_update, last_update) <
                self.MIN_EVENTS_SPEED_TIME or
                i_e[0] < self.MIN_EVENTS_SPEED_AMOUNT,
                enumerate(reversed(tracklet_info.active_speed_events))
            )))
        last_dir_events = \
            list(map(lambda x: x[1], takewhile(
                lambda i_e:
                diff_in_milliseconds(i_e[1].last_update, last_update) <
                self.MIN_EVENTS_DIR_TIME or
                i_e[0] < self.MIN_EVENTS_DIR_AMOUNT,
                enumerate(reversed(tracklet_info.active_direction_events))
            )))

        # if any rule matches, the rule is added to found_rules
        for rule in self.movement_change_rules:
            satisfies_speed_events, dist1, time_from_start1 = \
                self.check_ruleevents_in_activeevents(
                    rule.events, reversed(last_speed_events))
            satisfies_dir_events, dist2, time_from_start2 = \
                self.check_ruleevents_in_activeevents(
                    rule.events, reversed(last_dir_events))
            satisfies_global_events, dist3, time_from_start3 = \
                self.check_ruleevents_in_activeevents(
                    rule.events, [self.global_events[-1]]) if \
                self.global_events else (None, None, None)

            if satisfies_global_events:
                found_global_rules.append((dist3, rule, time_from_start3))
            if satisfies_speed_events or satisfies_dir_events:
                found_local_rules.append((dist1 + dist2, rule,
                                          min(time_from_start1,
                                              time_from_start2)))

        return found_local_rules, found_global_rules

    @staticmethod
    def check_ruleevents_in_activeevents(rule_events, last_events):
        """
        Checks if the sequence of rule's events is contained in last_events,
        in the same order as defined in the rule.
        BE CAREFUL: Same order doesn't mean contiguously. Non contiguous
        rule's events will have a distance greater than zero.

        :param rule_events: List of events that shapes a Rule.
        :param last_events: List of last occurred events
        :return: (True/False whether it satisfies the rule,
                  distance (measure of confidence),
                  time from the first event to the last)
        """
        if not last_events:
            return False, 0, maxsize
        firsts = True
        last_events_iter = reversed(list(last_events))
        try:
            distance = 0
            time_from_start = 0
            for pos, rule_event in enumerate(reversed(rule_events)):
                if pos > 0:
                    firsts = False
                last_event = next(last_events_iter)
                while not last_event.satisfies(rule_event):
                    if not firsts:
                        distance += last_event.duration
                    else:
                        time_from_start += last_event.duration
                    last_event = next(last_events_iter)
            else:
                # print("CON EVENTOS0:: %s" % list(last_events))
                return True, distance, time_from_start
        except StopIteration:
            pass

        # FIXME: if there are no events of this type in the rule, the
        # distance should not be zero, since the other type may satisfy it
        return False, 0, maxsize

    def fire_alarms(self, tracklet_info):
        return_data = {
            'tracker_id': self.identifier,
            'rules': [(r[0], r[1].name)
                      for r in tracklet_info.last_found_rules],
            'position': tracklet_info.last_position,
            'id': tracklet_info.id,
            'img': tracklet_info.img,
            'timestamp': str(tracklet_info.last_time_found_rules)
        }

        print("INDIVIDUAL:: %s" % str(tracklet_info.last_found_rules))
        # print("CON EVENTOS:: %s" %
        #       str(tracklet_info.active_speed_events[-10:-1]))

        self.communicator.apply(json.dumps(return_data),
                                routing_key='warnings')

    def fire_global_alarms(self, global_rules, current_tracklet):
        return_data = {
            'tracker_id': 'GLOBAL: Amount: ' +
                          str(self.global_events[-1].type_) +
                          " over time(ms): " +
                          str(self.global_events[-1].value),
            'rules': [(r[0], r[1].name) for r in global_rules],
            'position': (0, 0),
            'id': current_tracklet.id,
            'img': current_tracklet.img,
            'timestamp': str(global_rules[0][1].events[0].last_update)
        }

        self.global_events[-1].notified = True
        self.globals_last_notification_datetime = \
            self.global_events[-1].last_update
        self.globals_last_notification_number = self.global_events[-1].type_

        # print("GLOBAL:: %s" % [x for x in return_data.items()
        #                        if x[0] != 'img'])
        print("GLOBAL TOTAL:: %s" % self.global_events[-1])

        self.communicator.apply(json.dumps(return_data),
                                routing_key='warnings')
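# A minimal, hypothetical driver for the class above (assumption: not part of
# the original sources). It mirrors the payload keys that apply() reads:
# 'id', 'last_position', 'last_update_timestamp' and 'img'. The timestamp
# literal follows the "%Y-%m-%dT%H:%M:%S.%f" format expected by apply().
if __name__ == '__main__':
    recognizer = PatternRecognition(identifier='example-tracker')
    recognizer.apply({
        'id': 42,
        'last_position': (10, 20),
        'last_update_timestamp': '2024-01-01T12:00:00.000000',
        'img': '',  # base64-encoded PNG in the real pipeline
    })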
def create_queue(q_name):
    connection = pika.BlockingConnection()
    channel = connection.channel()
    return channel.basic_get(q_name)


stream_controller = SC()

if __name__ == '__main__':
    # Starting up the base
    warnings_queue = Communicator(queue_name='launcher_rcv',
                                  exchange='to_master',
                                  routing_key='cmd',
                                  exchange_type='topic')

    log("Starting up the Pattern Recognition Engine...")
    pattern_master = Process(target=pattern_recognition_launcher)
    pattern_master.start()

    log("Starting up the web manager...")
    web_exposer = Process(target=websocket_exposer)
    web_exposer.start()

    log("Starting up events sender...")
    events_listener_process = Process(target=events_listener)
    events_listener_process.start()

    log("Waiting for the result")
import sys

from utils.communicator import Communicator

__author__ = 'jp'

if __name__ == '__main__':
    communicator = Communicator(exchange='to_master', exchange_type='topic',
                                expiration_time=5)
    communicator.send_message(" ".join(sys.argv[1:]), routing_key='cmd')
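# Example invocation (assumption: the script name below is illustrative).
# Everything after the script name is joined into a single string and
# published with routing key 'cmd' on the 'to_master' exchange:
#
#   $ python send_command.py start_tracking /path/to/video.mkv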
def ws_disconn(data):
    comm = Communicator(exchange='to_master', routing_key='cmd',
                        exchange_type='topic')
    comm.send_message(data['data'], routing_key='cmd')
def track_source(identifier=None, source=None, trackermaster_conf=None,
                 patternmaster_conf=None):
    """
    :param identifier:
    :param source:
    :param trackermaster_conf:
    :param patternmaster_conf:
    :return:
    """

    """ START SETTING CONSTANTS """

    # NOTE: the incoming configurations are discarded here and the values
    # from the local config are used instead
    trackermaster_conf = None
    patternmaster_conf = None

    global USE_HISTOGRAMS_FOR_PERSON_DETECTION, SHOW_PREDICTION_DOTS, \
        SHOW_COMPARISONS_BY_COLOR, SHOW_VIDEO_OUTPUT, LIMIT_FPS, \
        DEFAULT_FPS_LIMIT, CREATE_MODEL, USE_MODEL, SAVE_POSITIONS_TO_FILE, \
        VERBOSE

    USE_HISTOGRAMS_FOR_PERSON_DETECTION = \
        config.getboolean("USE_HISTOGRAMS_FOR_PERSON_DETECTION")
    SHOW_PREDICTION_DOTS = config.getboolean("SHOW_PREDICTION_DOTS")
    SHOW_COMPARISONS_BY_COLOR = config.getboolean("SHOW_COMPARISONS_BY_COLOR")
    SHOW_VIDEO_OUTPUT = config.getboolean("SHOW_VIDEO_OUTPUT")
    LIMIT_FPS = config.getboolean("LIMIT_FPS")
    DEFAULT_FPS_LIMIT = config.getfloat("DEFAULT_FPS_LIMIT")
    if CREATE_MODEL is None:
        CREATE_MODEL = config.getboolean("CREATE_MODEL")
    if USE_MODEL is None:
        USE_MODEL = config.getboolean("USE_MODEL")
    SAVE_POSITIONS_TO_FILE = config.getboolean("SAVE_POSITIONS_TO_FILE")
    VERBOSE = config.getboolean('VERBOSE')
    USE_BSUBTRACTOR_KNN = config.getboolean("USE_BSUBTRACTOR_KNN")

    """ FINISH SETTING CONSTANTS """

    if not identifier:
        identifier = sha1(str(dt.utcnow()).encode('utf-8')).hexdigest()

    if trackermaster_conf:
        set_custome_config(trackermaster_conf)

    # Instance of VideoCapture to capture webcam(0) images
    # WebCam
    # cap = cv2.VideoCapture(0)
    # popen("v4l2-ctl -d /dev/video1 --set-ctrl "
    #       "white_balance_temperature_auto=0,"
    #       "white_balance_temperature=inactive,exposure_absolute=inactive,"
    #       "focus_absolute=inactive,focus_auto=0,exposure_auto_priority=0")

    # Communication with Launcher and others
    comm_info = get_status_info_comm()

    # Communication with PatternMaster
    communicator = \
        Communicator(exchange=config.get('TRACK_INFO_EXCHANGE_NAME'),
                     host_address=config.get(
                         'TRACK_INFO_EXCHANGE_HOSTADDRESS'),
                     expiration_time=config.getint(
                         'TRACK_INFO_EXPIRATION_TIME'),
                     exchange_type='direct')
    exit_cause = 'FINISHED'

    global cap
    global has_more_images
    global raw_image
    global processed
    global SEC_PER_FRAME

    if source:
        cap = cv2.VideoCapture(source)
        comm_info.send_message(
            json.dumps(dict(
                info_id="OPEN", id=identifier,
                content="Opening source: %s." % source)), routing_key='info')
    else:
        # Sample videos
        videos_path = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe())))
        # source = videos_path + '/../Videos/Video_003.avi'
        source = videos_path + '/../../videos_demo/DSC_3133_luminosa.mkv'
        cap = cv2.VideoCapture(source)

    has_at_least_one_frame, raw_image = cap.read()
    if not has_at_least_one_frame:
        comm_info.send_message(json.dumps(dict(
            info_id="EXIT WITH ERROR", id=identifier,
            content="<p>ERROR: Trying to open source but couldn't.</p>")),
            routing_key='info')
        print('EXIT %s with error: Source %s could not be loaded.' %
              (identifier, source))
        exit()

    # Original FPS
    try:
        FPS = float(int(cap.get(cv2.CAP_PROP_FPS)))
        if FPS == 0.:
            FPS = DEFAULT_FPS_LIMIT
    except ValueError:
        FPS = DEFAULT_FPS_LIMIT

    reader = Thread(target=read_raw_input, daemon=True)
    reader.start()

    print("Working at", FPS, "FPS")
    SEC_PER_FRAME = 1. / FPS
    FPS_OVER_2 = (FPS / 2)

    # Getting width and height of captured images
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print("Real resolution: Width", w, "Height", h)

    resolution_multiplier = find_resolution_multiplier(w, h)
    work_w = int(w / resolution_multiplier)
    work_h = int(h / resolution_multiplier)
    print("Work resolution: Width", work_w, "Height", work_h)

    send_patternrecognition_config(communicator, identifier,
                                   patternmaster_conf, resolution_multiplier)

    font = cv2.FONT_HERSHEY_SIMPLEX

    background_subtractor = BackgroundSubtractorKNN() if USE_BSUBTRACTOR_KNN \
        else BackgroundSubtractorMOG2()
    blobs_detector = BlobDetector()
    # person_detector = Histogram2D()
    person_detection.pd_init_constants()
    tracker = Tracker(FPS, resolution_multiplier)

    loop_time = time.time()

    global number_frame
    _fps = "%.2f" % FPS
    previous_fps = FPS

    read_time = 0
    max_read_time = 0
    bg_sub_time = 0
    max_bg_sub_time = 0
    blob_det_time = 0
    max_blob_det_time = 0
    person_detection_time = 0
    max_person_detection_time = 0
    t_time = 0
    max_t_time = 0
    pattern_recogn_time = 0
    max_pattern_recogn_time = 0
    show_info_time = 0
    max_show_info_time = 0
    display_time = 0
    max_display_time = 0
    wait_key_time = 0
    max_wait_key_time = 0
    total_time = 0
    max_total_time = 0

    persons_in_scene = "Frame number (one-based), Current persons detected, " \
                       "Current tracklets, " \
                       "Current tracklets/persons interpol. num\n\n"

    model_load = True, ""
    if USE_HISTOGRAMS_FOR_PERSON_DETECTION:
        person_detection.set_histogram_size(shape=(work_w, work_h))
        person_detection.set_create_model(CREATE_MODEL)
        model_load = person_detection.set_use_model(USE_MODEL)

    fps = 0
    comparisons_by_color_image = []
    positions_to_file = ''
    interpol_cant_persons_prev = 0
    trayectos = []
    tracklets = {}
    last_number_frame = number_frame
    p_matrix_history = ''

    if model_load[0]:
        # Start the main loop
        while has_more_images:
            t_total = time.time()

            # FPS calculation
            if number_frame > 10 and number_frame != last_number_frame:
                delay = (time.time() - loop_time)
                loop_time = time.time()
                # if LIMIT_FPS:
                #     if delay < SEC_PER_FRAME:
                #         time_aux = time.time()
                #         time.sleep(max(SEC_PER_FRAME - delay, 0))
                #         delay += time.time() - time_aux
                fps = (1. / delay) * 0.25 + previous_fps * 0.75
                previous_fps = fps
                _fps = "%.2f" % fps
            else:
                if LIMIT_FPS:
                    while has_more_images and \
                            number_frame == last_number_frame:
                        time.sleep(0.01)  # Sleep to avoid busy waiting
                    if not has_more_images:
                        break
                    loop_time = time.time()

            t0 = time.time()
            aux_time = time.time() - t0
            if number_frame > 200:
                read_time += aux_time
                max_read_time = max(aux_time, max_read_time)

            if has_more_images:
                # ######################################################### #
                # ##               BLACK BOXES PROCESSES                 ## #
                # ######################################################### #

                # ######################### #
                # ## BACKGROUND SUBTRACTOR # #
                # ######################### #
                t0 = time.time()

                # resize to a manageable work resolution
                if LIMIT_FPS:
                    reader_lock.acquire()
                else:
                    reader_condition.acquire()
                    if number_frame == last_number_frame and has_more_images:
                        reader_condition.wait(2)
                if not has_more_images:
                    if LIMIT_FPS:
                        reader_lock.release()
                    else:
                        reader_condition.notify()
                        reader_condition.release()
                    break
                else:
                    last_number_frame = number_frame
                    raw_frame_copy = raw_image.copy()
                    if LIMIT_FPS:
                        reader_lock.release()
                    else:
                        processed = True
                        reader_condition.notify()
                        reader_condition.release()

                frame_resized = cv2.resize(raw_frame_copy, (work_w, work_h))
                frame_resized_copy = frame_resized.copy()

                bg_sub = background_subtractor.apply(frame_resized)
                bg_subtraction = cv2.cvtColor(bg_sub, cv2.COLOR_GRAY2BGR)
                to_show = bg_subtraction.copy()
                bg_subtraction_resized = \
                    cv2.resize(bg_subtraction, (work_w, work_h))

                aux_time = time.time() - t0
                if number_frame > 200:
                    bg_sub_time += aux_time
                    max_bg_sub_time = max(aux_time, max_bg_sub_time)

                # ################## #
                # ## BLOBS DETECTOR # #
                # ################## #
                t0 = time.time()

                bounding_boxes = blobs_detector.apply(bg_sub)

                aux_time = time.time() - t0
                if number_frame > 200:
                    blob_det_time += aux_time
                    max_blob_det_time = max(aux_time, max_blob_det_time)

                t0 = time.time()
                cant_personas = 0

                if len(bounding_boxes):
                    rectangles = x1y1x2y2_to_x1y1wh(bounding_boxes)
                    del bounding_boxes

                    for (x, y, w, h) in rectangles:
                        # Draw candidate blobs in blue
                        cv2.rectangle(frame_resized_copy, (x, y),
                                      (x + w, y + h), (255, 0, 0), 1)

                    if len(rectangles) > 100:
                        # Skip the cycle when it's full of small blobs
                        continue

                    # #################### #
                    # ## PERSONS DETECTOR # #
                    # #################### #
                    persons = person_detection.apply(
                        rectangles, resolution_multiplier, raw_frame_copy,
                        frame_resized_copy, number_frame, fps)
                    cant_personas = len(persons)

                    for p in persons:
                        # Red and Yellow dots
                        (x_a, y_a), (x_b, y_b) = p['box']
                        color = 0 if p['score'] == 1 else 255
                        cv2.circle(img=frame_resized_copy,
                                   center=(int((x_a + x_b) / 2),
                                           int((y_a + y_b) / 2)),
                                   radius=0, color=(0, color, 255),
                                   thickness=3)

                    aux_time = time.time() - t0
                    if number_frame > 200:
                        person_detection_time += aux_time
                        max_person_detection_time = \
                            max(aux_time, max_person_detection_time)

                    t0 = time.time()

                    # ########### #
                    # ## TRACKER # #
                    # ########### #
                    rectangles_in_frame = []
                    trayectos_, info_to_send, tracklets, \
                        comparisons_by_color_image_aux, \
                        positions_in_frame, \
                        rectangles_in_frame, \
                        frame_p_matrix_history = \
                        tracker.apply(persons, frame_resized,
                                      bg_subtraction_resized, number_frame)
                    del persons
                    trayectos = trayectos_ if trayectos_ else trayectos

                    if SAVE_POSITIONS_TO_FILE:
                        if number_frame >= 50:
                            positions_to_file += positions_in_frame

                    for ((x1, y1), (x2, y2)) in rectangles_in_frame:
                        # Draw candidate blobs in green
                        cv2.rectangle(frame_resized_copy, (int(x1), int(y1)),
                                      (int(x2), int(y2)), (0, 255, 0), 1)

                    p_matrix_history += frame_p_matrix_history

                    if len(comparisons_by_color_image_aux) > 0:
                        comparisons_by_color_image = \
                            comparisons_by_color_image_aux

                    aux_time = time.time() - t0
                    if number_frame > 200:
                        t_time += aux_time
                        max_t_time = max(aux_time, max_t_time)

                    t0 = time.time()

                    # ################################################# #
                    # ## COMMUNICATION WITH PATTERN MASTER AND OTHERS # #
                    # ################################################# #
                    if number_frame % FPS_OVER_2 == 0:
                        for info in info_to_send:
                            info['tracker_id'] = identifier
                            frame_resized_marks = frame_resized.copy()
                            cv2.rectangle(
                                frame_resized_marks, info['rectangle'][0],
                                info['rectangle'][1], (200, 0, 0), -1)
                            frame_resized_marks = \
                                cv2.addWeighted(frame_resized_marks, 0.2,
                                                frame_resized, 0.8, 0)
                            cv2.circle(frame_resized_marks,
                                       (int(info['last_position'][0]),
                                        int(info['last_position'][1])), 70,
                                       (200, 200, 0), -1)
                            frame_resized_marks = \
                                cv2.addWeighted(frame_resized_marks, 0.2,
                                                frame_resized, 0.8, 0)
                            info['img'] = \
                                frame2base64png(frame_resized_marks).decode()

                        # Send info to the pattern recognition
                        # every half second
                        communicator.apply(json.dumps(info_to_send),
                                           routing_key='track_info')

                    if number_frame % (FPS * 10) == 0:
                        # Renew the config in pattern recognition every
                        # 10 seconds
                        send_patternrecognition_config(
                            communicator, identifier, patternmaster_conf,
                            resolution_multiplier)

                    aux_time = time.time() - t0
                    if number_frame > 200:
                        pattern_recogn_time += aux_time
                        max_pattern_recogn_time = \
                            max(aux_time, max_pattern_recogn_time)

                    t0 = time.time()

                    now = dt.now()
                    for tracklet in tracklets.values():
                        if getattr(tracklet, 'last_rule', None):
                            time_pass = now - getattr(tracklet,
                                                      'last_rule_time')
                            if time_pass.seconds < 9:
                                if SHOW_VIDEO_OUTPUT:
                                    cv2.putText(
                                        to_show, tracklet.last_rule,
                                        (int(tracklet.last_point[0]),
                                         int(tracklet.last_point[1])), font,
                                        0.3 - (time_pass.seconds / 30),
                                        (255, 0, 0), 1)
                            else:
                                tracklet.last_rule = None

                if SHOW_VIDEO_OUTPUT:
                    # Draw the journeys of the tracked persons
                    draw_journeys(trayectos, [frame_resized_copy, to_show])

                aux_time = time.time() - t0
                if number_frame > 200:
                    show_info_time += aux_time
                    max_show_info_time = max(aux_time, max_show_info_time)

                if SAVE_POSITIONS_TO_FILE:
                    if number_frame >= 50:
                        persons_in_scene += str(number_frame) + ", " + \
                            str(cant_personas) + ", " + \
                            str(len(trayectos)) + ", " + str(round(
                                (len(trayectos) * .85) +
                                (cant_personas * .15))) + "\n"

                if SHOW_VIDEO_OUTPUT:
                    # ################### #
                    # ## DISPLAY RESULTS # #
                    # ################### #
                    t0 = time.time()

                    big_frame = \
                        np.vstack((np.hstack((bg_subtraction, to_show)),
                                   np.hstack((frame_resized,
                                              frame_resized_copy))))

                    # TEXT INFORMATION
                    # Write FPS in the frame to show
                    cv2.putText(big_frame,
                                'Current persons detected: ' +
                                str(cant_personas), (20, 20), font, .5,
                                (255, 255, 0), 1)
                    cv2.putText(big_frame,
                                'Current tracklets: ' + str(len(trayectos)),
                                (20, 40), font, .5, (255, 255, 0), 1)
                    interpol_cant_persons = round(
                        ((len(trayectos) * .7) + (cant_personas * .3)) * .35 +
                        interpol_cant_persons_prev * .65)
                    interpol_cant_persons_prev = interpol_cant_persons
                    cv2.putText(big_frame,
                                'Current tracklets/persons interpol. num: ' +
                                str(round((len(trayectos) * .85) +
                                          (cant_personas * .15))),
                                (20, 60), font, .5, (255, 255, 0), 1)
                    cv2.putText(big_frame, 'FPS: ' + _fps, (20, 80), font, .5,
                                (255, 255, 0), 1)

                    big_frame = cv2.resize(big_frame,
                                           (work_w * 4, work_h * 4))
                    cv2.imshow('result', big_frame)

                    if SHOW_COMPARISONS_BY_COLOR:
                        if len(comparisons_by_color_image) > 0:
                            cv2.imshow('comparisons by color',
                                       comparisons_by_color_image)

                    aux_time = time.time() - t0
                    if number_frame > 200:
                        display_time += aux_time
                        max_display_time = max(aux_time, max_display_time)

                    t0 = time.time()

                    if cv2.waitKey(1) & 0xFF in (ord('q'), ord('Q')):
                        exit_cause = 'CLOSED BY PRESSING "Q|q"'
                        break

                    aux_time = time.time() - t0
                    if number_frame > 200:
                        wait_key_time += aux_time
                        max_wait_key_time = max(aux_time, max_wait_key_time)

                if VERBOSE:
                    print("frame: ", str(number_frame), "; fps: ", str(_fps))

                aux_time = time.time() - t_total
                if number_frame > 200:
                    total_time += aux_time
                    max_total_time = max(aux_time, max_total_time)

        global kill_reader
        kill_reader = True

        cv2.destroyAllWindows()

        if USE_HISTOGRAMS_FOR_PERSON_DETECTION and CREATE_MODEL:
            person_detection.save_histogram()

        number_frame_skip_first = number_frame - 200

        avg_times_text = "Average times::::"
        read_time /= number_frame_skip_first
        avg_times_text += "\nRead time " + str(read_time)
        bg_sub_time /= number_frame_skip_first
        avg_times_text += "\nBackground subtraction time " + str(bg_sub_time)
        blob_det_time /= number_frame_skip_first
        avg_times_text += "\nBlob detector time " + str(blob_det_time)
        person_detection_time /= number_frame_skip_first
        avg_times_text += "\nPerson detector time " + \
                          str(person_detection_time)
        t_time /= number_frame_skip_first
        avg_times_text += "\nTracker time " + str(t_time)
        pattern_recogn_time /= number_frame_skip_first
        avg_times_text += "\nCommunication with pattern recognition time " + \
                          str(pattern_recogn_time)
        show_info_time /= number_frame_skip_first
        avg_times_text += "\nText and paths time " + str(show_info_time)
        display_time /= number_frame_skip_first
        avg_times_text += "\nDisplay time " + str(display_time)
        wait_key_time /= number_frame_skip_first
        avg_times_text += "\ncv2.waitKey time " + str(wait_key_time)
        total_time /= number_frame_skip_first
        avg_times_text += "\nTotal time " + str(total_time)

        avg_times_text += "\n\n\nMax times::::"
        avg_times_text += "\nRead time " + str(max_read_time)
        avg_times_text += "\nBackground subtraction time " + \
                          str(max_bg_sub_time)
        avg_times_text += "\nBlob detector time " + str(max_blob_det_time)
        avg_times_text += "\nPerson detector time " + \
                          str(max_person_detection_time)
        avg_times_text += "\nTracker time " + str(max_t_time)
        avg_times_text += "\nCommunication with pattern recognition time " + \
                          str(max_pattern_recogn_time)
        avg_times_text += "\nText and paths time " + str(max_show_info_time)
        avg_times_text += "\nDisplay time " + str(max_display_time)
        avg_times_text += "\ncv2.waitKey time " + str(max_wait_key_time)
        avg_times_text += "\nTotal time " + str(max_total_time)

        print(avg_times_text)

        if SAVE_POSITIONS_TO_FILE:
            with open("../experimental_analysis/raw_results/" + identifier +
                      "-positions.txt", "w") as text_file:
                print(positions_to_file, file=text_file)
            with open("../experimental_analysis/raw_results/" + identifier +
                      "-times.txt", "w") as text_file:
                print(avg_times_text, file=text_file)
            with open("../experimental_analysis/raw_results/" + identifier +
                      "-counter.txt", "w") as text_file:
                print(persons_in_scene, file=text_file)
            with open("../experimental_analysis/raw_results/" + identifier +
                      "-p_matrix.txt", "w") as text_file:
                print(p_matrix_history, file=text_file)

        comm_info = get_status_info_comm()
        comm_info.send_message(json.dumps(dict(
            info_id="EXIT", id=identifier, content="CAUSE: " + exit_cause,
            img=frame2base64png(frame_resized).decode())),
            routing_key='info')
    else:
        print(model_load[1])

    if SHOW_COMPARISONS_BY_COLOR:
        cv2.imwrite("comparisons_by_color.png", comparisons_by_color_image)

    exit()
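# A minimal way to run the tracker stand-alone (assumption: this driver and
# the video path are illustrative, not part of the original sources). With
# source=None the demo video configured above is used instead.
if __name__ == '__main__':
    track_source(identifier='example-run',
                 source='/path/to/some_video.mkv')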
class Agent(threading.Thread):

    def __init__(self):
        threading.Thread.__init__(self)
        self.discrete_time_step = 0.5  # sec
        self.alive = False
        self.state = MentalState()
        self.actions = get_basic_actions()
        self.knowledge = Knowledge()
        self.goals = []
        self.messages = []

    def _load_knowledge(self, knowledge):
        for key, value in knowledge.items():
            self.knowledge[key] = value
            # self.tuple_to_knowledge(k)
            # self.knowledge.append(k)

    def _load_goals(self, goals):
        self.goals = goals

    def spawn(self, spawn_id, unit_id, initial_knowledge={},
              initial_goals=[]):
        logging.info(str(spawn_id) + ' is being spawned...')
        assert unit_id in units

        # Identifier for the unit
        self.spawn_id = spawn_id
        print("{} is spawned".format(spawn_id))

        # Load basic characteristics of the unit
        self.load_unit(units[unit_id])

        # Set initial statements in knowledge
        self._load_knowledge(initial_knowledge)
        # print(self.knowledge)

        # Store initial goals
        self._load_goals(initial_goals)

        # Give it a life
        self.alive = True
        logging.info(str(spawn_id) + ' has spawned.')

    def load_unit(self, spec):
        self.id = spec['id']
        self.name = spec['name']
        for action in spec['actions']:
            assert 'id' in action
            assert 'name' in action
            assert 'require' in action
            self.actions.append(
                Action(action_name=action['name'],
                       actual_code='',
                       require=action['require'],
                       sc_action_id=action['id']))

    '''
    Communication to simulator
    '''
    def init_comm_env(self):
        pass

    def deinit_comm_env(self):
        pass

    '''
    Communication to other agents
    '''
    def init_comm_agents(self):
        # self.comm_agents = Communicator(self.spawn_id)
        self.comm_agents = Communicator()

    def deinit_comm_agents(self):
        # It may need to send 'good bye' to others
        self.comm_agents.close()

    '''
    Destroy myself
    Assumption: when the agent checks that the goal is achieved, it destroys
    itself.
    '''
    def destroy(self):
        # Need to broadcast "I am destroying"
        msg = "{} destroy".format(self.spawn_id)
        self.comm_agents.send(msg)

        # Close communications
        self.deinit_comm_env()
        self.deinit_comm_agents()

        self.alive = False
        self.join()

    '''
    Sense information from its surroundings and other agents
    '''
    def perceive(self):
        # Perceive from the environment (i.e., SC2)

        # Perceive from other agents
        message = self.comm_agents.read()
        if message.startswith('broadcasting'):
            message = message[13:]
            self.knowledge.update(
                json.loads(message, object_hook=as_python_object))

    """
    Change msg(str) to Knowledge
    """
    def msg_to_knowledge(self, message):
        splited_msg = message.split()
        tuple_msg = tuple(splited_msg)
        if splited_msg is not None:
            if len(splited_msg) == 2:
                self.knowledge.append(
                    Knowledge('type1', tuple_msg[0], [tuple_msg[1]]))
            elif len(splited_msg) == 3:
                self.knowledge.append(
                    Knowledge('type2', tuple_msg[0], tuple_msg[1],
                              [tuple_msg[2]]))
            else:
                pass
        else:
            pass

    """
    Change tuple to Knowledge
    """
    def tuple_to_knowledge(self, tuple_msg):
        if tuple_msg is not None:
            if len(tuple_msg) == 3:
                self.knowledge.append(
                    Knowledge(tuple_msg[0], tuple_msg[1], tuple_msg[2]))
            elif len(tuple_msg) == 4:
                self.knowledge.append(
                    Knowledge(tuple_msg[0], tuple_msg[1], tuple_msg[2],
                              tuple_msg[3]))
            else:
                pass
        else:
            pass

    '''
    Information / actions going to simulator
    '''
    def act(self, action, task):
        self.state.change_state()

        # Update task state
        self.knowledge[task.__name__].update({'is': 'Active'})

        logger.info('%s %s is performing %s'
                    % (self.name, self.spawn_id, action))

        if action.__name__ == 'move':
            req = action.perform(self.spawn_id)
            self.comm_agents.send(req, who='core')
        elif action.__name__ == 'gather':
            req = action.perform(self.spawn_id)
            self.comm_agents.send(req, who='core')
        elif action.__name__ == 'build_pylon':
            req = action.perform(self.spawn_id)
            self.comm_agents.send(req, who='core')
            # do gather after build_pylon
            # time.sleep(self.discrete_time_step)
            # for act in self.actions:
            #     if act.__name__ == 'gather':
            #         req = act.perform(self.spawn_id)
        elif action.__name__ == 'check':
            self.mineral_query(task.__name__, task.arguments['target'],
                               task.arguments['amount'])
            # action.perform_query()
            return False
        elif action.__name__ == 'built':
            self.built_query(task.__name__, task.arguments['target'],
                             task.arguments['built'])
            return False
        else:
            pass
            # print('act function --> else ERROR!!!!!!')

        return True

    def mineral_query(self, task_name, target, amount):
        # find knowledgebase
        if target in self.knowledge:
            current_amount = self.knowledge[target]['gathered']
            if int(current_amount) >= int(amount):
                # print("Achieved!!!!!!!!!!!!!")
                # knowledgebase update
                self.knowledge[task_name].update({'is': 'Done'})
                self.state.__init__()

    def built_query(self, task_name, target, built):
        if target in self.knowledge:
            current_built = self.knowledge[target]['built']
            if int(current_built) >= int(built):
                # print("Achieved!!!!!!!!!!!!")
                self.knowledge[task_name].update({'is': 'Done'})
                self.state.__init__()

    '''
    Delivering information to other agents
    '''
    def tell(self, statement):
        # logger.info('%d is telling "%s" to the agents'
        #             % (self.spawn_id, statement))
        # msg = str(self.spawn_id) + " is " + self.state.state
        # print(">> {} is telling : {}".format(self.spawn_id, statement))
        self.comm_agents.send(statement, broadcast=True)

    '''
    Query to other agents
    '''
    def ask(self, query, wait_timeout=3):
        pass

    '''
    Returns an action that can perform the task
    '''
    def _has_action_for_task(self, task):
        for action in self.actions:
            if action.can_perform(task.__name__):
                action.set_arguments(task.arguments)
                return action
        return None

    """
    Check the goal tree recursively and update KB if it is active.
    """
    def check_goal_active(self, goal):
        if goal is not None:
            if goal.can_be_active():
                print('Shall we print something?????')
                self.knowledge[goal.name].update({'is': 'active'})
                print(goal.name)
                print(self.knowledge[goal.name]['is'])
            for subgoal in goal.subgoals:
                if subgoal.can_be_active():
                    print('Shall we print something?????')
                    self.knowledge[subgoal.name].update({'is': 'active'})
                    print(subgoal.name)
                    print(self.knowledge[subgoal.name]['is'])
                self.check_goal_active(subgoal)
        return None

    """
    Check the goal tree recursively and update KB if it is achieved.
    """
    def check_goal_achieved(self, goal):
        if goal is not None:
            if goal.can_be_achieved():
                # print('Shall we print something?????')
                self.knowledge[goal.name].update({'is': 'achieved'})
                # print(goal.name)
                # print(self.knowledge[goal.name]['is'])
            for subgoal in goal.subgoals:
                if subgoal.can_be_achieved():
                    # print('Shall we print something?????')
                    self.knowledge[subgoal.name].update({'is': 'achieved'})
                    # print(subgoal.name)
                    # print(self.knowledge[subgoal.name]['is'])
                self.check_goal_achieved(subgoal)
        return None

    '''
    Returns available actions based on the desires in the current situation
    '''
    def next_action(self, current_goal, current_knowledge, mentalstate):
        list_actions = []
        # print("####NEXT_ACTION: CURRENT GOAL's length: %s"
        #       % (len(current_goal)))
        if len(current_goal) == 0:
            # TODO: is an action always triggered by a goal?
            return None, None

        # TODO: all the goals may need to be examined
        for goal in current_goal:
            # Method name is dirty
            leaf_goal, tasks = goal.get_available_goal_and_tasks()

            # When the Query task is Done, the agent's mentalstate is Idle
            for task in tasks:
                if task.type == 'Query' and self.knowledge[
                        task.__name__]['is'] == 'Done':
                    self.state.__init__()

            if len(tasks) != 0:
                if leaf_goal.goal_state != 'achieved':
                    leaf_goal.goal_state = 'assigned'
                    self.knowledge[leaf_goal.name].update({'is': 'assigned'})

                for task in tasks:
                    if mentalstate == 'idle':
                        # ping
                        if task.type == 'General':
                            if task.state == 'Ready':
                                task.state = 'Ping'
                                pinglist = set()
                                pinglist.add(self.spawn_id)
                                self.knowledge[task.__name__].update(
                                    {'is': 'Ping'})
                                self.knowledge[task.__name__].update(
                                    {'ping': pinglist})
                                '''
                                # TODO - SangUk will do!
                                self.knowledge[task.__name__].update(
                                    {'is': ('Ping', self.spawn_id)})
                                '''
                                break
                            elif task.state == 'Ping':
                                pinglist = \
                                    self.knowledge[task.__name__]['ping']
                                amImin = True
                                for ping in pinglist:
                                    if int(self.spawn_id) > int(ping):
                                        amImin = False
                                        break
                                if amImin:
                                    if int(self.spawn_id) not in pinglist:
                                        pinglist.add(self.spawn_id)
                                        self.knowledge[task.__name__].update(
                                            {'ping': pinglist})
                                    action = self._has_action_for_task(task)
                                    if action is not None:
                                        list_actions.append((action, task))
                                    break
                                else:
                                    continue
                                    # return None, None
                            elif task.state == 'Active':
                                return None, None
                    elif mentalstate == 'working':
                        if task.type == 'Query' and (task.state == 'Ready' or
                                                     task.state == 'Active'):
                            # Check whether the query task is done
                            action = self._has_action_for_task(task)
                            if action is not None:
                                list_actions.append((action, task))

        # Select actions from the list of actions in terms of the current
        # situation
        if len(list_actions) == 0:
            return None, None
        return_action = list_actions[0]

        # Return the most beneficial action from the selected actions
        return return_action

    def update_goal_tree(self, knowledge, goal):
        if goal.name in knowledge:
            if goal.goal_state != 'achieved':
                goal.goal_state = knowledge[goal.name]['is']
        # check subgoals
        for subgoal in goal.subgoals:
            self.update_goal_tree(knowledge, subgoal)
        # check tasks
        for task in goal.tasks:
            if task.__name__ in knowledge:
                # print("!!", self.spawn_id, task.__name__, task.state,
                #       "-->", knowledge[task.__name__]['is'])
                if knowledge[task.__name__]['is'] == 'Done':
                    knowledge[task.__name__]['ping'] = []
                task.state = knowledge[task.__name__]['is']
        return True

    '''
    Main logic runs here (i.e., reasoning)
    '''
    def run(self):
        # Initialize communications
        self.init_comm_agents()
        self.init_comm_env()

        while self.alive:
            # For debugging
            logger.info('%s %d is ticking' % (self.name, self.spawn_id))
            # print()
            # for k in self.knowledge:
            #     print(k)

            # Check if there is something to answer
            # query = self.check_being_asked():
            # if query:
            #     self.answer(query)

            # Perceive environment
            self.perceive()
            self.perceive()
            self.perceive()
            self.perceive()
            self.perceive()

            # check task state and change the agent's mentalstate
            # check knowledge and update the goal tree
            """
            tasks = []
            for g in self.goals:
                tasks = g.get_available_tasks()
            for k in self.knowledge:
                if k.type == 'type1':
                    for goal in self.goals:
                        if k.n == goal.name:
                            goal.goal_state = k.na
                    for task in tasks:
                        if k.n == task.__name__:
                            task.state = k.na
            """

            # check knowledge and update the goal tree
            for goal in self.goals:
                self.update_goal_tree(self.knowledge, goal)

            """
            # check every goal whether it is now achieved.
            for goal in self.goals:
                if goal.can_be_achieved():
                    print('Shall we print something????')
                    self.knowledge[goal.name].update({'is': 'achieved'})
                    print(goal.name)
                    print(self.knowledge[goal.name]['is'])
                for subgoal in goal.subgoals:
                    # update subgoal's state in KB
                    if subgoal.can_be_achieved():  # check the goal state
                        print('Shall we print something?????')
                        self.knowledge[subgoal.name].update(
                            {'is': 'achieved'})
                        print(subgoal.name)
                        print(self.knowledge[subgoal.name]['is'])
            """

            # check every goal whether it is now achieved.
            for goal in self.goals:
                self.check_goal_achieved(goal)

            # # check every goal whether it is now active.
            # for goal in self.goals:
            #     self.check_goal_active(goal)

            # Reason about the next action
            selected_action, selected_task = self.next_action(
                self.goals, self.knowledge, self.state.state)
            # print(self.spawn_id, "Next is!!! ", selected_action,
            #       selected_task)

            # Perform the action
            if selected_action is not None:
                if not self.act(selected_action, selected_task):
                    # Query task comes here!
                    pass
                else:
                    # General task comes here!
                    selected_task.state = 'Done'
                    # if selected_task.__name__.startswith('built'):
                    #     for act in self.actions:
                    #         if act.__name__ == 'gather':
                    #             req = act.perform(self.spawn_id)
                    #             self.comm_agents.send(req, who='core')
                    self.knowledge[selected_task.__name__]['ping'] = []
                    self.knowledge[selected_task.__name__].update(
                        {'is': 'Done'})
                    # Have to change the agent's state to idle after
                    # finishing the task
                    # self.state.__init__()
            else:
                # print('All done!!!!!!!!!!!!!!!!!!!')
                if self.goals[0].goal_state == 'achieved':
                    # print('Did we get here?? ???????')
                    """
                    for act in self.actions:
                        if act.__name__ == 'move':
                            req = act.perform(self.spawn_id)
                            self.comm_agents.send(req, who='core')
                    """
                    # self.destroy()
                    # break
                    pass

            # TODO for Tony : Please broadcast knowledge...
            self.tell(json.dumps(self.knowledge, cls=PythonObjectEncoder))

            time.sleep(self.discrete_time_step)