class Client:
    """ Base Client class """

    MULTI_THREADED = False
    # Initialize "Grand total" class variables.
    stats_latency_full_process = RunningStats()
    stats_latency_network_only = RunningStats()
    num_success = 0

    def __init__(self, host, port):
        # Initialize instance variables.
        self.host = host
        self.port = port
        self.model_name = None
        self.net_latency_method = None
        self.tls = False
        self.continue_on_error = False
        self.running = False
        self.num_success = 0
        self.do_server_stats = False
        self.show_responses = False
        self.stats_latency_full_process = RunningStats()
        self.stats_latency_network_only = RunningStats()
        self.media_file_name = None
        self.out_dir = None
        self.latency_start_time = 0
        self.loop_count = 0
        self.num_repeat = 0
        self.num_vid_repeat = 0
        self.num_vid_plays = 0
        self.filename_list = []
        self.filename_list_index = 0
        self.video = None
        self.video_frame_num = 0
        self.resize = True
        self.resize_long = 240
        self.resize_short = 180
        self.skip_frames = 1
        logger.debug("host:port = %s:%d" % (self.host, self.port))

    def start(self):
        self.running = True
        logger.debug("media file(s) %s" % (self.filename_list))
        video_extensions = ('mp4', 'avi', 'mov')
        if self.filename_list[0].endswith(video_extensions):
            logger.debug("It's a video")
            self.media_file_name = self.filename_list[0]
            self.video = cv2.VideoCapture(self.media_file_name)

    def get_next_image(self):
        if self.video is not None:
            for x in range(self.skip_frames):
                ret, image = self.video.read()
            self.out_file_name = f"video-frame-{self.video_frame_num:04}.jpg"
            self.video_frame_num += 1
            if not ret:
                logger.debug("End of video")
                self.num_vid_plays += 1
                print(f"{self.num_vid_repeat=} {self.num_vid_plays=}")
                if self.num_vid_plays < self.num_vid_repeat:
                    logger.info("Restarting the video")
                    self.video = cv2.VideoCapture(self.media_file_name)
                    return self.get_next_image()
                else:
                    logger.info("Done with repeats")
                    return None
        else:
            # If the filename_list array has more than 1, get the next value.
            if len(self.filename_list) > 1:
                self.filename_list_index += 1
                if self.filename_list_index >= len(self.filename_list):
                    self.filename_list_index = 0
            else:
                self.filename_list_index = 0

            if self.stats_latency_full_process.n >= self.num_repeat:
                return None

            self.media_file_name = self.filename_list[self.filename_list_index]
            self.out_file_name = self.media_file_name
            image = cv2.imread(self.media_file_name)

        if self.resize:
            image = self.resize_image(image)

        # Whether it's from a video frame or image file, at this point the image
        # data is a numpy array. Here we convert it to a raw byte stream.
        res, image = cv2.imencode('.JPEG', image)
        image = image.tobytes()

        logger.debug("Image data (first 32 bytes logged): %s" % image[:32])
        return image

    def resize_image(self, image):
        w = image.shape[1]
        h = image.shape[0]
        logger.debug("Frame size: %dx%d" % (w, h))
        if w > h:
            resize_w = self.resize_long
            resize_h = self.resize_short
        else:
            resize_w = self.resize_short
            resize_h = self.resize_long
        image = cv2.resize(image, (resize_w, resize_h))
        logger.debug("Resized image to: %dx%d" % (resize_w, resize_h))
        return image

    def measure_network_latency(self):
        while self.running:
            if self.net_latency_method == "socket":
                self.time_open_socket()
            elif self.net_latency_method == "ping":
                self.icmp_ping()
            time.sleep(PING_INTERVAL)

    def measure_server_stats(self):
        # TODO: Use /v2/models/yolov4/versions/1/stats
        print("TODO")

    def time_open_socket(self):
        now = time.time()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2)
        result = sock.connect_ex((self.host, self.port))
        if result != 0:
            logger.error("Could not connect to %s on port %d" % (self.host, self.port))
            return
        millis = (time.time() - now) * 1000
        elapsed = "%.3f" % millis
        if self.show_responses:
            logger.info("%s ms to open socket" % (elapsed))
        self.stats_latency_network_only.push(millis)
        Client.stats_latency_network_only.push(millis)

    def icmp_ping(self):
        args = [PING_EXEC, '-c', '1', '-W', '1', self.host]
        p_ping = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
        # save ping stdout
        p_ping_out = str(p_ping.communicate()[0])
        if p_ping.wait() == 0:
            # rtt min/avg/max/mdev = 61.994/61.994/61.994/0.000 ms
            search = re.search(PING_REGEX, p_ping_out, re.M | re.I)
            ping_rtt = float(search.group(2))
            if self.show_responses:
                logger.info("%s ms ICMP ping" % (ping_rtt))
            self.stats_latency_network_only.push(ping_rtt)
            Client.stats_latency_network_only.push(ping_rtt)
        else:
            logger.error("ICMP ping failed")

    def process_result(self, result, image, inference_header_content_length):
        millis = (time.time() - self.latency_start_time) * 1000
        self.stats_latency_full_process.push(millis)
        Client.stats_latency_full_process.push(millis)

        if self.model_name == "ensemble_dali_inception":
            data = result[inference_header_content_length:]
            cls = data.split(':')
            confidence = float(cls[0])
            class_name = cls[2]
            output = f"{class_name} - Confidence={confidence:0.2}"
        elif self.model_name == "ensemble_dali_yolov4":
            try:
                # This is the overall response, in JSON format. We have
                # to drill down a bit to get the data we're interested in,
                # which is stored as a string object, but is also JSON-encoded.
                decoded_json = json.loads(result)
                decoded_json = decoded_json['outputs'][0]['data'][0]
                output = decoded_json
                # Parse the actual object detection inference results.
                decoded_json = json.loads(output)
            except Exception as e:
                logger.error("Could not decode result. Exception: %s. Result: %s" % (e, result))
                return
            if 'success' in decoded_json:
                if decoded_json['success'] == True:
                    self.num_success += 1
                    Client.num_success += 1
            if self.out_dir is not None:
                nparr = np.frombuffer(image, np.uint8)
                img_np = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
                image_w = img_np.shape[1]
                image_h = img_np.shape[0]
                ratio_x = image_w / 608
                ratio_y = image_h / 608
                obj_num = 0
                for json_object in decoded_json['objects']:
                    label = json_object['class']
                    score = float(json_object['confidence'])
                    percent = f"{score*100:0.1f}"
                    rect = json_object['rect']
                    x1 = int(rect[0] * ratio_x)
                    y1 = int(rect[1] * ratio_y)
                    x2 = int(rect[2] * ratio_x)
                    y2 = int(rect[3] * ratio_y)
                    text = f"{label} ({percent}%)"
                    retval, _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 0.50, 2)
                    cv2.rectangle(img_np, (x1, y1), (x2, y2), colors[obj_num], 2)
                    cv2.putText(img_np, text, (x1 + 2, y1 + retval[1] + 2),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.67, colors[obj_num], 2)
                    obj_num += 1
                cv2.imwrite(self.out_dir + "/" + os.path.basename(self.out_file_name), img_np)

        if self.show_responses:
            elapsed = "%.3f" % millis
            logger.info("%s ms to send and receive: %s" % (elapsed, output))

    def display_results(self):
        self.running = False
        if not self.show_responses or not Client.MULTI_THREADED:
            return
        if self.stats_latency_full_process.n > 0:
            logger.info("====> Average Latency Full Process=%.3f ms (stddev=%.3f)" %
                        (self.stats_latency_full_process.mean(),
                         self.stats_latency_full_process.stddev()))
        if self.stats_latency_network_only.n > 0:
            logger.info("====> Average Latency Network Only=%.3f ms (stddev=%.3f)" %
                        (self.stats_latency_network_only.mean(),
                         self.stats_latency_network_only.stddev()))
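# --- Hedged usage sketch (not part of the original source) ---
# A minimal illustration of how the Client class above might be driven from a
# script. The attribute names come from the class itself; the host, port,
# model name, and media file are hypothetical placeholders, and the request
# step is only sketched because the code that sends the image to the
# inference server is not shown in this excerpt.
if __name__ == "__main__":
    client = Client("benchmark-host.example.com", 8000)  # hypothetical endpoint
    client.model_name = "ensemble_dali_yolov4"
    client.filename_list = ["objects_320x180.mp4"]       # hypothetical video file
    client.num_vid_repeat = 1
    client.show_responses = True
    client.start()
    image = client.get_next_image()
    while image is not None:
        client.latency_start_time = time.time()
        # ... send `image` to the inference server here, then pass the raw
        # response, the image bytes, and the inference header length to
        # client.process_result() ...
        image = client.get_next_image()
    client.display_results()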
logger.addHandler(fh)

parser = argparse.ArgumentParser()
parser.add_argument("-s", "--seconds", required=True,
                    help="Number of seconds to run")
parser.add_argument("-d", "--delay", required=False, default=0.25,
                    help="Number of seconds to wait between each sample")
args = parser.parse_args()

from stats import RunningStats
stats_cpu_util = RunningStats()
stats_mem_util = RunningStats()
stats_gpu_util = RunningStats()
stats_gpu_mem_util = RunningStats()
stats_memory_total = RunningStats()
stats_memory_used = RunningStats()
stats_memory_free = RunningStats()

print("%CPU, %MEM, %GPU, %GPU_MEM, MEM_TOTAL (MiB), MEM_USED (MiB), MEM_FREE (MiB)")

time.sleep(float(0.1))
finish_time = datetime.datetime.now() + datetime.timedelta(seconds=float(args.seconds))
while datetime.datetime.now() < finish_time:
    ret1 = usage_cpu_and_mem()
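# --- Hedged sketch (not part of the original source) ---
# usage_cpu_and_mem() is called above but not defined in this excerpt. A
# minimal stand-in could look like the following, assuming psutil is
# available; the real implementation and its dict keys may differ.
import psutil

def usage_cpu_and_mem_sketch():
    """Return CPU and memory utilization as a dict (hypothetical stand-in)."""
    mem = psutil.virtual_memory()
    return {
        "cpu_utilization": psutil.cpu_percent(interval=None),
        "mem_utilization": mem.percent,
        "memory_total": mem.total // (1024 * 1024),    # MiB
        "memory_used": mem.used // (1024 * 1024),      # MiB
        "memory_free": mem.available // (1024 * 1024)  # MiB
    }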
class Client:
    """ Base Client class """

    MULTI_THREADED = False
    # Initialize "Grand total" class variables.
    stats_latency_full_process = RunningStats()
    stats_latency_network_only = RunningStats()
    stats_server_processing_time = RunningStats()
    stats_cpu_util = RunningStats()
    stats_mem_util = RunningStats()
    stats_gpu_util = RunningStats()
    stats_gpu_util_max = RunningStats()
    stats_gpu_util_avg = RunningStats()
    stats_gpu_mem_util = RunningStats()
    stats_gpu_mem_util_max = RunningStats()
    stats_gpu_mem_util_avg = RunningStats()

    def __init__(self, host, port):
        # Initialize instance variables.
        self.host = host
        self.port = port
        self.running = False
        self.do_server_stats = False
        self.show_responses = False
        self.stats_latency_full_process = RunningStats()
        self.stats_latency_network_only = RunningStats()
        self.stats_server_processing_time = RunningStats()
        self.stats_cpu_util = RunningStats()
        self.stats_mem_util = RunningStats()
        self.stats_gpu_util = RunningStats()
        self.stats_gpu_util_max = RunningStats()
        self.stats_gpu_util_avg = RunningStats()
        self.stats_gpu_mem_util = RunningStats()
        self.stats_gpu_mem_util_max = RunningStats()
        self.stats_gpu_mem_util_avg = RunningStats()
        self.media_file_name = None
        self.latency_start_time = 0
        self.loop_count = 0
        self.num_repeat = 0
        self.num_vid_repeat = 0
        self.filename_list = []
        self.filename_list_index = 0
        self.json_params = None
        self.base64 = False
        self.video = None
        self.resize = True
        self.resize_long = 240
        self.resize_short = 180
        self.skip_frames = 1
        logger.debug("host:port = %s:%d" % (self.host, self.port))

    def start(self):
        self.running = True
        logger.debug("media file(s) %s" % (self.filename_list))
        video_extensions = ('mp4', 'avi', 'mov')
        if self.filename_list[0].endswith(video_extensions):
            logger.debug("It's a video")
            self.media_file_name = self.filename_list[0]
            self.video = cv2.VideoCapture(self.media_file_name)

    def get_next_image(self):
        if self.video is not None:
            for x in range(self.skip_frames):
                ret, image = self.video.read()
            if not ret:
                logger.debug("End of video")
                return None
            vw = image.shape[1]
            vh = image.shape[0]
            logger.debug("Video size: %dx%d" % (vw, vh))
            if self.resize:
                if vw > vh:
                    resize_w = self.resize_long
                    resize_h = self.resize_short
                else:
                    resize_w = self.resize_short
                    resize_h = self.resize_long
                # if vw > vh:
                #     resize_w = int(self.resize_short / (vh/vw))
                #     resize_h = self.resize_short
                # else:
                #     resize_w = int(self.resize_long / (vw/vh))
                #     resize_h = self.resize_long
                image = cv2.resize(image, (resize_w, resize_h))
                logger.debug("Resized image to: %dx%d" % (resize_w, resize_h))
            res, image = cv2.imencode('.JPEG', image)
            image = image.tobytes()
        else:
            # If the filename_list array has more than 1, get the next value.
            if len(self.filename_list) > 1:
                self.filename_list_index += 1
                if self.filename_list_index >= len(self.filename_list):
                    self.filename_list_index = 0
            else:
                self.filename_list_index = 0

            if self.stats_latency_full_process.n >= self.num_repeat:
                return None

            self.media_file_name = self.filename_list[self.filename_list_index]
            f = open(self.media_file_name, "rb")
            image = f.read()

        logger.debug("Image data (first 32 bytes logged): %s" % image[:32])
        return image

    def measure_network_latency(self):
        while self.running:
            if self.net_latency_method == "SOCKET":
                self.time_open_socket()
            elif self.net_latency_method == "PING":
                self.icmp_ping()
            time.sleep(PING_INTERVAL)

    def measure_server_stats(self):
        url = "http://%s:%d%s" % (self.host, self.port, "/server/usage/")
        if self.tls:
            url = url.replace("http", "https", 1)
        time.sleep(SERVER_STATS_DELAY)
        while self.running:
            decoded_json = json.loads(requests.get(url).content)
            if 'cpu_util' in decoded_json:
                self.stats_cpu_util.push(float(decoded_json['cpu_util']))
                Client.stats_cpu_util.push(float(decoded_json['cpu_util']))
            if 'mem_util' in decoded_json:
                self.stats_mem_util.push(float(decoded_json['mem_util']))
                Client.stats_mem_util.push(float(decoded_json['mem_util']))
            if 'gpu_util' in decoded_json:
                self.stats_gpu_util.push(float(decoded_json['gpu_util']))
                Client.stats_gpu_util.push(float(decoded_json['gpu_util']))
            if 'gpu_util_max' in decoded_json:
                self.stats_gpu_util_max.push(float(decoded_json['gpu_util_max']))
                Client.stats_gpu_util_max.push(float(decoded_json['gpu_util_max']))
            if 'gpu_util_avg' in decoded_json:
                self.stats_gpu_util_avg.push(float(decoded_json['gpu_util_avg']))
                Client.stats_gpu_util_avg.push(float(decoded_json['gpu_util_avg']))
            if 'gpu_mem_util' in decoded_json:
                self.stats_gpu_mem_util.push(float(decoded_json['gpu_mem_util']))
                Client.stats_gpu_mem_util.push(float(decoded_json['gpu_mem_util']))
            if 'gpu_mem_util_max' in decoded_json:
                self.stats_gpu_mem_util_max.push(float(decoded_json['gpu_mem_util_max']))
                Client.stats_gpu_mem_util_max.push(float(decoded_json['gpu_mem_util_max']))
            if 'gpu_mem_util_avg' in decoded_json:
                self.stats_gpu_mem_util_avg.push(float(decoded_json['gpu_mem_util_avg']))
                Client.stats_gpu_mem_util_avg.push(float(decoded_json['gpu_mem_util_avg']))
            if self.show_responses:
                logger.info(requests.get(url).content)
            time.sleep(SERVER_STATS_INTERVAL)

    def time_open_socket(self):
        now = time.time()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.settimeout(2)
        result = sock.connect_ex((self.host, self.port))
        if result != 0:
            logger.error("Could not connect to %s on port %d" % (self.host, self.port))
            return
        millis = (time.time() - now) * 1000
        elapsed = "%.3f" % millis
        if self.show_responses:
            logger.info("%s ms to open socket" % (elapsed))
        self.stats_latency_network_only.push(millis)
        Client.stats_latency_network_only.push(millis)

    def icmp_ping(self):
        args = [PING, '-c', '1', '-W', '1', self.host]
        p_ping = subprocess.Popen(args, shell=False, stdout=subprocess.PIPE)
        # save ping stdout
        p_ping_out = str(p_ping.communicate()[0])
        if p_ping.wait() == 0:
            # logger.info(p_ping_out)
            # rtt min/avg/max/mdev = 61.994/61.994/61.994/0.000 ms
            search = re.search(PING_REGEX, p_ping_out, re.M | re.I)
            ping_rtt = float(search.group(2))
            if self.show_responses:
                logger.info("%s ms ICMP ping" % (ping_rtt))
            self.stats_latency_network_only.push(ping_rtt)
            Client.stats_latency_network_only.push(ping_rtt)
        else:
            logger.error("ICMP ping failed")

    def process_result(self, result):
        global TEST_PASS
        try:
            decoded_json = json.loads(result)
        except Exception as e:
            logger.error("Could not decode result. Exception: %s. Result: %s" % (e, result))
            TEST_PASS = False
            return

        if 'success' in decoded_json:
            if decoded_json['success'] == "true":
                TEST_PASS = True
            else:
                TEST_PASS = False

        if 'latency_start' in decoded_json:
            millis = (time.time() - decoded_json['latency_start']) * 1000
            self.stats_latency_network_only.push(millis)
            Client.stats_latency_network_only.push(millis)
        else:
            millis = (time.time() - self.latency_start_time) * 1000
            self.stats_latency_full_process.push(millis)
            Client.stats_latency_full_process.push(millis)

        if 'server_processing_time' in decoded_json:
            server_processing_time = decoded_json['server_processing_time']
            self.stats_server_processing_time.push(float(server_processing_time))
            Client.stats_server_processing_time.push(float(server_processing_time))

        if self.show_responses:
            elapsed = "%.3f" % millis
            logger.info("%s ms to send and receive: %s" % (elapsed, result))

    def display_results(self):
        self.running = False
        if not self.show_responses or not Client.MULTI_THREADED:
            return
        if self.stats_latency_full_process.n > 0:
            logger.info("====> Average Latency Full Process=%.3f ms (stddev=%.3f)" %
                        (self.stats_latency_full_process.mean(),
                         self.stats_latency_full_process.stddev()))
        if self.stats_latency_network_only.n > 0:
            logger.info("====> Average Latency Network Only=%.3f ms (stddev=%.3f)" %
                        (self.stats_latency_network_only.mean(),
                         self.stats_latency_network_only.stddev()))
        if self.stats_server_processing_time.n > 0:
            logger.info("====> Average Server Processing Time=%.3f ms (stddev=%.3f)" %
                        (self.stats_server_processing_time.mean(),
                         self.stats_server_processing_time.stddev()))
import gym

from stats import RunningStats

env = ...
state_dim = ...
action_dim = ...
state_dim += 1
policy = ...
stats = RunningStats()
value_function = ValueFunction(state_dim)
policy = Policy(state_dim, action_dim, target_kl)

run_policy(env, policy, scaler, episodes=5)

episode = 0
while episode < num_episodes:
    trajectory = run_policy(env, policy, scaler, episodes=batch_size)
    episode += len(trajectory)
    values = value_function.predict(states)
    # Compute discounted returns (working backwards over the rewards).
    discounted_reward = 0
    target_values = []
    for r in rewards[::-1]:
        discounted_reward = r + gamma * discounted_reward
        target_values.append(discounted_reward)
    target_values = target_values[::-1]
    advantage = target_values - values
    normalized_advantage = (advantage - advantage.mean()) / advantage.std()
    policy.update(state, action, advantage)
    value_function.fit(states, target_values)

def runEpisode():
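# --- Hedged sketch (not part of the original source) ---
# The loop above creates `stats = RunningStats()` but the `scaler` passed to
# run_policy() is never defined in this excerpt. One plausible wiring, shown
# purely as an illustration, is a running mean/std accumulator that
# standardizes observations before they reach the policy. The Scaler class and
# its method names below are hypothetical.
import numpy as np

class Scaler:
    """Standardize observations with running statistics (illustrative only)."""

    def __init__(self, obs_dim):
        self.mean = np.zeros(obs_dim)
        self.var = np.ones(obs_dim)
        self.count = 1e-4

    def update(self, batch):
        # Batched (Chan et al.) update of the running mean and variance.
        batch_mean = batch.mean(axis=0)
        batch_var = batch.var(axis=0)
        batch_count = batch.shape[0]
        delta = batch_mean - self.mean
        total = self.count + batch_count
        self.mean = self.mean + delta * batch_count / total
        m_a = self.var * self.count
        m_b = batch_var * batch_count
        self.var = (m_a + m_b + delta ** 2 * self.count * batch_count / total) / total
        self.count = total

    def normalize(self, obs):
        return (obs - self.mean) / (np.sqrt(self.var) + 1e-8)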
class ObjectDetector(object):

    MULTI_THREADED = False
    stats_processing_time = RunningStats()
    stats_cpu_utilization = RunningStats()
    stats_mem_utilization = RunningStats()
    stats_gpu_utilization = RunningStats()
    stats_gpu_mem_utilization = RunningStats()

    def __init__(self):
        self.stats_processing_time = RunningStats()
        self.stats_cpu_utilization = RunningStats()
        self.stats_mem_utilization = RunningStats()
        self.stats_gpu_utilization = RunningStats()
        self.stats_gpu_mem_utilization = RunningStats()
        self.media_file_name = None
        self.loop_count = 0
        self.num_repeat = 0
        self.num_vid_repeat = 0
        self.filename_list = []
        self.filename_list_index = 0
        self.video = None
        self.resize = True
        self.resize_long = 240
        self.resize_short = 180
        self.skip_frames = 1

    def start(self):
        self.running = True
        logger.debug("media file(s) %s" % (self.filename_list))
        video_extensions = ('mp4', 'avi', 'mov')
        if self.filename_list[0].endswith(video_extensions):
            logger.debug("It's a video")
            self.media_file_name = self.filename_list[0]
            self.video = cv2.VideoCapture(self.media_file_name)
        while True:
            image = self.get_next_image()
            if image is None:
                break
            results = self.process_image(image, self.outdir)
            if self.show_responses:
                logger.info(results)
        self.display_results()

    def get_next_image(self):
        if self.video is not None:
            for x in range(self.skip_frames):
                ret, image = self.video.read()
            if not ret:
                logger.debug("End of video")
                return None
            vw = image.shape[1]
            vh = image.shape[0]
            logger.debug("Video size: %dx%d" % (vw, vh))
            if self.resize:
                if vw > vh:
                    resize_w = self.resize_long
                    resize_h = self.resize_short
                else:
                    resize_w = self.resize_short
                    resize_h = self.resize_long
                image = cv2.resize(image, (resize_w, resize_h))
                logger.debug("Resized image to: %dx%d" % (resize_w, resize_h))
        else:
            # If the filename_list array has more than 1, get the next value.
            if len(self.filename_list) > 1:
                self.filename_list_index += 1
                if self.filename_list_index >= len(self.filename_list):
                    self.filename_list_index = 0
            else:
                self.filename_list_index = 0

            if self.stats_processing_time.n >= self.num_repeat:
                return None

            self.media_file_name = self.filename_list[self.filename_list_index]
            image = cv2.imread(self.media_file_name)

        return image

    def display_results(self):
        self.running = False
        if not self.show_responses or not ObjectDetector.MULTI_THREADED:
            return
        if self.stats_processing_time.n > 0:
            logger.info("====> Average Processing Time=%.3f ms (stddev=%.3f)" %
                        (self.stats_processing_time.mean(),
                         self.stats_processing_time.stddev()))

    def measure_server_stats(self):
        time.sleep(SERVER_STATS_DELAY)
        while self.running:
            ret1 = usage_cpu_and_mem()
            ret2 = usage_gpu()
            ret = ret1.copy()
            ret.update(ret2)
            decoded_json = ret
            if 'cpu_utilization' in decoded_json:
                self.stats_cpu_utilization.push(float(decoded_json['cpu_utilization']))
                ObjectDetector.stats_cpu_utilization.push(float(decoded_json['cpu_utilization']))
            if 'mem_utilization' in decoded_json:
                self.stats_mem_utilization.push(float(decoded_json['mem_utilization']))
                ObjectDetector.stats_mem_utilization.push(float(decoded_json['mem_utilization']))
            if 'gpu_utilization' in decoded_json:
                self.stats_gpu_utilization.push(float(decoded_json['gpu_utilization']))
                ObjectDetector.stats_gpu_utilization.push(float(decoded_json['gpu_utilization']))
            if 'gpu_mem_utilization' in decoded_json:
                self.stats_gpu_mem_utilization.push(float(decoded_json['gpu_mem_utilization']))
                ObjectDetector.stats_gpu_mem_utilization.push(float(decoded_json['gpu_mem_utilization']))
            if self.show_responses:
                logger.info(json.dumps(decoded_json))
            time.sleep(SERVER_STATS_INTERVAL)

    def is_gpu_supported(self):
        return gpu_support

    def detect_image(self, img):
        # scale and pad image
        ratio = min(img_size / img.size[0], img_size / img.size[1])
        imw = round(img.size[0] * ratio)
        imh = round(img.size[1] * ratio)
        img_transforms = transforms.Compose([
            transforms.Resize((imh, imw)),
            transforms.Pad((max(int((imh - imw) / 2), 0),
                            max(int((imw - imh) / 2), 0),
                            max(int((imh - imw) / 2), 0),
                            max(int((imw - imh) / 2), 0)),
                           (128, 128, 128)),
            transforms.ToTensor(),
        ])
        # convert image to Tensor
        image_tensor = img_transforms(img).float()
        image_tensor = image_tensor.unsqueeze_(0)
        input_img = Variable(image_tensor.type(Tensor))
        # run inference on the model and get detections
        with torch.no_grad():
            detections = model(input_img)
            detections = utils.non_max_suppression(detections, 80, conf_thres, nms_thres)
        return detections[0]

    def process_image(self, img, outdir=None):
        """
        Convert and process the image. If "outdir" is included, save the
        processed image there. (Only for command-line executions. It will
        always be None when called by the Django server.)
        """
        # load image and get detections
        img = Image.fromarray(img, 'RGB')
        prev_time = time.time()
        detections = self.detect_image(img)
        millis = (time.time() - prev_time) * 1000
        elapsed = "%.3f" % millis
        self.stats_processing_time.push(millis)
        ObjectDetector.stats_processing_time.push(millis)

        if outdir is not None:
            filename = img.filename
            img = np.array(img)
            # Get bounding-box colors
            cmap = plt.get_cmap('tab20b')
            colors = [cmap(i) for i in np.linspace(0, 1, 20)]
            plt.figure()
            fig, ax = plt.subplots(1, figsize=(12, 9))
            ax.imshow(img)
        else:
            img = np.array(img)

        pad_x = max(img.shape[0] - img.shape[1], 0) * (img_size / max(img.shape))
        pad_y = max(img.shape[1] - img.shape[0], 0) * (img_size / max(img.shape))
        unpad_h = img_size - pad_y
        unpad_w = img_size - pad_x

        # Build list of results
        objects = []
        if detections is not None:
            if outdir is not None:
                unique_labels = detections[:, -1].cpu().unique()
                n_cls_preds = len(unique_labels)
                bbox_colors = random.sample(colors, n_cls_preds)
            # browse detections and draw bounding boxes
            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                box_h = ((y2 - y1) / unpad_h) * img.shape[0]
                box_w = ((x2 - x1) / unpad_w) * img.shape[1]
                y1 = ((y1 - pad_y // 2) / unpad_h) * img.shape[0]
                x1 = ((x1 - pad_x // 2) / unpad_w) * img.shape[1]
                if outdir is not None:
                    color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
                    bbox = patches.Rectangle((x1, y1), box_w, box_h,
                                             linewidth=2, edgecolor=color,
                                             facecolor='none')
                    ax.add_patch(bbox)
                    plt.text(x1, y1, s=classes[int(cls_pred)], color='white',
                             verticalalignment='top',
                             bbox={'color': color, 'pad': 0})
                confidence = "%.2f" % float(cls_conf)
                object = {
                    "rect": [int(x1), int(y1), int(x1 + box_w), int(y1 + box_h)],
                    "class": classes[int(cls_pred)],
                    "confidence": confidence
                }
                objects.append(object)

        if outdir is not None:
            plt.axis('off')
            outname = outdir + "/det-" + os.path.basename(filename)
            print("Saving to " + outname)
            plt.savefig(outname, bbox_inches='tight', pad_inches=0.0)
            #plt.show()
            plt.close('all')

        return objects
This script is for launching multiple simultaneous multi_client instances
to benchmark a given server.
"""
import sys
import os
import json
import time
import logging
import argparse
import requests
from threading import Thread

util_dir = "../utilities"
sys.path.append(os.path.join(os.path.dirname(__file__), util_dir))
from stats import RunningStats

stats_latency_full_process = RunningStats()
stats_latency_network_only = RunningStats()
stats_server_processing_time = RunningStats()
stats_cpu_util = RunningStats()
stats_mem_util = RunningStats()
stats_gpu_util = RunningStats()
stats_gpu_util_max = RunningStats()
stats_gpu_util_avg = RunningStats()
stats_gpu_mem_util = RunningStats()
stats_gpu_mem_util_max = RunningStats()
stats_gpu_mem_util_avg = RunningStats()

LAUNCH_INTERVAL = 2  # Seconds
filename = "objects_320x180.mp4"

def launch_remote_benchmark(url):
logger.addHandler(fh)

parser = argparse.ArgumentParser()
parser.add_argument("-s", "--seconds", required=True,
                    help="Number of seconds to run")
parser.add_argument("-d", "--delay", required=False, default=0.25,
                    help="Number of seconds to wait between each sample")
args = parser.parse_args()

from stats import RunningStats
stats_cpu_util = RunningStats()
stats_mem_util = RunningStats()
stats_gpu_util = RunningStats()
stats_gpu_mem_util = RunningStats()
stats_gpu_util_max = RunningStats()
stats_gpu_mem_util_max = RunningStats()
stats_gpu_util_avg = -1
stats_gpu_mem_util_avg = -1

print("%CPU, %MEM, %GPU, %GPU_MAX, %GPU_AVG, %GPU_MEM, %GPU_MEM_MAX, %GPU_MEM_AVG")

time.sleep(float(0.1))
finish_time = datetime.datetime.now() + datetime.timedelta(seconds=float(args.seconds))
while datetime.datetime.now() < finish_time:
def prepare_input_mask():
    # define mixture spectrograms tensor
    if task == 'test':
        print "%s - Creating spectrograms tensors." % time.ctime()
        Xmix_tensor = np.zeros((n_mix, n_freq, min_frames, n_channels), dtype=np.complex64)
        pressure_tensor = np.zeros([n_mix, n_sources, n_freq, min_frames, 3], np.complex64)

    # define tensors and lists
    theta_tensor = np.zeros([n_mix, n_freq, min_frames])
    Gx_tensor = np.zeros([n_mix, n_freq, min_frames])
    Gy_tensor = np.zeros([n_mix, n_freq, min_frames])
    IRM_tensor = np.zeros([n_mix, n_sources, n_freq, min_frames])
    #IBM_tensor = np.zeros([n_mix, n_sources, n_freq, min_frames])

    # running mean and std
    runstats = RunningStats(n_freq, np.float64)
    stats = []

    ### INPUT DATA GENERATION
    # create mixtures
    print "%s - Creating data." % (time.ctime())
    [vel_tensor, p0_tensor, raw_label_tensor] = prepare_data.prepare_data(n_sources_string, task, Train_Room)
    if task == 'train':
        del raw_label_tensor
    print "%s - Data created!." % (time.ctime())

    print "%s - Filling data tensors." % (time.ctime())
    for mix_index in range(0, p0_tensor.shape[0]):
        #print raw_label_tensor[mix_index,:]
        # pressure paths
        p0 = p0_tensor[mix_index, :, :]
        vel = vel_tensor[mix_index, :, :, :]

        # create mixtures
        p0_mix = np.sum(p0, axis=0)
        vel_mix = np.sum(vel, axis=0)

        # pressure tensors
        X = fill_spectrogram_tensor(p0_mix, vel_mix[:, 0], vel_mix[:, 1])
        del p0_mix, vel_mix

        # angles distribution
        theta_deg = angles_dist(X[:, :, 0], X[:, :, 1], X[:, :, 2])

        # initialize tensor for all sources
        X_all = np.zeros([n_sources, n_freq, min_frames, 2], np.complex64)
        for source_index in range(0, n_sources):
            # sources pressures
            X_source = fill_spectrogram_tensor(p0[source_index, :], vel[source_index, :, 0], vel[source_index, :, 1])
            X_all[source_index, :, :, :] = X_source[:, :, 1:3]
            if task == 'test':
                pressure_tensor[mix_index, source_index, :, :, :] = X_source
        '''
        plt.imshow(np.abs(pressure_tensor[mix_index,0,:,:,1]), cmap=plt.cm.spectral, aspect='auto', interpolation='none')
        clb = plt.colorbar()
        plt.gca().invert_yaxis()
        plt.savefig(os.path.join(figures_path, 'pressure_tensor' + str(mix_index) + '_' + str(int(raw_label_tensor[mix_index,0]))))
        plt.clf()
        plt.show()

        plt.imshow(np.abs(pressure_tensor[mix_index,1,:,:,1]), cmap=plt.cm.spectral, aspect='auto', interpolation='none')
        clb = plt.colorbar()
        plt.gca().invert_yaxis()
        plt.savefig(os.path.join(figures_path, 'pressure_tensor' + str(mix_index) + '_' + str(int(raw_label_tensor[mix_index,1]))))
        plt.clf()
        plt.show()

        plt.imshow(np.abs(pressure_tensor[mix_index,2,:,:,1]), cmap=plt.cm.spectral, aspect='auto', interpolation='none')
        clb = plt.colorbar()
        plt.gca().invert_yaxis()
        plt.savefig(os.path.join(figures_path, 'pressure_tensor' + str(mix_index) + '_' + str(int(raw_label_tensor[mix_index,2]))))
        plt.clf()
        plt.show()
        '''

        # define ideal masks
        for source_index in range(0, n_sources):
            # define IRMs (Ideal Ratio Masks)
            IRM_tensor[mix_index, source_index, :, :] = np.mean(evaluate_IRM(X_all, source_index), axis=2)
            plt.imshow(IRM_tensor[mix_index, source_index, :, :], cmap=plt.cm.spectral, aspect='auto', interpolation='none')
            clb = plt.colorbar()
            plt.tick_params(labelsize=18)
            plt.xlabel(r'$\omega$', fontsize=25)
            plt.ylabel('f', fontsize=25)
            clb.ax.tick_params(labelsize=18)
            plt.title('IRM source ' + str(source_index), fontsize=30)
            plt.gca().invert_yaxis()
            plt.savefig(os.path.join(figures_path, 'IRM' + str(mix_index) + '_' + str(source_index)))
            plt.clf()
            plt.show()
            # define IBMs (Ideal Binary Masks)
            #IBM_tensor[mix_index,source_index,:,:] = np.mean(evaluate_IBM(X_all, source_index),axis=2)

        # fill tensor
        theta_tensor[mix_index, :, :] = theta_deg
        print mix_index * 10
        Gx_tensor[mix_index, :, :] = mag(X[:, :, 1])
        Gy_tensor[mix_index, :, :] = mag(X[:, :, 2])

        fig, ax = plt.subplots(1, figsize=(14, 14))
        n, bins, patches = plt.hist(theta_deg.flatten(), bins=72, facecolor='blue', alpha=0.5, edgecolor='black', linewidth=1.2)
        plt.tick_params(labelsize=28)
        plt.xlabel(r'$\theta$ (degrees)', fontsize=36)
        plt.ylabel(r'$\theta$ count', fontsize=36)
        plt.title('Distribution of angles', fontsize=42)
        plt.show()
        plt.savefig(os.path.join(figures_path, 'theta_deg' + str(mix_index)), bbox_inches='tight')
        plt.clf()

        plt.imshow(theta_tensor[mix_index, :, :], cmap=plt.cm.spectral, aspect='auto', interpolation='none')
        clb = plt.colorbar()
        plt.tick_params(labelsize=28)
        plt.xlabel('Time', fontsize=36)
        plt.ylabel('Frequency', fontsize=36)
        clb.ax.tick_params(labelsize=28)
        plt.title('Distribution of angles', fontsize=42)
        plt.show()
        plt.savefig(os.path.join(figures_path, 'theta_tensor' + str(mix_index)), bbox_inches='tight')
        plt.clf()
        plt.show()

        plt.imshow(Gx_tensor[mix_index, :, :], cmap=plt.cm.spectral, aspect='auto', interpolation='none')
        clb = plt.colorbar()
        plt.tick_params(labelsize=28)
        plt.xlabel('Time', fontsize=36)
        plt.ylabel('Frequency', fontsize=36)
        clb.ax.tick_params(labelsize=28)
        plt.title('Spectrogram: ' + '$G_x$', fontsize=42)
        plt.gca().invert_yaxis()
        plt.savefig(os.path.join(figures_path, 'Gx' + str(mix_index)), bbox_inches='tight')
        plt.clf()
        plt.show()

        plt.imshow(Gy_tensor[mix_index, :, :], cmap=plt.cm.spectral, aspect='auto', interpolation='none')
        clb = plt.colorbar()
        plt.tick_params(labelsize=28)
        plt.xlabel('Time', fontsize=36)
        plt.ylabel('Frequency', fontsize=36)
        clb.ax.tick_params(labelsize=28)
        plt.title('Spectrogram: ' + '$G_y$', fontsize=42)
        plt.gca().invert_yaxis()
        plt.savefig(os.path.join(figures_path, 'Gy' + str(mix_index)), bbox_inches='tight')
        plt.clf()
        plt.show()

        # update stats
        runstats.update(np.hstack([theta_tensor[mix_index, :, :], Gx_tensor[mix_index, :, :], Gy_tensor[mix_index, :, :]]).T)

        if task == 'test':
            # fill original mixtures spectrograms
            Xmix_tensor[mix_index, :, :, 0] = X[:, :, 1]  # left
            Xmix_tensor[mix_index, :, :, 1] = X[:, :, 2]  # right

        del X, X_all, X_source, theta_deg

    ### FINAL EVALUATION
    # cut empty elements in tensor
    theta_tensor = theta_tensor[0:p0_tensor.shape[0], :, :]
    IRM_tensor = IRM_tensor[0:p0_tensor.shape[0], :, :, :]
    #IBM_tensor = IBM_tensor[0:p0_tensor.shape[0], :, :, :]
    print IRM_tensor.shape
    if 'MV' in features_name:
        Gx_tensor = Gx_tensor[0:p0_tensor.shape[0], :, :]
        Gy_tensor = Gy_tensor[0:p0_tensor.shape[0], :, :]

    # cut spectrograms
    if task == 'test':
        raw_label_tensor = raw_label_tensor[0:p0_tensor.shape[0], :]
        Xmix_tensor = Xmix_tensor[0:p0_tensor.shape[0], :, :, :]
        pressure_tensor = pressure_tensor[0:p0_tensor.shape[0], :, :, :, :]

    # fill features tensor
    features_all = np.zeros([theta_tensor.shape[0], theta_tensor.shape[1], theta_tensor.shape[2], n_features])
    features_all[:, :, :, 0] = theta_tensor
    features_all[:, :, :, 1] = Gx_tensor
    features_all[:, :, :, 2] = Gy_tensor
    del theta_tensor, Gx_tensor, Gy_tensor

    # use mean and variance
    if task == 'train':
        # fill stats list
        print "%s - Evaluating mean and std for normalization." % (time.ctime())
        mean = np.zeros([n_freq])
        std = np.zeros([n_freq])
        mean = runstats.stats['mean']
        std = np.sqrt(runstats.stats['var'])
        print mean
        print std
        stats.append([mean, std])
        #print(np.mean(np.hstack([theta_tensor[mix_index,:,:],Gx_tensor[mix_index,:,:],Gy_tensor[mix_index,:,:]]).T, axis=0))
        #print(np.sqrt(np.var(np.hstack([theta_tensor[mix_index,:,:],Gx_tensor[mix_index,:,:],Gy_tensor[mix_index,:,:]]).T, axis=0, ddof=1)))
    elif task == 'test':
        # use train mean and std
        with h5py.File(save_train_data_path, 'r') as hf:
            stats = hf.get('stats')
            stats = np.array(stats)
        mean = stats[0][0]
        std = stats[0][1]

    # normalization
    print "%s - Normalizing." % (time.ctime())
    for f in range(0, features_all.shape[1]):
        features_all[:, f, :, :] = (features_all[:, f, :, :] - mean[f]) / std[f]

    # save data
    print "%s - Saving %s data tensor." % (time.ctime(), task)
    if task == 'train':
        with h5py.File(save_train_data_path, 'w') as hf:
            hf.create_dataset('data', data=features_all)
            hf.create_dataset('IRM', data=IRM_tensor)
            #hf.create_dataset('IBM', data=IBM_tensor)
            hf.create_dataset('stats', data=stats)
    elif task == 'test':
        with h5py.File(save_test_data_path, 'w') as hf:
            hf.create_dataset('data', data=features_all)
            hf.create_dataset('spectrograms_mixture', data=Xmix_tensor)
            hf.create_dataset('spectrograms_original', data=pressure_tensor)
            hf.create_dataset('labels', data=raw_label_tensor)

    print "%s - Done!." % (time.ctime())
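# --- Hedged sketch (not part of the original source) ---
# prepare_input_mask() relies on RunningStats(n_freq, np.float64) with an
# update() method and a stats dict exposing 'mean' and 'var'; that class is
# not shown in this excerpt. The snippet below illustrates the same
# per-frequency streaming mean/variance idea with a small Welford-style
# accumulator, purely as a stand-in for the real implementation.
import numpy as np

class StreamingStats(object):
    """Per-column running mean/variance via Welford's algorithm (illustrative)."""

    def __init__(self, n_cols, dtype=np.float64):
        self.n = 0
        self.mean = np.zeros(n_cols, dtype=dtype)
        self.m2 = np.zeros(n_cols, dtype=dtype)

    def update(self, rows):
        # `rows` has shape (n_samples, n_cols), matching the .T layout used above,
        # where each column corresponds to one frequency bin.
        for row in np.asarray(rows):
            self.n += 1
            delta = row - self.mean
            self.mean += delta / self.n
            self.m2 += delta * (row - self.mean)

    @property
    def stats(self):
        var = self.m2 / (self.n - 1) if self.n > 1 else np.ones_like(self.m2)
        return {'mean': self.mean, 'var': var}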