def video():
    cap = cv2.VideoCapture(input_file_path)
    # Allocate host/device buffers once and reuse them for every frame.
    h_input, d_input, h_output, d_output, stream = inf.allocate_buffers(
        engine, 1, trt.float32)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        # BGR -> RGB, resize to the network input size (cv2.resize takes
        # dsize as (width, height)), scale to [-1, 1], then HWC -> CHW.
        resized = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        resized = cv2.resize(resized, (WIDTH, HEIGHT))
        resized = (2.0 / 255.0) * resized - 1.0
        resized = resized.transpose((2, 0, 1))
        out = inf.do_inference(engine, resized, h_input, d_input, h_output,
                               d_output, stream, 1, HEIGHT, WIDTH)
        print(out)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
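These examples all rely on an `inf.allocate_buffers` helper that is not shown. A minimal sketch of what such a helper typically looks like, assuming the PyCUDA-based pattern from NVIDIA's TensorRT Python samples and a single-input, single-output engine (a reconstruction, not the actual module used here):

import pycuda.autoinit  # noqa: F401 -- creates a CUDA context on import
import pycuda.driver as cuda
import tensorrt as trt


def allocate_buffers(engine, batch_size, data_type):
    # Page-locked host buffers allow asynchronous host<->device copies.
    h_input = cuda.pagelocked_empty(
        batch_size * trt.volume(engine.get_binding_shape(0)),
        dtype=trt.nptype(data_type))
    h_output = cuda.pagelocked_empty(
        batch_size * trt.volume(engine.get_binding_shape(1)),
        dtype=trt.nptype(data_type))
    # Device buffers of matching size.
    d_input = cuda.mem_alloc(h_input.nbytes)
    d_output = cuda.mem_alloc(h_output.nbytes)
    # A CUDA stream to serialize the copies and the execution.
    stream = cuda.Stream()
    return h_input, d_input, h_output, d_output, stream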
def main(args):
    input_file_path = args.input_image
    serialized_plan_fp32 = args.engine_file
    HEIGHT = args.height
    WIDTH = args.width

    # Preprocess: resize, float32, HWC -> CHW, mean subtraction.
    image = np.asarray(Image.open(input_file_path))
    img = rescale_image(image, (HEIGHT, WIDTH), order=1)
    im = np.array(img, dtype=np.float32, order='C')
    im = im.transpose((2, 0, 1))
    im = sub_mean_chw(im)

    # TensorRT inference.
    engine = eng.load_engine(trt_runtime, serialized_plan_fp32)
    h_input, d_input, h_output, d_output, stream = inf.allocate_buffers(
        engine, 1, trt.float32)
    out = inf.do_inference(engine, im, h_input, d_input, h_output, d_output,
                           stream, 1, HEIGHT, WIDTH)
    out = color_map(out)
    colorImage_trt = Image.fromarray(out.astype(np.uint8))
    colorImage_trt.save('trt_output.png')

    # Run the original Keras model on the same input for comparison.
    semantic_model = keras.models.load_model(args.hdf5_file)
    out_keras = semantic_model.predict(im.reshape(-1, 3, HEIGHT, WIDTH))
    out_keras = color_map(out_keras)
    colorImage_k = Image.fromarray(out_keras.astype(np.uint8))
    colorImage_k.save('keras_output.png')
def main(args):
    serialized_plan_fp32 = args.engine_file
    print("[INFO] Loading Engine...")
    engine = eng.load_engine(trt_runtime, serialized_plan_fp32)

    print("[INFO] Allocating Buffers...")
    # Allocate FP16 buffers once and reuse them for every batch.
    h_input, d_input, h_output, d_output, stream = inf.allocate_buffers(
        engine, config.batch_size, trt.float16)

    print("[INFO] Applying Inference...")
    disp_tensors_pred = []
    disp_tensors_gt = []
    for i in range(config.NUM_VAL // config.batch_size):
        force_tensor, disp_tensor_gt = next(gen)
        start = time.time()
        TensorRT_pred = inf.do_inference(engine, force_tensor, h_input,
                                         d_input, h_output, d_output, stream,
                                         config.batch_size)
        end = time.time()
        print("Inference time, including buffer copies:", end - start)
        print("TensorRT_pred shape:", TensorRT_pred.shape)
        disp_tensors_pred.append(TensorRT_pred)
        disp_tensors_gt.append(disp_tensor_gt)

    shape = (-1, config.data_shape[0], config.data_shape[1],
             config.data_shape[2], config.data_shape[3])
    disp_tensors_pred = np.asarray(disp_tensors_pred).reshape(shape)
    disp_tensors_gt = np.asarray(disp_tensors_gt).reshape(shape)
    print(disp_tensors_pred.shape)

    # Visualize predictions next to the ground truth.
    Visualize.gen_video(disp_tensors_pred, disp_tensors_gt, config)
def route_inference():
    data = parse_json(request.data)
    features, facts, insights, relationships, descriptions = \
        inference.do_inference(data)
    # Strip raw numpy arrays so the payload is JSON-serializable.
    facts = recursive_numpy_array_removal(facts)
    output_string = json.dumps({
        'features': features,
        'facts': facts,
        'insights': insights,
        'relationships': relationships,
        'feature_descriptions': descriptions
    })
    return output_string
def classify(self, original_image, top_k=1):
    start_time = time.time()
    # cv2.resize expects dsize as (width, height).
    image = cv2.resize(original_image, (self.width, self.height))
    image = self.preprocess(image)
    results = inf.do_inference(self.context, self.engine, image, self.h_input,
                               self.d_input, self.h_output, self.d_output,
                               self.stream, 1, self.height, self.width)
    elapsed_ms = (time.time() - start_time) * 1000
    fps = 1000 / elapsed_ms

    # Top-1 prediction, drawn onto the original frame.
    idx = np.argmax(results)
    prob = results[idx]
    text = 'Class: %s Confidence: %.2f FPS: %.1f' % (self.labels[idx], prob, fps)
    cv2.putText(original_image, text, (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
                self.width / 400, (0, 0, 255), 2, cv2.LINE_AA)
    return original_image
import cv2
import numpy as np
import skimage.transform
import tensorrt as trt
import engine as eng
import inference as inf


def sub_mean_chw(data):
    data = data.transpose((1, 2, 0))  # CHW -> HWC
    data -= np.array(MEAN)            # Broadcast-subtract the per-channel mean
    data = data.transpose((2, 0, 1))  # HWC -> CHW
    return data


def rescale_image(image, output_shape, order=1):
    image = skimage.transform.resize(image, output_shape, order=order,
                                     preserve_range=True, mode='reflect')
    return image


input_file_path = "data/yolact_example_0.png"
serialized_plan_fp32 = "my_engine.trt"
HEIGHT = 550
WIDTH = 550

# A TensorRT runtime is needed to deserialize the engine plan.
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
trt_runtime = trt.Runtime(TRT_LOGGER)

img = cv2.imread(input_file_path)
print(img.shape)
dim = (WIDTH, HEIGHT)
img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
print(img.shape)

engine = eng.load_engine(trt_runtime, serialized_plan_fp32)
h_input, d_input, h_output, d_output, stream = inf.allocate_buffers(
    engine, 1, trt.float32)
out = inf.do_inference(engine, img, h_input, d_input, h_output, d_output,
                       stream, 1, HEIGHT, WIDTH)
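The `eng.load_engine` and `inf.do_inference` helpers used throughout are likewise external. A minimal sketch of the usual implementations, assuming the same PyCUDA pattern as in the `allocate_buffers` sketch above; the `height`/`width` arguments are kept only for signature compatibility, and this is a reconstruction, not the actual module:

def load_engine(trt_runtime, plan_path):
    # Deserialize a saved engine plan file into an ICudaEngine.
    with open(plan_path, 'rb') as f:
        engine_data = f.read()
    return trt_runtime.deserialize_cuda_engine(engine_data)


def do_inference(engine, pics, h_input, d_input, h_output, d_output,
                 stream, batch_size, height=None, width=None):
    # Copy the preprocessed input into the pinned host buffer.
    np.copyto(h_input, np.asarray(pics).ravel())
    with engine.create_execution_context() as context:
        # Host -> device copy, execution, device -> host copy,
        # all queued on the same stream.
        cuda.memcpy_htod_async(d_input, h_input, stream)
        context.execute_async(batch_size=batch_size,
                              bindings=[int(d_input), int(d_output)],
                              stream_handle=stream.handle)
        cuda.memcpy_dtoh_async(h_output, d_output, stream)
        stream.synchronize()
    return h_output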
image = np.asarray(Image.open(input_file_path))
img = rescale_image(image, (512, 1024), order=1)
im = np.array(img, dtype=np.float32, order='C')
im = im.transpose((2, 0, 1))
im = sub_mean_chw(im)

engine = eng.load_engine(trt_runtime, serialized_plan_fp32)
h_input, d_input, h_output, d_output, stream = inf.allocate_buffers(
    engine, 1, trt.float32)
out = inf.do_inference(engine, im, h_input, d_input, h_output, d_output,
                       stream, 1, HEIGHT, WIDTH, output_image=True)
out = color_map(out)
colorImage_trt = Image.fromarray(out.astype(np.uint8))
colorImage_trt.save("trt_output.png")

semantic_model = keras.models.load_model(
    '/content/drive/My Drive/Colab Notebooks/deployment/semantic_segmentation.hdf5')
out_keras = semantic_model.predict(im.reshape(-1, 3, HEIGHT, WIDTH))
# L2-normalize the known-face embeddings so distances are comparable.
faces_embeddings = normalize_vectors(faces_embeddings)

# Detect and crop the face from the input image.
detector = MTCNN()
face_array = extract_face_from_image(input_file_path, detector)

# Standardize pixel values across channels (global).
face_pixels = face_array.astype('float32')
mean, std = face_pixels.mean(), face_pixels.std()
face_pixels = (face_pixels - mean) / std

# Transform the face into a single-sample batch.
samples = np.expand_dims(face_pixels, axis=0)

# Run the TensorRT engine to get the face embedding.
engine = eng.load_engine(trt_runtime, engine_path)
h_input, d_input, h_output, d_output, stream = inf.allocate_buffers(
    engine, 1, trt.float32)
yhat = inf.do_inference(engine, samples, h_input, d_input, h_output, d_output,
                        stream, 1, HEIGHT, WIDTH)
print(yhat.shape)

# Match against the known embeddings by minimum L2 distance.
face_to_predict_embedding = normalize_vectors(yhat)
result = predict_using_min_l2_distance(faces_embeddings, labels,
                                       face_to_predict_embedding)
print('Predicted name: %s' % str(result).title())
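The `normalize_vectors` and `predict_using_min_l2_distance` helpers are not defined in the snippet. Judging by their names, a plausible minimal sketch (an assumption, not the original code) is L2 normalization followed by a nearest-neighbour lookup under Euclidean distance:

import numpy as np


def normalize_vectors(vectors):
    # Scale each embedding to unit L2 norm so that Euclidean distance
    # between embeddings becomes a meaningful similarity measure.
    vectors = np.asarray(vectors)
    norms = np.linalg.norm(vectors, axis=-1, keepdims=True)
    return vectors / norms


def predict_using_min_l2_distance(embeddings, labels, candidate):
    # Return the label of the stored embedding closest to the candidate.
    distances = np.linalg.norm(embeddings - candidate, axis=1)
    return labels[int(np.argmin(distances))]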