# NOTE(review): this whole line is a near-verbatim duplicate of the tail of
# the statement sequence below — it looks like a mangled paste.  Kept
# functionally identical here; reformatted, commented-out dead code removed.

img = img[:, :, ::-1]  # flip channel order (BGR -> RGB, presumably — TODO confirm model input format)
# NOTE(review): [:, :, ::-1] yields a negative-stride view; some cv2 drawing
# calls require a contiguous buffer — consider np.ascontiguousarray(img).
inp = tf.convert_to_tensor(img[None], tf.float32)
inp = tf.concat([inp, inp], 0)  # batch of two identical images (batch-inference smoke test)
outs = model(inp, training=False)

batch_idx = 1  # inspect the second (duplicate) batch element
num = int(outs["valid_detections"].numpy()[batch_idx])
boxes = outs["nmsed_boxes"].numpy()[batch_idx]
scores = outs["nmsed_scores"].numpy()[batch_idx]
classes = outs["nmsed_classes"].numpy()[batch_idx]

for i in range(num):
    box = boxes[i]
    c = classes[i] + 1  # shift class id by one — presumably 0-based model ids vs 1-based COCO ids; TODO confirm
    print(box, c)
    img = draw(img, box, c, scores[i], coco_id_mapping, random_color(int(c)))

# NOTE(review): cv2.imshow interprets the buffer as BGR; since img was
# channel-flipped above, the preview will show red/blue swapped — confirm
# this is intended.
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Export the detector as a SavedModel and the full model weights as H5.
tf.saved_model.save(
    model.detector,
    "/home/bail/Data/data2/pretrained_weights/%s" % torch_weight_name)
model.save_weights(
    "/home/bail/Data/data2/pretrained_weights/%s.h5" % torch_weight_name)
# Load converted PyTorch weights into the TF detector, run a single
# sanity-check inference on a test image, visualise the detections, then
# export the model (SavedModel + H5).
_get_weights_from_pretrained(
    model.detector,
    "/home/bail/Data/data2/pretrained_weights/%s.pth" % torch_weight_name)

img = cv2.imread("/home/bail/Workspace/TRTNets/images/bus.jpg")
img, _, _, _ = preprocess(img, 1024)  # resize/pad to 1024 — only the image output is used here
img = img[:, :, ::-1]  # flip channel order (BGR -> RGB, presumably — TODO confirm model input format)
# NOTE(review): [:, :, ::-1] yields a negative-stride view; some cv2 drawing
# calls require a contiguous buffer — consider np.ascontiguousarray(img).
inp = tf.convert_to_tensor(img[None], tf.float32)
inp = tf.concat([inp, inp], 0)  # batch of two identical images (batch-inference smoke test)
outs = model(inp, training=False)

batch_idx = 1  # inspect the second (duplicate) batch element
num = int(outs["valid_detections"].numpy()[batch_idx])
boxes = outs["nmsed_boxes"].numpy()[batch_idx]
scores = outs["nmsed_scores"].numpy()[batch_idx]
classes = outs["nmsed_classes"].numpy()[batch_idx]

for i in range(num):
    box = boxes[i]
    c = classes[i] + 1  # shift class id by one — presumably 0-based model ids vs 1-based COCO ids; TODO confirm
    print(box, c)
    img = draw(img, box, c, scores[i], coco_id_mapping, random_color(int(c)))

# NOTE(review): cv2.imshow interprets the buffer as BGR; since img was
# channel-flipped above, the preview will show red/blue swapped — confirm
# this is intended.
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

# Export the detector as a SavedModel and the full model weights as H5.
tf.saved_model.save(
    model.detector,
    "/home/bail/Data/data2/pretrained_weights/%s" % torch_weight_name)
model.save_weights(
    "/home/bail/Data/data2/pretrained_weights/%s.h5" % torch_weight_name)