Example #1
 def __init__(self):
     self.trt_logger = trt.Logger(trt.Logger.WARNING)
     if os.path.exists(args.trt_save_path):
         # If a serialized engine exists, load it instead of building a new one.
         print("Reading engine from file {}".format(args.trt_save_path))
         with open(args.trt_save_path,
                   "rb") as f, trt.Runtime(self.trt_logger) as runtime:
             engine = runtime.deserialize_cuda_engine(f.read())
     else:
         engine = gen_trt_engine()
     self.engine = Engine(engine)
     self.inputs_shape = self.engine.inputs[0].shape
     print('engine input shape', self.inputs_shape)
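
Example #1 only covers the load path. For reference, the matching save step wraps ICudaEngine.serialize(); the sketch below is an assumption about how gen_trt_engine() and args.trt_save_path from the snippet would fit together, not part of the original code.

def save_trt_engine(engine, path):
    # ICudaEngine.serialize() returns the engine plan as a binary blob
    # that runtime.deserialize_cuda_engine() can read back later.
    with open(path, "wb") as f:
        f.write(engine.serialize())

# Hypothetical usage mirroring the snippet above:
# save_trt_engine(gen_trt_engine(), args.trt_save_path)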
Example #2
 def __init__(self):
     self.trt_logger = trt.Logger(trt.Logger.WARNING)
     opt = Ops()
     opt.n_classes = 2
     opt.batch_size = 20
     opt.sample_size = (112, 112)
     opt.n_input_channels = 3
     opt.mean, opt.std = get_mean_std(opt.value_scale,
                                      dataset=opt.mean_dataset)
     self.spatial_transform = get_spatial_transform(opt)
     engine = ModelTensorRT(self.trt_logger, resnet_model_url,
                            opt).generate_engine()
     self.engine = Engine(engine)
     self.opt = opt
     self.inputs_shape = self.engine.inputs[0].shape
     print('engine input shape', self.inputs_shape)
Example #3
    def __init__(self):
        self.trt_logger = trt.Logger(trt.Logger.WARNING)
        resnet_model_url = '/DATA/disk1/libing/online/3D-ResNets-PyTorch/data/results_size_112_56/save_30.pth'

        opt = Ops()
        opt.n_classes = 13
        opt.sample_size = (112, 56)
        opt.batch_size = 1
        opt.inference_crop = 'resize'
        opt.n_input_channels = 3
        opt.mean, opt.std = get_mean_std(opt.value_scale,
                                         dataset=opt.mean_dataset)
        self.spatial_transform = get_spatial_transform(opt)
        engine = ModelTensorRT(self.trt_logger, resnet_model_url,
                               opt).generate_engine()
        self.engine = Engine(engine)
        self.opt = opt
        self.inputs_shape = self.engine.inputs[0].shape
        print('engine input shape', self.inputs_shape)
Example #4
class ModelTensorRT:
    def __init__(self):
        self.trt_logger = trt.Logger(trt.Logger.WARNING)
        if os.path.exists(args.trt_save_path):
            # If a serialized engine exists, load it instead of building a new one.
            print("Reading engine from file {}".format(args.trt_save_path))
            with open(args.trt_save_path,
                      "rb") as f, trt.Runtime(self.trt_logger) as runtime:
                engine = runtime.deserialize_cuda_engine(f.read())
        else:
            engine = gen_trt_engine()
        self.engine = Engine(engine)
        self.inputs_shape = self.engine.inputs[0].shape
        print('engine input shape', self.inputs_shape)

    def less_predict(self, inputs):
        print('inputs batch less than engine inputs')
        inp_batch = inputs.shape[0]
        inputs = np.vstack([
            inputs,
            np.zeros(
                (self.inputs_shape[0] - inp_batch, *self.inputs_shape[1:]),
                dtype=np.float16)
        ])
        outputs = self.engine.run([inputs])
        outputs0 = outputs[0][:inp_batch, :]
        outputs1 = outputs[1][:inp_batch, :]
        return outputs0, outputs1

    def forward(self, img_path):
        try:
            img = cv2.imread(img_path)
            transforms_train, transforms_val = get_transforms(
                config["image_size"])
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            res = transforms_val(image=img)
            img1 = res['image'].astype(np.float32)
            img1 = img1.transpose(2, 0, 1)
            inputs = img1
            inputs = np.expand_dims(inputs, axis=0)
            inputs = np.array(inputs, copy=True, dtype=np.float16)
            inp_batch = inputs.shape[0]
            if inp_batch < self.inputs_shape[0]:
                outputs0, outputs1 = self.less_predict(inputs)
            elif inp_batch == self.inputs_shape[0]:
                print('batch size equal ')
                outputs = self.engine.run([inputs])
                outputs0 = outputs[0]
                outputs1 = outputs[1]
            else:
                print('inputs batch greater than engine inputs')
                outputs0 = []
                outputs1 = []
                ixs = list(range(0, inp_batch,
                                 self.inputs_shape[0])) + [inp_batch]
                for i in ixs:
                    if i != 0:
                        inp = inputs[li:i, :]
                        if inp.shape[0] == self.inputs_shape[0]:
                            outs = self.engine.run([inp])
                            outs0, outs1 = outs[0], outs[1]
                        else:
                            outs0, outs1 = self.less_predict(inp)
                        t0 = outs0.copy()
                        outputs0.append(t0)
                        t1 = outs1.copy()
                        outputs1.append(t1)
                    li = i
                outputs0 = np.vstack(outputs0)
                outputs1 = np.vstack(outputs1)
            outputs0 = torch.tensor(outputs0)
            outputs1 = torch.tensor(outputs1)
            print("outputs0:", outputs0)
            print("outputs1:", outputs1)
            probs_color = F.softmax(outputs0, dim=1)
            probs_color = probs_color.cpu().detach().numpy()
            outputs_color = probs_color.argmax(1)
            probs_color = [
                probs_color[i][outputs_color[i]]
                for i in range(len(outputs_color))
            ]
            probs_action = F.softmax(outputs1, dim=1)
            probs_action = probs_action.cpu().detach().numpy()
            outputs_action = probs_action.argmax(1)
            probs_action = [
                probs_action[i][outputs_action[i]]
                for i in range(len(outputs_action))
            ]
            return outputs_color, probs_color, outputs_action, probs_action
        except Exception as e:
            raise e
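
A minimal usage sketch for the class in Example #4, assuming args, config, get_transforms and gen_trt_engine are already defined as the snippet expects; the image path below is only a placeholder.

model = ModelTensorRT()
# forward() returns class indices and their softmax probabilities for the
# two output heads (color and action), one entry per input image.
colors, color_probs, actions, action_probs = model.forward("sample.jpg")
print("color:", colors[0], color_probs[0])
print("action:", actions[0], action_probs[0])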
Example #5
def process_image(img_path):
    image = Image.open(img_path)
    image = np.array(image)
    image = cv2.resize(image, (H, W))  # note: cv2.resize expects dsize as (width, height)
    # image = (image / 255.0).astype(np.float32)
    image = Image.fromarray(image)
    image = test_transforms(image)
    image = image.unsqueeze(0)  ##(1,H,W,C)
    return image


cfg = Config()

G_LOGGER = trt.infer.ConsoleLogger(trt.infer.LogSeverity.INFO)
trt_engine = trt.utils.load_engine(G_LOGGER, 'engine/test_engine.engine')
CROWD_ENGINE = Engine(trt_engine)

# img_path = 'head_data/001/1/0_3.jpg'
#
# ims = process_image(img_path)
# np_ims = np.asarray(ims.data.cpu())
#
# start = time.time()
# result = CROWD_ENGINE.run([np_ims])
# print(result)
# print(time.time()-start)


def get_feature(img_path):  # bbox shape must be []
    ims = process_image(img_path)
    np_ims = np.asarray(ims.data.cpu())
Example #6
class Res3DModelTensorRT:
    def __init__(self):
        self.trt_logger = trt.Logger(trt.Logger.WARNING)
        resnet_model_url = '/DATA/disk1/libing/online/3D-ResNets-PyTorch/data/results_size_112_56/save_30.pth'

        opt = Ops()
        opt.n_classes = 13
        opt.sample_size = (112, 56)
        opt.batch_size = 1
        opt.inference_crop = 'resize'
        opt.n_input_channels = 3
        opt.mean, opt.std = get_mean_std(opt.value_scale,
                                         dataset=opt.mean_dataset)
        self.spatial_transform = get_spatial_transform(opt)
        engine = ModelTensorRT(self.trt_logger, resnet_model_url,
                               opt).generate_engine()
        self.engine = Engine(engine)
        self.opt = opt
        self.inputs_shape = self.engine.inputs[0].shape
        print('engine input shape', self.inputs_shape)

    def less_predict(self, inputs):
        print('inputs batch less than engine inputs')
        inp_batch = inputs.shape[0]
        inputs = np.vstack([
            inputs,
            np.zeros(
                (self.inputs_shape[0] - inp_batch, *self.inputs_shape[1:]),
                dtype=np.float16)
        ])
        outputs = self.engine.run([inputs])[0]
        outputs = outputs[:inp_batch, :]
        return outputs

    def forward(self, clips, batch_flag=True):
        try:
            if batch_flag:
                clip = batch_preprocessing(clips, self.spatial_transform)
            else:
                clip = preprocessing(clips, self.spatial_transform)
            inputs = clip.cpu().numpy()
            inputs = np.array(inputs, copy=True, dtype=np.float16)
            inp_batch = inputs.shape[0]
            if inp_batch < self.inputs_shape[0]:
                outputs = self.less_predict(inputs)
            elif inp_batch == self.inputs_shape[0]:
                print('batch size equal ')
                outputs = self.engine.run([inputs])[0]
            else:
                print('inputs batch greater than engine inputs')
                outputs = []
                # End the index list with inp_batch so the trailing partial
                # chunk is not silently dropped (as in Example #4 above).
                ixs = list(range(0, inp_batch,
                                 self.inputs_shape[0])) + [inp_batch]
                for i in ixs:
                    if i != 0:
                        inp = inputs[li:i, :]
                        if inp.shape[0] == self.inputs_shape[0]:
                            outs = self.engine.run([inp])[0]
                        else:
                            outs = self.less_predict(inp)
                        t = outs.copy()
                        outputs.append(t)
                    li = i
                outputs = np.vstack(outputs)
            # pickle.dump(outputs,open('./outpus.pkl','wb'))
            outputs = torch.tensor(outputs)
            outputs = F.softmax(outputs, dim=1).cpu()
            score, class_prediction = torch.max(outputs, 1)
            classes = [
                "ballet_foot_stretch", "negtive", "ballet_openfly",
                "ballet_waist", "basketball_bounce", "basketball_dribble",
                "basketball_shoot", "soccer_carry", "soccer_highfive",
                "soccer_shoot", "backhand_hit", "forehand_hit", "serve"
            ]

            return score.item(), classes[class_prediction]
        except Exception as e:
            raise e
Example #7
class ResNetModelTensorRT:
    def __init__(self):
        self.trt_logger = trt.Logger(trt.Logger.WARNING)
        opt = Ops()
        opt.n_classes = 2
        opt.batch_size = 20
        opt.sample_size = (112, 112)
        opt.n_input_channels = 3
        opt.mean, opt.std = get_mean_std(opt.value_scale,
                                         dataset=opt.mean_dataset)
        self.spatial_transform = get_spatial_transform(opt)
        engine = ModelTensorRT(self.trt_logger, resnet_model_url,
                               opt).generate_engine()
        self.engine = Engine(engine)
        self.opt = opt
        self.inputs_shape = self.engine.inputs[0].shape
        print('engine input shape', self.inputs_shape)

    def less_predict(self, inputs):
        print('inputs batch less than engine inputs')
        inp_batch = inputs.shape[0]
        inputs = np.vstack([
            inputs,
            np.zeros(
                (self.inputs_shape[0] - inp_batch, *self.inputs_shape[1:]),
                dtype=np.float16)
        ])
        outputs = self.engine.run([inputs])[0]
        outputs = outputs[:inp_batch, :]
        return outputs

    def forward(self, clips, batch_flag=True):
        try:
            if batch_flag:
                clip = batch_preprocessing(clips, self.spatial_transform)
            else:
                clip = preprocessing(clips, self.spatial_transform)
            inputs = clip.cpu().numpy()
            inputs = np.array(inputs, copy=True, dtype=np.float16)
            inp_batch = inputs.shape[0]
            if inp_batch < self.inputs_shape[0]:
                outputs = self.less_predict(inputs)
            elif inp_batch == self.inputs_shape[0]:
                print('batch size equal ')
                outputs = self.engine.run([inputs])[0]
            else:
                print('inputs batch greater than engine inputs')
                outputs = []
                # End the index list with inp_batch so the trailing partial
                # chunk is not silently dropped (as in Example #4 above).
                ixs = list(range(0, inp_batch,
                                 self.inputs_shape[0])) + [inp_batch]
                for i in ixs:
                    if i != 0:
                        inp = inputs[li:i, :]
                        if inp.shape[0] == self.inputs_shape[0]:
                            outs = self.engine.run([inp])[0]
                        else:
                            outs = self.less_predict(inp)
                        t = outs.copy()
                        outputs.append(t)
                    li = i
                outputs = np.vstack(outputs)
            pickle.dump(outputs, open('./outpus.pkl', 'wb'))
            outputs = torch.tensor(outputs)
            outputs = F.softmax(outputs, dim=1).cpu()
            score, class_prediction = torch.max(outputs, 1)
            classes = ['basketball_shot', 'negtive']
            return score, [classes[i] for i in class_prediction]
        except Exception as e:
            raise e
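
The batch-splitting pattern is the same in Examples #4, #6 and #7: slice the input into engine-sized chunks and let less_predict zero-pad the short tail. A standalone sketch of just that index arithmetic, using NumPy and hypothetical batch sizes, makes the behaviour easy to verify:

import numpy as np

def split_into_chunks(inputs, engine_batch):
    # Boundaries end with inputs.shape[0] so the trailing partial chunk is
    # included; an equal pair of boundaries (exact multiple) yields nothing.
    bounds = list(range(0, inputs.shape[0], engine_batch)) + [inputs.shape[0]]
    for lo, hi in zip(bounds[:-1], bounds[1:]):
        if lo != hi:
            yield inputs[lo:hi]

chunks = split_into_chunks(np.zeros((45, 3, 112, 112), dtype=np.float16), 20)
print([c.shape[0] for c in chunks])  # -> [20, 20, 5]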