Example No. 1
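    # Builds the RGB and/or flow inference branches selected in self.opt.
    # Assumption: create_inference_model, load_inference_model, convert2flow
    # and the chunk-size-aware DataParallel are helpers imported by the
    # surrounding module; note that only the branch (not the backbone) is
    # wrapped in DataParallel in this variant.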
    def load_branch(self):
        opt = self.opt
        if opt.rgb_model != '':
            print('create rgb model')
            self.rgb_model_backbone, self.rgb_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.rgb_model_backbone, self.rgb_model_branch = load_inference_model(
                self.rgb_model_backbone, self.rgb_model_branch, opt.rgb_model)
            self.rgb_model_branch = DataParallel(
                self.rgb_model_branch,
                device_ids=opt.gpus,
                chunk_sizes=opt.chunk_sizes).to(opt.device)
            self.rgb_model_branch.eval()
        if opt.flow_model != '':
            print('create flow model')
            self.flow_model_backbone, self.flow_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.flow_model_backbone = convert2flow(opt.ninput,
                                                    self.flow_model_backbone)
            self.flow_model_backbone, self.flow_model_branch = load_inference_model(
                self.flow_model_backbone, self.flow_model_branch,
                opt.flow_model)
            self.flow_model_branch = DataParallel(
                self.flow_model_branch,
                device_ids=opt.gpus,
                chunk_sizes=opt.chunk_sizes).to(opt.device)
            self.flow_model_branch.eval()
Example No. 2
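    # Same stream-construction pattern, run once in the constructor: each
    # selected stream is created, loaded from its checkpoint, wrapped in the
    # project's chunk-size-aware DataParallel (backbone and branch alike,
    # pinned to the first GPU), moved to opt.device and put in eval mode.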
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        self.rgb_model_backbone, self.rgb_model_branch = None, None
        self.flow_model_backbone, self.flow_model_branch = None, None
        if opt.rgb_model != '':
            print('create rgb model')
            self.rgb_model_backbone, self.rgb_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.rgb_model_backbone, self.rgb_model_branch = load_inference_model(
                self.rgb_model_backbone, self.rgb_model_branch, opt.rgb_model)
            self.rgb_model_backbone = DataParallel(
                self.rgb_model_backbone,
                device_ids=[opt.gpus[0]],
                chunk_sizes=[1]).to(opt.device)
            self.rgb_model_branch = DataParallel(
                self.rgb_model_branch,
                device_ids=[opt.gpus[0]],
                chunk_sizes=[1]).to(opt.device)
            self.rgb_model_backbone.eval()
            self.rgb_model_branch.eval()
        if opt.flow_model != '':
            print('create flow model')
            self.flow_model_backbone, self.flow_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.flow_model_backbone = convert2flow(opt.ninput,
                                                    self.flow_model_backbone)
            self.flow_model_backbone, self.flow_model_branch = load_inference_model(
                self.flow_model_backbone, self.flow_model_branch,
                opt.flow_model)
            self.flow_model_backbone = DataParallel(
                self.flow_model_backbone,
                device_ids=[opt.gpus[0]],
                chunk_sizes=[1]).to(opt.device)
            self.flow_model_branch = DataParallel(
                self.flow_model_branch,
                device_ids=[opt.gpus[0]],
                chunk_sizes=[1]).to(opt.device)
            self.flow_model_backbone.eval()
            self.flow_model_branch.eval()

        self.num_classes = opt.num_classes
        self.opt = opt

        self.rgb_buffer = []
        self.flow_buffer = []
        self.rgb_buffer_flip = []
        self.flow_buffer_flip = []
Example No. 3
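    # Variant without DataParallel: backbone, deconv and branch modules are
    # moved to opt.device directly; flush=True makes the progress prints
    # visible immediately when stdout is redirected.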
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            raise AssertionError('cpu is not supported!')

        self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = None, None, None
        self.flow_model_backbone, self.flow_model_branch = None, None
        if opt.rgb_model != '':
            self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            print('create rgb model', flush=True)
            self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = load_inference_model(
                self.rgb_model_backbone, self.rgb_model_deconv,
                self.rgb_model_branch, opt.rgb_model)
            print('load rgb model', flush=True)
            self.rgb_model_backbone = self.rgb_model_backbone.to(opt.device)
            self.rgb_model_deconv = self.rgb_model_deconv.to(opt.device)
            self.rgb_model_branch = self.rgb_model_branch.to(opt.device)
            print('put rgb model to gpu', flush=True)
            self.rgb_model_backbone.eval()
            self.rgb_model_deconv.eval()
            self.rgb_model_branch.eval()
        if opt.flow_model != '':
            self.flow_model_backbone, self.flow_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.flow_model_backbone = convert2flow(opt.ninput,
                                                    self.flow_model_backbone)
            print('create flow model', flush=True)
            self.flow_model_backbone, self.flow_model_branch = load_inference_model(
                self.flow_model_backbone, self.flow_model_branch,
                opt.flow_model)
            print('load flow model', flush=True)
            self.flow_model_backbone = self.flow_model_backbone.to(opt.device)
            self.flow_model_branch = self.flow_model_branch.to(opt.device)
            print('put flow model to gpu', flush=True)
            self.flow_model_backbone.eval()
            self.flow_model_branch.eval()

        self.num_classes = opt.num_classes
        self.opt = opt

        self.rgb_buffer = []
        self.flow_buffer = []
        self.rgb_buffer_flip = []
        self.flow_buffer_flip = []
Example No. 4
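    # Variant with an additional PA stream (with optional PAN/TDN fuse
    # modes); note the elif chain makes the rgb, flow and pa streams
    # mutually exclusive, unlike the independent ifs in the earlier
    # examples.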
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            raise AssertionError('cpu is not supported!')

        self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = None, None, None
        self.flow_model_backbone, self.flow_model_branch = None, None
        self.pa_model_backbone, self.pa_model_deconv, self.pa_model_branch = None, None, None
        if opt.rgb_model != '':
            self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            print('create rgb model', flush=True)
            self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = load_inference_model(
                self.rgb_model_backbone, self.rgb_model_deconv,
                self.rgb_model_branch, opt.rgb_model)
            print('load rgb model', flush=True)
            self.rgb_model_backbone = self.rgb_model_backbone.to(opt.device)
            self.rgb_model_deconv = self.rgb_model_deconv.to(opt.device)
            self.rgb_model_branch = self.rgb_model_branch.to(opt.device)
            print('put rgb model to gpu', flush=True)
            self.rgb_model_backbone.eval()
            self.rgb_model_deconv.eval()
            self.rgb_model_branch.eval()

        elif opt.flow_model != '':
            self.flow_model_backbone, self.flow_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.flow_model_backbone = convert2flow(opt.ninput,
                                                    self.flow_model_backbone)
            print('create flow model', flush=True)
            self.flow_model_backbone, self.flow_model_branch = load_inference_model(
                self.flow_model_backbone, self.flow_model_branch,
                opt.flow_model)
            print('load flow model', flush=True)
            self.flow_model_backbone = self.flow_model_backbone.to(opt.device)
            self.flow_model_branch = self.flow_model_branch.to(opt.device)
            print('put flow model to gpu', flush=True)
            self.flow_model_backbone.eval()
            self.flow_model_branch.eval()

        elif opt.pa_model != '':
            self.pa_model_PA, self.pa_model_backbone, self.pa_model_deconv, self.pa_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test,
                is_pa=True,
                pa_fuse_mode=opt.pa_fuse_mode,
                rgb_w3=opt.rgb_w3)
            print('create PA model', flush=True)
            '''
            if opt.pa_fuse_mode == 'PAN':
                self.pa_model = convert2PAN(opt.ninput, self.pa_model_backbone, conv_idx=1)
            
            elif opt.pa_fuse_mode == 'TDN':
                self.pa_model = convert2TDN(opt.ninput, self.pa_model_backbone, conv_idx=2)  # conv_idx 1 vs. 2 does not matter here, since the trained weights are loaded below
            
            # Single PAN stream
            else:
                self.pa_model = convert2PAN(opt.ninput, self.pa_model, conv_idx=1)
            '''
            self.pa_model_PA, self.pa_model_backbone, self.pa_model_deconv, self.pa_model_branch = load_inference_model(
                self.pa_model_PA, self.pa_model_backbone, self.pa_model_deconv,
                self.pa_model_branch, opt.pa_model)
            print('load PA model', flush=True)

            self.pa_model_PA = self.pa_model_PA.to(opt.device)
            self.pa_model_backbone = self.pa_model_backbone.to(opt.device)
            self.pa_model_deconv = self.pa_model_deconv.to(opt.device)
            self.pa_model_branch = self.pa_model_branch.to(opt.device)

            print('put PA model to gpu', flush=True)
            self.pa_model_PA.eval()
            self.pa_model_backbone.eval()
            self.pa_model_deconv.eval()
            self.pa_model_branch.eval()

        self.num_classes = opt.num_classes
        self.opt = opt

        self.rgb_buffer = []
        self.flow_buffer = []
        self.rgb_buffer_flip = []
        self.flow_buffer_flip = []

        self.pa_buffer = []
        self.pa_buffer_flip = []

        # added: for speed measurement
        self.total_time = 0
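
All four variants follow the same construction pattern: an opt namespace selects which streams to build, and each selected stream is created, loaded from its checkpoint, moved to the device, and switched to eval mode. The sketch below shows how such a constructor might be driven; the class name OnlineDetector, the checkpoint path, and the branch_info values are illustrative assumptions, and the class itself (plus the project helpers it calls) must be importable from the surrounding repository.

from argparse import Namespace

# Hypothetical option set covering every field the constructors above read.
# All values here are illustrative, not the project's defaults.
opt = Namespace(
    gpus=[0],                      # first entry >= 0 selects CUDA
    device=None,                   # filled in by the constructor
    rgb_model='weights/rgb.pth',   # non-empty string -> build the RGB stream
    flow_model='',                 # empty string -> skip the flow stream
    pa_model='',
    arch='resnet_18',
    branch_info={'hm': 24, 'mov': 14, 'wh': 14},  # head name -> output channels
    head_conv=256,
    K=7,
    flip_test=False,
    ninput=5,
    num_classes=24,
    chunk_sizes=[1],
)

# 'OnlineDetector' is an assumed name for the class the __init__ methods
# above belong to; import it from wherever the project defines it.
detector = OnlineDetector(opt)
assert detector.rgb_model_backbone is not None   # RGB stream was built
assert detector.flow_model_backbone is None      # flow stream was skipped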