Example #1
    def load_branch(self):
        opt = self.opt
        if opt.rgb_model != '':
            print('create rgb model')
            self.rgb_model_backbone, self.rgb_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.rgb_model_backbone, self.rgb_model_branch = load_inference_model(
                self.rgb_model_backbone, self.rgb_model_branch, opt.rgb_model)
            self.rgb_model_branch = DataParallel(
                self.rgb_model_branch,
                device_ids=opt.gpus,
                chunk_sizes=opt.chunk_sizes).to(opt.device)
            self.rgb_model_branch.eval()
        if opt.flow_model != '':
            print('create flow model')
            self.flow_model_backbone, self.flow_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.flow_model_backbone = convert2flow(opt.ninput,
                                                    self.flow_model_backbone)
            self.flow_model_backbone, self.flow_model_branch = load_inference_model(
                self.flow_model_backbone, self.flow_model_branch,
                opt.flow_model)
            self.flow_model_branch = DataParallel(
                self.flow_model_branch,
                device_ids=opt.gpus,
                chunk_sizes=opt.chunk_sizes).to(opt.device)
            self.flow_model_branch.eval()
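
These examples lean on helpers from the surrounding codebase (`create_inference_model`, `load_inference_model`, `convert2flow`, and a `DataParallel` variant that accepts `chunk_sizes`). As a rough sketch of what a split-checkpoint loader like `load_inference_model` could look like (the key prefixes and checkpoint layout are assumptions, not the project's actual format):

    import torch


    def load_inference_model(backbone, branch, model_path):
        # Hypothetical sketch, not the repository's actual loader: assumes
        # the checkpoint holds a single state_dict whose keys are prefixed
        # with 'backbone.' or 'branch.'.
        checkpoint = torch.load(model_path, map_location='cpu')
        state = checkpoint.get('state_dict', checkpoint)
        backbone.load_state_dict(
            {k[len('backbone.'):]: v for k, v in state.items()
             if k.startswith('backbone.')}, strict=False)
        branch.load_state_dict(
            {k[len('branch.'):]: v for k, v in state.items()
             if k.startswith('branch.')}, strict=False)
        return backbone, branch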
Example #2
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')

        self.rgb_model_backbone, self.rgb_model_branch = None, None
        self.flow_model_backbone, self.flow_model_branch = None, None
        if opt.rgb_model != '':
            print('create rgb model')
            self.rgb_model_backbone, self.rgb_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.rgb_model_backbone, self.rgb_model_branch = load_inference_model(
                self.rgb_model_backbone, self.rgb_model_branch, opt.rgb_model)
            self.rgb_model_backbone = DataParallel(self.rgb_model_backbone,
                                                   device_ids=[opt.gpus[0]],
                                                   chunk_sizes=[1]).to(
                                                       opt.device)
            self.rgb_model_branch = DataParallel(self.rgb_model_branch,
                                                 device_ids=[opt.gpus[0]],
                                                 chunk_sizes=[1]).to(
                                                     opt.device)
            self.rgb_model_backbone.eval()
            self.rgb_model_branch.eval()
        if opt.flow_model != '':
            print('create flow model')
            self.flow_model_backbone, self.flow_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.flow_model_backbone = convert2flow(opt.ninput,
                                                    self.flow_model_backbone)
            self.flow_model_backbone, self.flow_model_branch = load_inference_model(
                self.flow_model_backbone, self.flow_model_branch,
                opt.flow_model)
            self.flow_model_backbone = DataParallel(self.flow_model_backbone,
                                                    device_ids=[opt.gpus[0]],
                                                    chunk_sizes=[1]).to(
                                                        opt.device)
            self.flow_model_branch = DataParallel(self.flow_model_branch,
                                                  device_ids=[opt.gpus[0]],
                                                  chunk_sizes=[1]).to(
                                                      opt.device)
            self.flow_model_backbone.eval()
            self.flow_model_branch.eval()

        self.num_classes = opt.num_classes
        self.opt = opt

        self.rgb_buffer = []
        self.flow_buffer = []
        self.rgb_buffer_flip = []
        self.flow_buffer_flip = []
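
A constructor like this is normally driven by a parsed options object. A minimal, hypothetical `opt` built by hand (every value below is illustrative; the real ones come from the project's option parser):

    from argparse import Namespace

    opt = Namespace(
        gpus=[0],
        device=None,                     # filled in by __init__
        rgb_model='rgb_checkpoint.pth',  # hypothetical checkpoint path
        flow_model='',                   # empty string skips the flow stream
        arch='dla_34',
        branch_info={'hm': 24, 'mov': 14, 'wh': 14},
        head_conv=256,
        K=7,
        flip_test=False,
        ninput=5,
        num_classes=24)

    detector = Detector(opt)  # 'Detector' stands in for the class owning this __init__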
Example #3
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            # a bare string assert is always truthy and never fires
            raise AssertionError('cpu is not supported!')

        self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = None, None, None
        self.flow_model_backbone, self.flow_model_branch = None, None
        if opt.rgb_model != '':
            self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            print('create rgb model', flush=True)
            self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = load_inference_model(
                self.rgb_model_backbone, self.rgb_model_deconv,
                self.rgb_model_branch, opt.rgb_model)
            print('load rgb model', flush=True)
            self.rgb_model_backbone = self.rgb_model_backbone.to(opt.device)
            self.rgb_model_deconv = self.rgb_model_deconv.to(opt.device)
            self.rgb_model_branch = self.rgb_model_branch.to(opt.device)
            print('put rgb model to gpu', flush=True)
            self.rgb_model_backbone.eval()
            self.rgb_model_deconv.eval()
            self.rgb_model_branch.eval()
        if opt.flow_model != '':
            self.flow_model_backbone, self.flow_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.flow_model_backbone = convert2flow(opt.ninput,
                                                    self.flow_model_backbone)
            print('create flow model', flush=True)
            self.flow_model_backbone, self.flow_model_branch = load_inference_model(
                self.flow_model_backbone, self.flow_model_branch,
                opt.flow_model)
            print('load flow model', flush=True)
            self.flow_model_backbone = self.flow_model_backbone.to(opt.device)
            self.flow_model_branch = self.flow_model_branch.to(opt.device)
            print('put flow model to gpu', flush=True)
            self.flow_model_backbone.eval()
            self.flow_model_branch.eval()

        self.num_classes = opt.num_classes
        self.opt = opt

        self.rgb_buffer = []
        self.flow_buffer = []
        self.rgb_buffer_flip = []
        self.flow_buffer_flip = []
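
With the model split into backbone / deconv / branch sub-modules, inference presumably chains them stage by stage. A hedged sketch of such a forward pass (the helper name and the stage interfaces are assumptions):

    import torch


    def run_rgb_stream(detector, images):
        # Hypothetical helper: chains the three sub-modules created above,
        # assuming each stage returns the tensor the next one expects.
        with torch.no_grad():
            feat = detector.rgb_model_backbone(images.to(detector.opt.device))
            feat = detector.rgb_model_deconv(feat)
            return detector.rgb_model_branch(feat)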
Example #4
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        self.rgb_model = None
        self.flow_model = None
        if opt.rgb_model != '':
            print('create rgb model')
            self.rgb_model = create_model(opt.arch,
                                          opt.branch_info,
                                          opt.head_conv,
                                          opt.K,
                                          flip_test=opt.flip_test)
            self.rgb_model = load_model(self.rgb_model, opt.rgb_model)
            self.rgb_model = DataParallel(self.rgb_model,
                                          device_ids=opt.gpus,
                                          chunk_sizes=opt.chunk_sizes).to(
                                              opt.device)
            self.rgb_model.eval()
        if opt.flow_model != '':
            print('create flow model')
            self.flow_model = create_model(opt.arch,
                                           opt.branch_info,
                                           opt.head_conv,
                                           opt.K,
                                           flip_test=opt.flip_test)
            self.flow_model = convert2flow(opt.ninput, self.flow_model)
            self.flow_model = load_model(self.flow_model, opt.flow_model)

            self.flow_model = DataParallel(self.flow_model,
                                           device_ids=opt.gpus,
                                           chunk_sizes=opt.chunk_sizes).to(
                                               opt.device)
            self.flow_model.eval()
        self.num_classes = opt.num_classes
        self.opt = opt
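
Here `convert2flow` adapts an RGB-pretrained network to stacked optical-flow input. A common way to do this, shown below as a generic sketch rather than this repository's implementation, is to rebuild the first convolution for `2 * ninput` channels (x/y flow per frame) and seed it with the channel-mean of the pretrained RGB kernels:

    import torch
    import torch.nn as nn


    def convert_first_conv_to_flow(ninput, conv):
        # Hypothetical helper, TSN-style cross-modality initialization:
        # new conv takes 2 * ninput flow channels, seeded with the mean of
        # the pretrained RGB kernels.
        new_conv = nn.Conv2d(2 * ninput,
                             conv.out_channels,
                             kernel_size=conv.kernel_size,
                             stride=conv.stride,
                             padding=conv.padding,
                             bias=conv.bias is not None)
        with torch.no_grad():
            mean_w = conv.weight.mean(dim=1, keepdim=True)  # average over RGB
            new_conv.weight.copy_(mean_w.repeat(1, 2 * ninput, 1, 1))
            if conv.bias is not None:
                new_conv.bias.copy_(conv.bias)
        return new_conv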
Example #5
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            opt.device = torch.device('cpu')
        self.rgb_model = None
        self.flow_model = None
        self.pa_model = None
        if opt.rgb_model != '':
            print('create rgb model')
            self.rgb_model = create_model(opt.arch,
                                          opt.branch_info,
                                          opt.head_conv,
                                          opt.K,
                                          flip_test=opt.flip_test,
                                          is_pa=False)
            self.rgb_model = load_model(self.rgb_model,
                                        opt.save_root + opt.rgb_model)
            '''
            # ADDED: debug param weights
            for i, child in enumerate(self.rgb_model.children()):
                if i < 2 : 
                    continue
                
                for l, param in enumerate(child.parameters()):
                    if l == 0:
                        vistensor(param)
                            
                    #param.requires_grad = False
                    #print(param.size())
            '''
            # ORIG
            self.rgb_model = DataParallel(self.rgb_model,
                                          device_ids=opt.gpus,
                                          chunk_sizes=opt.chunk_sizes).to(
                                              opt.device)

            self.rgb_model.eval()
        if opt.flow_model != '':
            print('create flow model')
            self.flow_model = create_model(opt.arch,
                                           opt.branch_info,
                                           opt.head_conv,
                                           opt.K,
                                           flip_test=opt.flip_test)
            self.flow_model = convert2flow(opt.ninput, self.flow_model)
            self.flow_model = load_model(self.flow_model, opt.flow_model)

            self.flow_model = DataParallel(self.flow_model,
                                           device_ids=opt.gpus,
                                           chunk_sizes=opt.chunk_sizes).to(
                                               opt.device)
            self.flow_model.eval()

        if opt.pa_model != '':
            print('create PA model')
            self.pa_model = create_model(opt.arch,
                                         opt.branch_info,
                                         opt.head_conv,
                                         opt.K,
                                         flip_test=opt.flip_test,
                                         is_pa=True,
                                         pa_fuse_mode=opt.pa_fuse_mode,
                                         rgb_w3=opt.rgb_w3)

            if opt.pa_fuse_mode == 'PAN':
                self.pa_model = convert2PAN(opt.ninput,
                                            self.pa_model,
                                            conv_idx=1)

            elif opt.pa_fuse_mode == 'TDN':
                pass
                #self.pa_model = convert2TDN(opt.ninput, self.pa_model, conv_idx=2)  # idx 1 or 2? It should not matter here, since the trained weights are loaded afterwards.

            # Single PAN stream
            else:
                self.pa_model = convert2PAN(opt.ninput,
                                            self.pa_model,
                                            conv_idx=1)

            self.pa_model = load_model(self.pa_model,
                                       opt.save_root + opt.pa_model)

            self.pa_model = DataParallel(
                self.pa_model,
                device_ids=opt.gpus,  # or [opt.gpus[0]] for a single GPU
                chunk_sizes=opt.chunk_sizes).to(opt.device)
            self.pa_model.eval()

        self.num_classes = opt.num_classes
        self.opt = opt

        # added: for speed measurement
        self.total_time = 0
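
The `total_time` counter suggests per-forward timing is accumulated elsewhere. A hedged sketch of how such a measurement could be taken (the wrapper name and call site are assumptions; `torch.cuda.synchronize()` keeps asynchronous GPU work inside the measured interval):

    import time

    import torch


    def timed_rgb_forward(detector, images):
        # Hypothetical timing wrapper around the RGB stream.
        torch.cuda.synchronize()
        start = time.time()
        with torch.no_grad():
            output = detector.rgb_model(images.to(detector.opt.device))
        torch.cuda.synchronize()
        detector.total_time += time.time() - start
        return output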
Example #6
    def __init__(self, opt):
        if opt.gpus[0] >= 0:
            opt.device = torch.device('cuda')
        else:
            # a bare string assert is always truthy and never fires
            raise AssertionError('cpu is not supported!')

        self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = None, None, None
        self.flow_model_backbone, self.flow_model_branch = None, None
        self.pa_model_backbone, self.pa_model_deconv, self.pa_model_branch = None, None, None
        if opt.rgb_model != '':
            self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            print('create rgb model', flush=True)
            self.rgb_model_backbone, self.rgb_model_deconv, self.rgb_model_branch = load_inference_model(
                self.rgb_model_backbone, self.rgb_model_deconv,
                self.rgb_model_branch, opt.rgb_model)
            print('load rgb model', flush=True)
            self.rgb_model_backbone = self.rgb_model_backbone.to(opt.device)
            self.rgb_model_deconv = self.rgb_model_deconv.to(opt.device)
            self.rgb_model_branch = self.rgb_model_branch.to(opt.device)
            print('put rgb model to gpu', flush=True)
            self.rgb_model_backbone.eval()
            self.rgb_model_deconv.eval()
            self.rgb_model_branch.eval()

        elif opt.flow_model != '':
            self.flow_model_backbone, self.flow_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test)
            self.flow_model_backbone = convert2flow(opt.ninput,
                                                    self.flow_model_backbone)
            print('create flow model', flush=True)
            self.flow_model_backbone, self.flow_model_branch = load_inference_model(
                self.flow_model_backbone, self.flow_model_branch,
                opt.flow_model)
            print('load flow model', flush=True)
            self.flow_model_backbone = self.flow_model_backbone.to(opt.device)
            self.flow_model_branch = self.flow_model_branch.to(opt.device)
            print('put flow model to gpu', flush=True)
            self.flow_model_backbone.eval()
            self.flow_model_branch.eval()

        elif opt.pa_model != '':
            self.pa_model_PA, self.pa_model_backbone, self.pa_model_deconv, self.pa_model_branch = create_inference_model(
                opt.arch,
                opt.branch_info,
                opt.head_conv,
                opt.K,
                flip_test=opt.flip_test,
                is_pa=True,
                pa_fuse_mode=opt.pa_fuse_mode,
                rgb_w3=opt.rgb_w3)
            print('create PA model', flush=True)
            '''
            if opt.pa_fuse_mode == 'PAN':
                self.pa_model = convert2PAN(opt.ninput, self.pa_model_backbone, conv_idx=1)
            
            elif opt.pa_fuse_mode == 'TDN':
                self.pa_model = convert2TDN(opt.ninput, self.pa_model_backbone, conv_idx=2)  # idx 1 or 2? It should not matter here, since the trained weights are loaded afterwards.
            
            # Single PAN stream
            else:
                self.pa_model = convert2PAN(opt.ninput, self.pa_model, conv_idx=1)
            '''
            self.pa_model_PA, self.pa_model_backbone, self.pa_model_deconv, self.pa_model_branch = load_inference_model(
                self.pa_model_PA, self.pa_model_backbone, self.pa_model_deconv,
                self.pa_model_branch, opt.pa_model)
            print('load PA model', flush=True)

            self.pa_model_PA = self.pa_model_PA.to(opt.device)
            self.pa_model_backbone = self.pa_model_backbone.to(opt.device)
            self.pa_model_deconv = self.pa_model_deconv.to(opt.device)
            self.pa_model_branch = self.pa_model_branch.to(opt.device)

            print('put PA model to gpu', flush=True)
            self.pa_model_PA.eval()
            self.pa_model_backbone.eval()
            self.pa_model_deconv.eval()
            self.pa_model_branch.eval()

        self.num_classes = opt.num_classes
        self.opt = opt

        self.rgb_buffer = []
        self.flow_buffer = []
        self.rgb_buffer_flip = []
        self.flow_buffer_flip = []

        self.pa_buffer = []
        self.pa_buffer_flip = []

        # added: for speed measurement
        self.total_time = 0
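
The `*_buffer` lists point at a sliding window of per-frame features kept for online inference. A minimal sketch of how such a buffer might be maintained (the function name and window policy are assumptions):

    def push_feature(buffer, feat, window):
        # Hypothetical sliding-window update: keep only the most recent
        # `window` per-frame features for the tubelet heads.
        buffer.append(feat)
        if len(buffer) > window:
            buffer.pop(0)

    # e.g. push_feature(self.pa_buffer, feat, self.opt.K)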