    def _init_modules(self):
        if self.num_layers == 18:
            resnet = resnet18()
        elif self.num_layers == 34:
            resnet = resnet34()
        elif self.num_layers == 50:
            resnet = resnet50()
        elif self.num_layers == 101:
            resnet = resnet101()
        elif self.num_layers == 152:
            resnet = resnet152()
        else:
            assert 0, "network not defined"

        if self.pretrained:
            print("Loading pretrained weights from %s" % (self.model_path))
            state_dict = torch.load(self.model_path)
            # keep only the keys this model actually has (e.g. the
            # checkpoint's ImageNet fc head is dropped)
            resnet.load_state_dict({
                k: v
                for k, v in state_dict.items() if k in resnet.state_dict()
            })

        # Build resnet.
        if self._fs == 16:
            self.base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                      resnet.maxpool, resnet.layer1,
                                      resnet.layer2, resnet.layer3)
        elif self._fs == 32:
            self.base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                      resnet.maxpool, resnet.layer1,
                                      resnet.layer2, resnet.layer3,
                                      resnet.layer4)
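
# A minimal usage sketch (an assumption, not part of the original snippets):
# _init_modules expects `num_layers`, `pretrained`, `model_path`, and the
# feature stride `_fs` to be set beforehand. The class name and defaults
# below are hypothetical, and `nn` / the resnet constructors are assumed to
# be imported as in the snippets.
class ResNetBackbone(nn.Module):
    def __init__(self, num_layers=50, pretrained=False, model_path=None,
                 feat_stride=16):
        super().__init__()
        self.num_layers = num_layers
        self.pretrained = pretrained
        self.model_path = model_path  # path to an ImageNet checkpoint
        self._fs = feat_stride        # feature stride: 16 or 32
        self._init_modules()          # the method defined above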
    def _init_modules(self):
        if self.num_layers == 18:
            resnet = resnet18()
        elif self.num_layers == 34:
            resnet = resnet34()
        elif self.num_layers == 50:
            resnet = resnet50()
        elif self.num_layers == 101:
            resnet = resnet101()
        elif self.num_layers == 152:
            resnet = resnet152()
        else:
            assert 0, "network not defined"

        if self.pretrained:
            print("Loading pretrained weights from %s" % (self.model_path))
            state_dict = torch.load(self.model_path)
            resnet.load_state_dict({
                k: v
                for k, v in state_dict.items() if k in resnet.state_dict()
            })

        # Build resnet.
        if self._fs == 16:
            self.base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                      resnet.maxpool, resnet.layer1,
                                      resnet.layer2, resnet.layer3)

            self.ROIGN_top = nn.Sequential(
                Bottleneck(256 * self.expansions, 64 * self.expansions),
                Bottleneck(256 * self.expansions, 64 * self.expansions),
                Bottleneck(256 * self.expansions, 64 * self.expansions))
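
            # `self.expansions` is assumed to be the ResNet block expansion
            # (1 for BasicBlock in resnet18/34, 4 for Bottleneck in
            # resnet50/101/152), so each Bottleneck above preserves the
            # 256 * expansions channels coming out of layer3.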

            # initialize grasp top with He (Kaiming) initialization
            def kaiming_init(m):
                if isinstance(m, nn.Conv2d):
                    # kaiming_normal_ is the current in-place API
                    # (init.kaiming_normal is deprecated)
                    init.kaiming_normal_(m.weight.data, nonlinearity='relu')

            self.ROIGN_top.apply(kaiming_init)

        else:
            assert 0, "only support stride 16."

        def set_bn_fix(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                for p in m.parameters():
                    p.requires_grad = False

        # fix batch normalization
        self.base.apply(set_bn_fix)
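
    # Note (an assumption based on the common faster-rcnn.pytorch pattern,
    # not shown in this snippet): set_bn_fix only stops gradients for the
    # BatchNorm affine parameters; the running mean/var still update while
    # the module is in training mode. The usual companion override keeps
    # BN layers in eval mode during training:
    def train(self, mode=True):
        nn.Module.train(self, mode)
        if mode:
            def set_bn_eval(m):
                if m.__class__.__name__.find('BatchNorm') != -1:
                    m.eval()
            self.base.apply(set_bn_eval)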
Example #3
    def _init_modules(self):
        if self.num_layers == 50:
            resnet = resnet50()
        elif self.num_layers == 101:
            resnet = resnet101()
        else:
            assert 0, "network not defined"

        if self.pretrained:
            print("Loading pretrained weights from %s" % (self.model_path))
            state_dict = torch.load(self.model_path)
            resnet.load_state_dict({k: v for k, v in state_dict.items() if k in resnet.state_dict()})

        # Build resnet.
        self.VMRN_base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                       resnet.maxpool, resnet.layer1, resnet.layer2, resnet.layer3)

        self.VMRN_obj_top = nn.Sequential(resnet.layer4)

        # VMRN layers
        if cfg.VMRN.SHARE_WEIGHTS:
            self.VMRN_rel_top = copy.deepcopy(nn.Sequential(resnet.layer4))
        else:
            self.VMRN_rel_top_o1 = copy.deepcopy(nn.Sequential(resnet.layer4))
            self.VMRN_rel_top_o2 = copy.deepcopy(nn.Sequential(resnet.layer4))
            self.VMRN_rel_top_union = copy.deepcopy(nn.Sequential(resnet.layer4))

        self.VMRN_obj_cls_score = nn.Linear(2048, self.n_classes)
        if self.class_agnostic:
            self.VMRN_obj_bbox_pred = nn.Linear(2048, 4)
        else:
            self.VMRN_obj_bbox_pred = nn.Linear(2048, 4 * self.n_classes)

        # 2048 * 3: the o1, o2 and union relation features are assumed to be
        # concatenated before relation classification
        self.VMRN_rel_cls_score = vmrn_rel_classifier(2048 * 3)

        # Fix blocks
        for p in self.VMRN_base[0].parameters(): p.requires_grad = False
        for p in self.VMRN_base[1].parameters(): p.requires_grad = False

        assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
        if cfg.RESNET.FIXED_BLOCKS >= 3:
            for p in self.VMRN_base[6].parameters(): p.requires_grad = False
        if cfg.RESNET.FIXED_BLOCKS >= 2:
            for p in self.VMRN_base[5].parameters(): p.requires_grad = False
        if cfg.RESNET.FIXED_BLOCKS >= 1:
            for p in self.VMRN_base[4].parameters(): p.requires_grad = False

        def set_bn_fix(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                for p in m.parameters(): p.requires_grad = False

        self.VMRN_base.apply(set_bn_fix)
        self.VMRN_obj_top.apply(set_bn_fix)
    def _init_modules(self):
        resnet = resnet101()

        if self.pretrained:
            print("Loading pretrained weights from %s" % (self.model_path))
            state_dict = torch.load(self.model_path)
            resnet.load_state_dict({
                k: v
                for k, v in state_dict.items() if k in resnet.state_dict()
            })

        # Build resnet.
        self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                       resnet.maxpool, resnet.layer1,
                                       resnet.layer2, resnet.layer3)

        self.RCNN_top = nn.Sequential(resnet.layer4)

        self.RCNN_cls_score = nn.Linear(2048, self.n_classes)
        if self.class_agnostic:
            self.RCNN_bbox_pred = nn.Linear(2048, 4)
        else:
            self.RCNN_bbox_pred = nn.Linear(2048, 4 * self.n_classes)

        # Fix blocks
        for p in self.RCNN_base[0].parameters():
            p.requires_grad = False
        for p in self.RCNN_base[1].parameters():
            p.requires_grad = False

        assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
        if cfg.RESNET.FIXED_BLOCKS >= 3:
            for p in self.RCNN_base[6].parameters():
                p.requires_grad = False
        if cfg.RESNET.FIXED_BLOCKS >= 2:
            for p in self.RCNN_base[5].parameters():
                p.requires_grad = False
        if cfg.RESNET.FIXED_BLOCKS >= 1:
            for p in self.RCNN_base[4].parameters():
                p.requires_grad = False

        def set_bn_fix(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                for p in m.parameters():
                    p.requires_grad = False

        self.RCNN_base.apply(set_bn_fix)
        self.RCNN_top.apply(set_bn_fix)
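
    # In the common faster-rcnn.pytorch layout this snippet follows, RCNN_top
    # is consumed by a head-to-tail step like the sketch below (an assumption,
    # not part of the original snippet): layer4 runs on the pooled 7x7 RoI
    # features and global average pooling yields the 2048-d vector fed to
    # RCNN_cls_score / RCNN_bbox_pred.
    def _head_to_tail(self, pool5):
        fc7 = self.RCNN_top(pool5).mean(3).mean(2)
        return fc7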
  def _init_modules(self):
    if self.num_layers == 18:
        resnet = resnet18()
    elif self.num_layers == 34:
        resnet = resnet34()
    elif self.num_layers == 50:
        resnet = resnet50()
    elif self.num_layers == 101:
        resnet = resnet101()
    elif self.num_layers == 152:
        resnet = resnet152()
    else:
        assert 0, "network not defined"

    if self.pretrained:
      print("Loading pretrained weights from %s" % (self.model_path))
      state_dict = torch.load(self.model_path)
      resnet.load_state_dict({k: v for k, v in state_dict.items() if k in resnet.state_dict()})

    # Build resnet.
    self.RCNN_base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                   resnet.maxpool)

    self.RCNN_feat_layers = nn.ModuleList()
    self.RCNN_feat_layers.append(resnet.layer1)
    self.RCNN_feat_layers.append(resnet.layer2)
    self.RCNN_feat_layers.append(resnet.layer3)
    self.RCNN_feat_layers.append(resnet.layer4)
    self.RCNN_feat_layers.append(nn.MaxPool2d(kernel_size=3, stride=2))

    # FPN channel reducing layers
    self.RCNN_newconvs = nn.ModuleList()
    self.RCNN_newconvs.append(nn.Conv2d(64 * self.expansions, 256, 1, stride=1))
    self.RCNN_newconvs.append(nn.Conv2d(128 * self.expansions, 256, 1, stride=1))
    self.RCNN_newconvs.append(nn.Conv2d(256 * self.expansions, 256, 1, stride=1))
    self.RCNN_newconvs.append(nn.Conv2d(512 * self.expansions, 256, 1, stride=1))
    self.RCNN_newconvs.append(nn.Conv2d(512 * self.expansions, 256, 1, stride=1))

    # FPN mix conv layers
    self.RCNN_mixconvs = nn.ModuleList()
    self.RCNN_mixconvs.append(nn.Conv2d(256, 256, 3, stride=1, padding=1))
    self.RCNN_mixconvs.append(nn.Conv2d(256, 256, 3, stride=1, padding=1))
    self.RCNN_mixconvs.append(nn.Conv2d(256, 256, 3, stride=1, padding=1))
    self.RCNN_mixconvs.append(nn.Conv2d(256, 256, 3, stride=1, padding=1))
    self.RCNN_mixconvs.append(nn.Conv2d(256, 256, 3, stride=1, padding=1))

    # FPN top-down merge layers (3 in total); despite the "deconv" name,
    # they are 1x1 convolutions
    self.RCNN_deconvs = nn.ModuleList()
    for i in range(3):
        self.RCNN_deconvs.append(
            nn.Conv2d(256, 256, 1, stride=1)
        )
    hidden_num = 1024
    # fully connected classifier and regressor; the input is a 7x7 RoI feature
    # from the 256-channel pyramid (self.expansions * 64 == 256 for the
    # Bottleneck backbones)
    self.RCNN_top = nn.Sequential(
        nn.Linear(self.expansions * 64 * 7 * 7, hidden_num),
        nn.ReLU(),
        nn.Linear(hidden_num, hidden_num),
        nn.ReLU())

    self.RCNN_cls_score = nn.Linear(hidden_num, self.n_classes)
    if self.class_agnostic:
      self.RCNN_bbox_pred = nn.Linear(hidden_num, 4)
    else:
      self.RCNN_bbox_pred = nn.Linear(hidden_num, 4 * self.n_classes)

    # Fix blocks
    for p in self.RCNN_base[0].parameters(): p.requires_grad=False
    for p in self.RCNN_base[1].parameters(): p.requires_grad=False

    assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
    if cfg.RESNET.FIXED_BLOCKS >= 3:
      for p in self.RCNN_feat_layers[2].parameters(): p.requires_grad=False
    if cfg.RESNET.FIXED_BLOCKS >= 2:
      for p in self.RCNN_feat_layers[1].parameters(): p.requires_grad=False
    if cfg.RESNET.FIXED_BLOCKS >= 1:
      for p in self.RCNN_feat_layers[0].parameters(): p.requires_grad=False

    def set_bn_fix(m):
      classname = m.__class__.__name__
      if classname.find('BatchNorm') != -1:
        for p in m.parameters(): p.requires_grad=False

    self.RCNN_base.apply(set_bn_fix)
    self.RCNN_feat_layers.apply(set_bn_fix)
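
  # A sketch (an assumption, not part of the original snippet) of how the
  # three ModuleLists above are typically wired in the forward pass: the 1x1
  # RCNN_newconvs reduce each backbone map to 256 channels, the top-down path
  # upsamples and merges the three lowest levels through RCNN_deconvs, and
  # the 3x3 RCNN_mixconvs smooth each merged map. `feats` is assumed to be
  # the list of five feature maps, finest first, and torch.nn.functional is
  # assumed to be imported as F.
  def _build_pyramid(self, feats):
    laterals = [conv(f) for conv, f in zip(self.RCNN_newconvs, feats)]
    for i in range(len(self.RCNN_deconvs) - 1, -1, -1):
      up = F.interpolate(laterals[i + 1], size=laterals[i].shape[-2:],
                         mode='nearest')
      laterals[i] = laterals[i] + self.RCNN_deconvs[i](up)
    return [mix(l) for mix, l in zip(self.RCNN_mixconvs, laterals)]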
Example #6
    def _init_modules(self):
        resnet = resnet101()

        if self.pretrained:
            print("Loading pretrained weights from %s" % (self.model_path))
            state_dict = torch.load(self.model_path)
            resnet.load_state_dict({k: v for k, v in state_dict.items() if k in resnet.state_dict()})

        # Build resnet.
        self.VMRN_base = nn.Sequential(resnet.conv1, resnet.bn1, resnet.relu,
                                       resnet.maxpool, resnet.layer1, resnet.layer2, resnet.layer3)

        self.VMRN_obj_top = nn.Sequential(resnet.layer4)

        # VMRN layers
        if cfg.VMRN.SHARE_WEIGHTS:
            self.VMRN_rel_top = copy.deepcopy(nn.Sequential(resnet.layer4))
        else:
            self.VMRN_rel_top_o1 = copy.deepcopy(nn.Sequential(resnet.layer4))
            self.VMRN_rel_top_o2 = copy.deepcopy(nn.Sequential(resnet.layer4))
            self.VMRN_rel_top_union = copy.deepcopy(nn.Sequential(resnet.layer4))

        self.VMRN_obj_cls_score = nn.Linear(2048, self.n_classes)
        if self.class_agnostic:
            self.VMRN_obj_bbox_pred = nn.Linear(2048, 4)
        else:
            self.VMRN_obj_bbox_pred = nn.Linear(2048, 4 * self.n_classes)

        self.VMRN_rel_cls_score = vmrn_rel_classifier(2048 * 3)

        # Fix blocks
        for p in self.VMRN_base[0].parameters(): p.requires_grad = False
        for p in self.VMRN_base[1].parameters(): p.requires_grad = False

        assert (0 <= cfg.RESNET.FIXED_BLOCKS < 4)
        if cfg.RESNET.FIXED_BLOCKS >= 3:
            for p in self.VMRN_base[6].parameters(): p.requires_grad = False
        if cfg.RESNET.FIXED_BLOCKS >= 2:
            for p in self.VMRN_base[5].parameters(): p.requires_grad = False
        if cfg.RESNET.FIXED_BLOCKS >= 1:
            for p in self.VMRN_base[4].parameters(): p.requires_grad = False

        def set_bn_fix(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                for p in m.parameters(): p.requires_grad = False

        self.VMRN_base.apply(set_bn_fix)
        self.VMRN_obj_top.apply(set_bn_fix)

        self.MGN_top = nn.Sequential(
            Bottleneck(256 * self.expansions, 64 * self.expansions),
            Bottleneck(256 * self.expansions, 64 * self.expansions),
            Bottleneck(256 * self.expansions, 64 * self.expansions)
        )

        # initialize grasp top with He (Kaiming) initialization
        def kaiming_init(m):
            if isinstance(m, nn.Conv2d):
                # kaiming_normal_ is the current in-place API
                # (init.kaiming_normal is deprecated)
                init.kaiming_normal_(m.weight.data, nonlinearity='relu')

        self.MGN_top.apply(kaiming_init)