def __init__(self, nhidden, dropout):
    """Fusion module that concatenates three nhidden-channel feature maps.

    Args:
        nhidden: channel width of each input feature map.
        dropout: dropout probability, stored for use in forward().
    """
    super(Concat, self).__init__()
    # 1x1 conv stack: 3*nhidden concatenated channels -> nhidden -> 256 -> 128.
    widths = [(3 * nhidden, nhidden), (nhidden, 256), (256, 128)]
    self.conv1, self.conv2, self.conv3 = (Conv2d(cin, cout, 1) for cin, cout in widths)
    # FC compressors back to nhidden; `_r` presumably the relation branch — TODO confirm.
    self.compress = FC(640, nhidden, relu=True)
    self.compress_r = FC(640, nhidden, relu=True)
    self.dropout = dropout
def __init__(self, nhidden):
    """Encoder for a 2-channel (dual) mask input.

    Three convs (2->96->128->64) followed by two FC heads whose input
    sizes (18496, 25600) are hard-coded — presumably tied to fixed mask
    resolutions upstream; verify against the caller.

    Args:
        nhidden: output width of fc1_dm; fc2_dm emits 2 * nhidden.
    """
    super(DualMask, self).__init__()
    self.conv1 = Conv2d(2, 96, kernel_size=5)
    self.conv2 = Conv2d(96, 128, kernel_size=5)
    self.conv3 = Conv2d(128, 64, kernel_size=8)
    self.fc1_dm = FC(18496, nhidden, relu=True)
    self.fc2_dm = FC(25600, 2 * nhidden, relu=True)
    # Both FC heads get the same small-variance normal init.
    for fc_layer in (self.fc1_dm, self.fc2_dm):
        faster_rcnn.network.weights_normal_init(fc_layer, 0.01)
def __init__(self):
    """Region Proposal Network head: VGG16 backbone, a 3x3 conv, and two
    parallel 1x1 heads producing per-anchor class scores and box deltas.

    NOTE(review): reads len(self.anchor_scales), which is never assigned in
    this __init__ — presumably a class attribute; confirm it exists.
    """
    super(RPN, self).__init__()
    self.features = VGG16(bn=False)
    self.conv1 = Conv2d(512, 512, 3, same_padding=True)
    # Per spatial location: 2 scores (fg/bg) and 4 bbox deltas for each of
    # len(anchor_scales) * 3 anchors (3 aspect ratios hard-coded here).
    self.score_conv = Conv2d(512, len(self.anchor_scales) * 3 * 2, 1, relu=False, same_padding=False)
    self.bbox_conv = Conv2d(512, len(self.anchor_scales) * 3 * 4, 1, relu=False, same_padding=False)
    # Loss terms, populated during the forward pass.
    self.cross_entropy = None
    # Fix: was `self.los_box` (typo). Renamed to `loss_box` to match the
    # sibling RPN implementation in this file, which consumers read.
    self.loss_box = None
def __init__(self, debug=False):
    """Region Proposal Network with configurable anchors.

    Reads anchor scales/ratios and the feature stride from the global cfg,
    builds a VGG16 backbone, a 3x3 conv, and two 1x1 heads emitting
    per-anchor scores and box regression deltas, plus the proposal and
    anchor-target layers.

    Args:
        debug: when True, enables extra logging elsewhere in the module.
    """
    super(RPN, self).__init__()
    # Anchor configuration comes from the global config object.
    self.anchor_scales = cfg.ANCHOR_SCALES
    self.anchor_ratios = cfg.ANCHOR_RATIOS
    self.feat_stride = cfg.FEAT_STRIDE[0]
    n_anchors = len(self.anchor_scales) * len(self.anchor_ratios)
    self._vgg16 = VGG16()
    self.conv1 = Conv2d(512, 512, 3, same_padding=True)
    # 2 class scores and 4 bbox deltas per anchor at every spatial location.
    self.score_conv = Conv2d(512, n_anchors * 2, 1, relu=False, same_padding=False)
    self.bbox_conv = Conv2d(512, n_anchors * 4, 1, relu=False, same_padding=False)
    # Proposal generation and anchor-target assignment layers.
    self.proposal_layer = proposal_layer_py(
        self.feat_stride, self.anchor_scales, self.anchor_ratios)
    self.anchor_target_layer = anchor_target_layer_py(
        self.feat_stride, self.anchor_scales, self.anchor_ratios)
    # Loss accumulators, filled in during forward.
    self.cross_entropy = 0
    self.loss_box = 0
    # For logging.
    self.debug = debug
def __init__(self, nhidden, dropout):
    """Message-passing module with twin conv stacks.

    Two FC transforms (f, g) plus two identical 1x1 conv pipelines — one
    plain and one `_r` (presumably the relation branch — TODO confirm) —
    each mapping 3*nhidden -> nhidden -> 256 -> 128, followed by FC
    compressors back to nhidden.

    Args:
        nhidden: hidden feature width.
        dropout: dropout probability, stored for use in forward().
    """
    super(GraphicalModel, self).__init__()
    self.f = FC(nhidden, nhidden, relu=True)
    self.g = FC(nhidden, nhidden, relu=True)
    # Both branches share the same channel schedule.
    widths = [(3 * nhidden, nhidden), (nhidden, 256), (256, 128)]
    self.conv1, self.conv2, self.conv3 = (Conv2d(cin, cout, 1) for cin, cout in widths)
    self.conv1_r, self.conv2_r, self.conv3_r = (Conv2d(cin, cout, 1) for cin, cout in widths)
    self.compress = FC(640, nhidden, relu=True)
    self.compress_r = FC(640, nhidden, relu=True)
    self.dropout = dropout
def __init__(self, bn=False):
    """VGG16 convolutional feature extractor (conv1–conv5, no classifier).

    The first two stages are frozen (requires_grad=False); the final stage
    has no max-pool, matching the usual Faster R-CNN backbone layout.

    Args:
        bn: whether each Conv2d uses batch normalization.
    """
    super(VGG16, self).__init__()

    def make_stage(channels, pool=True):
        # Chain 3x3 same-padding convs along `channels`, optionally pooled.
        layers = [Conv2d(cin, cout, 3, same_padding=True, bn=bn)
                  for cin, cout in zip(channels, channels[1:])]
        if pool:
            layers.append(nn.MaxPool2d(2))
        return nn.Sequential(*layers)

    self.conv1 = make_stage((3, 64, 64))
    self.conv2 = make_stage((64, 128, 128))
    # Freeze the early stages — standard practice when fine-tuning VGG16.
    faster_rcnn.network.set_trainable(self.conv1, requires_grad=False)
    faster_rcnn.network.set_trainable(self.conv2, requires_grad=False)
    self.conv3 = make_stage((128, 256, 256, 256))
    self.conv4 = make_stage((256, 512, 512, 512))
    self.conv5 = make_stage((512, 512, 512, 512), pool=False)