Example #1
def __init__(self, **kwargs):
    super(BoxNet, self).__init__()
    # Two independent ResNet-18 branches over a 4-channel input; `resnet`
    # is the project's custom module (it accepts input_channel/fc kwargs),
    # not torchvision. Size branch: 512-d feature -> 3-d box size.
    self.conv_size = nn.Sequential(
        resnet.resnet18(pretrained=False, input_channel=4, fc=False),
        nn.Conv2d(512, 512, kernel_size=1, bias=False),
        nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.AvgPool2d(7))
    self.fc_size = nn.Sequential(nn.Linear(512, 256),
                                 nn.ReLU(inplace=True), nn.Linear(256, 3))
    # Rotation branch: 512-d feature -> 6-d continuous rotation
    # representation, bounded to (-1, 1) by the final Tanh.
    self.conv_rot6 = nn.Sequential(
        resnet.resnet18(pretrained=False, input_channel=4, fc=False),
        nn.Conv2d(512, 512, kernel_size=1, bias=False),
        nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.AvgPool2d(7))
    self.fc_rot6 = nn.Sequential(nn.Linear(512, 256),
                                 nn.ReLU(inplace=True), nn.Linear(256, 6),
                                 nn.Tanh())
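A minimal usage sketch (not from the project): forward() is omitted from the snippet, so the flatten between the conv and fc stages is an assumption, as is the 224x224 input that AvgPool2d(7) implies (ResNet-18 downsamples by 32, giving a 7x7 map).

import torch

net = BoxNet()
x = torch.randn(2, 4, 224, 224)                  # assumed 4-channel input size
size = net.fc_size(net.conv_size(x).flatten(1))  # (2, 3) box size
rot6 = net.fc_rot6(net.conv_rot6(x).flatten(1))  # (2, 6), in (-1, 1) via Tanh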
Example #2
File: TouchPtN.py Project: samhu1989/PON
def __init__(self, **kwargs):
    super(ScaleNet, self).__init__()
    # Shared ResNet-18 encoder (project-specific resnet, 4-channel input).
    self.enc = resnet.resnet18(pretrained=False, input_channel=4, fc=False)
    # dec expects 1024 channels, i.e. two 512-channel encodings
    # concatenated in forward (not shown here).
    self.dec = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=1, bias=False),
        nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.AvgPool2d(7))
    # Single scalar scale prediction.
    self.fc = nn.Linear(512, 1)
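Since dec takes 1024 channels while resnet18 emits 512, forward (not shown) presumably encodes two inputs and concatenates them along channels. A hedged sketch under that assumption:

import torch

net = ScaleNet()
a = torch.randn(1, 4, 224, 224)                 # assumed input size
b = torch.randn(1, 4, 224, 224)
f = torch.cat([net.enc(a), net.enc(b)], dim=1)  # (1, 1024, 7, 7), assumed pairing
scale = net.fc(net.dec(f).flatten(1))           # (1, 1) scalar scale estimate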
Example #3
    def _build_model(self, inputs):
        with tf.variable_scope('resnet'):
            # Multi-scale backbone features (strides 4/8/16/32).
            c2, c3, c4, c5 = resnet.resnet18(is_training=self.is_training).forward(inputs)

            # FPN-style top-down pathway: reduce each level to 128 channels
            # with a 1x1 conv, then fuse with the upsampled coarser level by
            # equal-weight averaging.
            p5 = _conv(c5, 128, [1, 1], is_training=self.is_training, name="conv_p5")
            up_p5 = upsampling(p5)

            reduce_dim_c4 = _conv(c4, 128, [1, 1], is_training=self.is_training, name="conv_c4")
            p4 = 0.5 * up_p5 + 0.5 * reduce_dim_c4
            up_p4 = upsampling(p4)

            reduce_dim_c3 = _conv(c3, 128, [1, 1], is_training=self.is_training, name="conv_c3")
            p3 = 0.5 * up_p4 + 0.5 * reduce_dim_c3
            up_p3 = upsampling(p3)

            reduce_dim_c2 = _conv(c2, 128, [1, 1], is_training=self.is_training, name="conv_c2")
            p2 = 0.5 * up_p3 + 0.5 * reduce_dim_c2
            features = _conv(p2, 128, [3, 3], is_training=self.is_training, name="conv_p2")

            # IDA-up
            # p2 = _conv(c2, 128, [1,1], is_training=self.is_training)
            # p3 = _conv(c3, 128, [1,1], is_training=self.is_training)
            # p4 = _conv(c4, 128, [1,1], is_training=self.is_training)
            # p5 = _conv(c5, 128, [1,1], is_training=self.is_training)

            # up_p3 = upsampling(p3, method='resize')
            # p2 = _conv(p2+up_p3, 128, [3,3], is_training=self.is_training)

            # up_p4 = upsampling(upsampling(p4, method='resize'), method='resize')
            # p2 = _conv(p2+up_p4, 128, [3,3], is_training=self.is_training)

            # up_p5 = upsampling(upsampling(upsampling(p5, method='resize'), method='resize'), method='resize')
            # features = _conv(p2+up_p5, 128, [3,3], is_training=self.is_training)
        
        with tf.variable_scope('detector'):
            # CenterNet-style heads on the fused stride-4 feature map:
            # hm  - per-class center heatmap
            # wh  - box width/height
            # reg - sub-pixel center offset
            hm = _conv(features, 64, [3, 3], is_training=self.is_training, name="hm_conv_1")
            hm = _conv_nn(hm, cfg.num_classes, [1, 1], padding='VALID', activation=tf.nn.relu, name='hm_conv')

            wh = _conv(features, 64, [3, 3], is_training=self.is_training, name="wh_conv_1")
            wh = _conv_nn(wh, 2, [1, 1], padding='VALID', activation=tf.nn.relu, name="wh_conv")

            reg = _conv(features, 64, [3, 3], is_training=self.is_training, name="reg_conv_1")
            reg = _conv_nn(reg, 2, [1, 1], padding='VALID', activation=tf.nn.relu, name="reg_conv")

        return hm, wh, reg
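These heads match the usual CenterNet decoding: find a heatmap peak, read wh/reg at that cell, and map back to input coordinates. A minimal single-peak NumPy decode sketch (not from this project; the stride of 4 for the p2-level feature map is an assumption):

import numpy as np

def decode_peak(hm, wh, reg, stride=4):
    # hm: (1, H, W, num_classes), wh/reg: (1, H, W, 2), NHWC as in the graph.
    w = hm.shape[2]
    flat = hm[0].reshape(-1, hm.shape[-1])       # (H*W, num_classes)
    idx, cls = np.unravel_index(flat.argmax(), flat.shape)
    cy, cx = divmod(idx, w)
    ox, oy = reg[0, cy, cx]                      # sub-pixel center offset
    bw, bh = wh[0, cy, cx]                       # box width/height
    cx, cy = (cx + ox) * stride, (cy + oy) * stride
    return cls, (cx - bw / 2, cy - bh / 2, cx + bw / 2, cy + bh / 2)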
Example #4
File: BoxIN.py Project: samhu1989/PON
def __init__(self, **kwargs):
    super(BoxNet, self).__init__()
    # InstanceNorm variant of BoxNet: the custom resnet is built with
    # nn.InstanceNorm2d, and the heads are fully convolutional (1x1 convs
    # in place of Linear layers, so no explicit flatten is needed).
    # Size branch: -> (N, 3, 1, 1).
    self.dec_size = nn.Sequential(
        resnet.resnet18(pretrained=False,
                        input_channel=4,
                        fc=False,
                        norm=nn.InstanceNorm2d),
        nn.Conv2d(512, 256, kernel_size=1, bias=False),
        nn.InstanceNorm2d(256, affine=True), nn.ReLU(inplace=True),
        nn.AvgPool2d(7), nn.Conv2d(256, 256, kernel_size=1, bias=True),
        nn.ReLU(inplace=True), nn.Conv2d(256, 3, kernel_size=1, bias=True))
    # Rotation branch: -> (N, 6, 1, 1), Tanh-bounded.
    self.dec_rot6 = nn.Sequential(
        resnet.resnet18(pretrained=False,
                        input_channel=4,
                        fc=False,
                        norm=nn.InstanceNorm2d),
        nn.Conv2d(512, 256, kernel_size=1, bias=False),
        nn.InstanceNorm2d(256, affine=True), nn.ReLU(inplace=True),
        nn.AvgPool2d(7), nn.Conv2d(256, 256, kernel_size=1, bias=True),
        nn.ReLU(inplace=True), nn.Conv2d(256, 6, kernel_size=1, bias=True),
        nn.Tanh())
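Compared with Example #1, this variant swaps BatchNorm2d for InstanceNorm2d (with affine=True, since InstanceNorm2d defaults to affine=False) and keeps the heads fully convolutional, so a plain Sequential pass works end to end. A sketch, assuming the same 224x224 input:

import torch

net = BoxNet()
x = torch.randn(2, 4, 224, 224)
size = net.dec_size(x).flatten(1)   # (2, 3)
rot6 = net.dec_rot6(x).flatten(1)   # (2, 6), Tanh-bounded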
Example #5
def __init__(self, **kwargs):
    super(TouchPtNet, self).__init__()
    # Shared ResNet-18 encoder; dec again expects 1024 channels, i.e. two
    # 512-channel encodings concatenated in forward (not shown here).
    self.enc = resnet.resnet18(pretrained=False, input_channel=4, fc=False)
    self.dec = nn.Sequential(
        nn.Conv2d(1024, 512, kernel_size=1, bias=False),
        nn.BatchNorm2d(512), nn.ReLU(inplace=True), nn.AvgPool2d(7))
    # Two attention heads over candidate points: input is the 512-d image
    # feature concatenated with per-point xyz; Softmax(dim=2) normalizes
    # the weights across the point dimension.
    self.w1 = nn.Sequential(
        nn.Conv1d(512 + 3, 256, kernel_size=1, bias=False),
        nn.BatchNorm1d(256), nn.ReLU(inplace=True),
        nn.Conv1d(256, 1, kernel_size=1, bias=True), nn.Softmax(dim=2))
    self.w2 = nn.Sequential(
        nn.Conv1d(512 + 3, 256, kernel_size=1, bias=False),
        nn.BatchNorm1d(256), nn.ReLU(inplace=True),
        nn.Conv1d(256, 1, kernel_size=1, bias=True), nn.Softmax(dim=2))
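A hedged sketch of how w1 might be applied (forward is not shown; the pairing of two encodings and the tiling of the pooled image feature over candidate points are assumptions):

import torch

net = TouchPtNet()
img_a = torch.randn(2, 4, 224, 224)
img_b = torch.randn(2, 4, 224, 224)
pts = torch.randn(2, 3, 1024)                    # (B, xyz, N) candidate points
f = net.dec(torch.cat([net.enc(img_a), net.enc(img_b)], dim=1))  # (2, 512, 1, 1)
f = f.flatten(1).unsqueeze(-1).expand(-1, -1, pts.size(2))       # (2, 512, N)
w = net.w1(torch.cat([f, pts], dim=1))           # (2, 1, N), sums to 1 over N
touch = (pts * w).sum(dim=2)                     # (2, 3) expected touch point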