示例#1
0
 def __init__(self,):
     """Build a four-stage convolutional feature extractor.

     Each stage is a strided 3x3 Conv2D (stride 2, padding 1), so every
     stage halves the spatial resolution; channels grow 3->32->64->128->128.
     """
     super().__init__()
     # Derive consecutive (in, out) channel pairs from one tuple so the
     # stage pattern is stated once.
     channels = (3, 32, 64, 128, 128)
     stages = [Conv2D(c_in, c_out, 3, 2, 1)
               for c_in, c_out in zip(channels[:-1], channels[1:])]
     self.layers = nn.Sequential(*stages)
     self.reset_parameters()
示例#2
0
 def __init__(self, answer_num, in_dim, proj_dim, fc_dim, feat_h, norm_type, add_coord):
     """Classifier head: 1x1 projection, global max-pool, then two FC layers.

     When ``add_coord`` is true the projection is prefixed with a coordinate
     stage; otherwise it is a plain conv+act.
     """
     super().__init__()
     proj_orders = ('coord', 'conv', 'act') if add_coord else ('conv', 'act')
     projection = Conv2D(in_dim, proj_dim, 1, norm_type=norm_type, orders=proj_orders)
     # When proj_dim <= 0 the first FC layer consumes in_dim instead —
     # presumably Conv2D acts as a pass-through in that case; TODO confirm.
     fc_in = proj_dim if proj_dim > 0 else in_dim
     self.layers = nn.Sequential(
         projection,
         # Pool the full feat_h x feat_h map down to 1x1.
         nn.MaxPool2d(feat_h, feat_h, 0),
         Flatten(),
         Linear(fc_in, fc_dim, norm_type=norm_type, orders=('linear', 'norm', 'act')),
         Linear(fc_dim, answer_num, orders=('linear',)),
     )
     self.reset_parameters()
示例#3
0
 def __init__(self, answer_num, img_dim, query_dim, proj_dim, fc_dim, feat_h, norm_type, act_type, add_coord):
     """Attention head: fuse image+query features via 1x1 conv, hard-attend, classify.

     NOTE(review): ``feat_h`` is accepted but never used in this body —
     presumably kept so sibling heads share a signature; confirm before removing.
     """
     super().__init__()
     from .layers.film_layers import HardAttnSumLayer
     # Coordinate channels are optionally prepended to the fusing conv.
     fuse_orders = (('coord', 'conv', 'norm', 'act') if add_coord
                    else ('conv', 'norm', 'act'))
     fuse = Conv2D(img_dim + query_dim, proj_dim, 1, orders=fuse_orders,
                   act_type=act_type, norm_type=norm_type)
     self.layers = nn.Sequential(
         fuse,
         # Collapses the spatial map to a single vector via hard attention.
         HardAttnSumLayer(),
         Linear(proj_dim, fc_dim, norm_type=norm_type,
                orders=('linear', 'norm', 'act'), act_type=act_type),
         Linear(fc_dim, answer_num, orders=('linear',)),
     )
     self.reset_parameters()
示例#4
0
 def __init__(self):
     """ResNet-101 trunk up to layer3, then a coord-aware 3x3 reduction to 128 channels."""
     super().__init__()
     import torchvision
     # Positional arg: pretrained=False in this torchvision version — so the
     # backbone starts from random weights; confirm against the pinned version.
     backbone = torchvision.models.resnet101(False)
     stem = nn.Sequential(backbone.conv1, backbone.bn1, backbone.relu, backbone.maxpool)
     self.layers = nn.Sequential(
         stem,
         backbone.layer1,
         backbone.layer2,
         backbone.layer3,
         # layer3 of ResNet-101 emits 1024 channels; compress to 128.
         Conv2D(1024, 128, 3, padding=1, orders=('coord', 'conv', 'norm', 'act')),
     )
     self.reset_parameters()
示例#5
0
 def __init__(self, in_dim, layer_dims, layer_norm_types, layer_act_types, layer_coord_types,
              layer_se_types, layer_orders=None):
     """Stack of 3x3 Conv2D blocks with per-layer norm/act/coord/SE configuration.

     ``layer_orders`` defaults to the class-wide ``default_orders`` repeated
     once per layer. Per-layer lists are indexed (not zipped) so a length
     mismatch still raises IndexError, as before.
     """
     super().__init__()
     self.layers = nn.ModuleList()
     if layer_orders is None:
         layer_orders = (self.default_orders,) * len(layer_dims)
     prev_dim = in_dim
     for idx, out_dim in enumerate(layer_dims):
         se_type = layer_se_types[idx]
         orders = layer_orders[idx]
         if se_type is not None:
             # Splice the squeeze-excite stage in just before the activation.
             orders = list(orders)
             orders.insert(orders.index('act'), 'se')
         self.layers.append(
             Conv2D(prev_dim, out_dim, 3, padding=1, orders=orders,
                    norm_type=layer_norm_types[idx], act_type=layer_act_types[idx],
                    coord_type=layer_coord_types[idx], se_type=se_type)
         )
         # Each layer's output width feeds the next layer's input.
         prev_dim = out_dim
     self.reset_parameters()