Example #1
    def __init__(self,
                 block=None,
                 layers=None,
                 block_depth=(64, 128, 256, 512),
                 fcs=tuple(),
                 input_dim=3,
                 output_dim=8,
                 num_skills=1,
                 return_features=False,
                 num_conv_layers=4,
                 num_conv_filters=128,
                 master_head_type='fc',
                 size_master_conv_filters=3,
                 **kwargs):
        super(ResNetFilm, self).__init__(block,
                                         layers,
                                         block_depth,
                                         input_dim,
                                         return_features,
                                         filmed=True,
                                         condition_dim=num_skills,
                                         **kwargs)
        self.num_skills = num_skills

        self.num_conv_filters = num_conv_filters

        # single fc head mapping the pooled features to the output; the
        # skill condition enters through the FiLM layers, not this head
        self.head_fc = nn.Sequential(nn.Linear(block_depth[-1], output_dim))

        init_weights(self.modules())
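ResNetFilm routes the skill condition through FiLM (feature-wise linear modulation) layers built into the residual blocks via filmed=True and condition_dim=num_skills, so the head itself stays unconditioned. The repository's FiLM module is not shown on this page; the following is a minimal sketch of the standard FiLM operation, with all names below hypothetical:

import torch.nn as nn

class FiLMSketch(nn.Module):
    """The condition predicts a per-channel scale (gamma) and
    shift (beta) that modulate the feature map."""

    def __init__(self, num_channels, condition_dim):
        super().__init__()
        self.to_gamma_beta = nn.Linear(condition_dim, 2 * num_channels)

    def forward(self, x, condition):
        # x: (B, C, H, W), condition: (B, condition_dim)
        gamma, beta = self.to_gamma_beta(condition).chunk(2, dim=1)
        # broadcast the (B, C) coefficients over the spatial dims
        return gamma[:, :, None, None] * x + beta[:, :, None, None]

With condition_dim=num_skills, a one-hot skill vector effectively selects a per-skill modulation of every conditioned block.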
Example #2
    def __init__(self,
                 block=None,
                 layers=None,
                 block_depth=(64, 128, 256, 512),
                 fcs=tuple(),
                 input_dim=3,
                 output_dim=1000,
                 input_signal_dim=0,
                 return_features=False,
                 **kwargs):
        self.inplanes = 64

        super(ResNetSignals,
              self).__init__(block, layers, block_depth, input_dim,
                             return_features, **kwargs)

        # fully connected layers on top of features
        fcs_cnn = [
            nn.Linear(block_depth[3] * block.expansion, 64),
            nn.ReLU(),
        ]
        self.fcs_cnn = nn.Sequential(*fcs_cnn)

        # fc branch embedding the auxiliary input signal
        fcs_signal = [
            nn.Linear(input_signal_dim, 64),
            nn.ReLU(),
            nn.Linear(64, 64),
            nn.ReLU(),
        ]
        self.fcs_signal = nn.Sequential(*fcs_signal)

        # fuse the two 64-dim embeddings (concatenated: 64 + 64 = 128)
        fcs = [nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, output_dim)]
        self.fcs = nn.Sequential(*fcs)

        init_weights(self.modules())
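The constructor implies a two-branch forward pass: the pooled CNN features go through fcs_cnn and the auxiliary signal through fcs_signal, each producing a 64-dim embedding, and their concatenation (hence the 128-dim input of fcs) feeds the final predictor. A hedged sketch of that flow, where extract_features is an assumed helper returning the pooled, flattened trunk features:

    def forward(self, image, signal):
        # hypothetical sketch; requires `import torch`
        feat = self.fcs_cnn(self.extract_features(image))  # (B, 64)
        sig = self.fcs_signal(signal)                      # (B, 64)
        fused = torch.cat([feat, sig], dim=1)              # (B, 128)
        return self.fcs(fused)                             # (B, output_dim)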
Example #3
    def __init__(self,
                 block=None,
                 layers=None,
                 block_depth=(64, 128, 256, 512),
                 fcs=tuple(),
                 input_dim=3,
                 output_dim=8,
                 num_skills=1,
                 return_features=False,
                 num_conv_layers=4,
                 num_conv_filters=128,
                 master_head_type='fc',
                 size_master_conv_filters=3,
                 **kwargs):
        super(ResNetSkills,
              self).__init__(block, layers, block_depth, input_dim,
                             return_features, **kwargs)
        self.num_skills = num_skills

        head_convs, head_fcs = [], []
        # one conv head per skill
        for k in range(num_skills):
            # first create convs
            head_conv, insize, inplanes = make_resnet_layer(
                block=block,
                planes=num_conv_filters,
                num_blocks=num_conv_layers,
                normalization=self.normalization,
                insize=int(self.insize),
                inplanes=self.inplanes,
                stride=1)
            head_convs.append(head_conv)

            # then create fully connected layers (if required): ReLU after
            # every hidden fc, then a final linear layer to the output
            head_fc = []
            current_dim = num_conv_filters
            for fc_size in fcs:
                head_fc.append(nn.Linear(current_dim, fc_size))
                head_fc.append(nn.ReLU())
                current_dim = fc_size
            head_fc.append(nn.Linear(current_dim, output_dim))
            head_fcs.append(nn.Sequential(*head_fc))

        # master head
        master_conv, master_fc = make_master_head(
            master_head_type=master_head_type,
            num_skills=self.num_skills,
            num_channels=64,
            insize=int(self.insize),
            inplanes=self.inplanes,
            size_conv_filters=size_master_conv_filters)
        head_convs.append(master_conv)
        head_fcs.append(master_fc)

        # nn.Sequential serves as an indexable container here; the heads
        # are applied independently per skill, not chained
        self.head_convs = nn.Sequential(*head_convs)
        self.head_fcs = nn.Sequential(*head_fcs)

        init_weights(self.modules())
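Each of the num_skills skill heads is an independent conv stack plus fc stack, with the master head appended at index num_skills. A hedged sketch of how one head might be dispatched; the adaptive pooling step and the method name are assumptions (F is torch.nn.functional):

    def run_skill_head(self, features, skill):
        # hypothetical: indices 0..num_skills-1 are skill heads,
        # index num_skills is the master head
        x = self.head_convs[skill](features)
        x = F.adaptive_avg_pool2d(x, 1).flatten(1)  # (B, num_conv_filters)
        return self.head_fcs[skill](x)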
Example #4
    def __init__(self,
                 block=None,
                 layers=None,
                 block_depth=(64, 128, 256, 512),
                 fcs=tuple(),
                 input_dim=3,
                 output_dim=1000,
                 return_features=False,
                 **kwargs):
        self.inplanes = 64
        super(ResNetFlat, self).__init__(block, layers, block_depth, input_dim,
                                         return_features, **kwargs)

        # fully connected layers on top of the trunk features; widths run
        # trunk output -> hidden fcs -> output_dim
        fc_size = [block_depth[3] * block.expansion] + list(fcs) + [output_dim]
        fc_layers = []
        for i in range(len(fc_size) - 1):
            fc_layers.append(nn.Linear(fc_size[i], fc_size[i + 1]))
            if i < len(fc_size) - 2:
                fc_layers.append(nn.ReLU())  # no ReLU after the output layer
        self.fcs = nn.Sequential(*fc_layers)

        init_weights(self.modules())
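A hedged usage sketch follows; Block stands in for the repository's residual block class, which is expected to expose an `expansion` class attribute (1 for basic blocks, 4 for bottlenecks):

# hypothetical instantiation, assuming Block.expansion == 1
model = ResNetFlat(block=Block,
                   layers=[2, 2, 2, 2],  # ResNet-18-style layout
                   fcs=[256],            # one hidden fc of width 256
                   output_dim=10)
# resulting head: Linear(512, 256) -> ReLU() -> Linear(256, 10)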
Example #5
File: feat.py Project: wx-b/rlbc
    def __init__(self,
                 block=None,
                 layers=None,
                 block_depth=(64, 128, 256, 512),
                 input_dim=3,
                 return_features=False,
                 normalization='batchnorm',
                 filmed=False,
                 condition_dim=0):
        self.inplanes = 64
        self.features_dim = block_depth[-1]
        self.normalization = normalization
        super(ResNetFeat, self).__init__()
        self.conv1 = nn.Conv2d(
            input_dim,
            self.inplanes,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False)
        self.insize = 224 / 2  # we use stride=2 in self.conv1
        assert normalization in ('batchnorm', 'layernorm', 'instancenorm')
        if normalization == 'batchnorm':
            self.norm1 = nn.BatchNorm2d(self.inplanes)
        elif normalization == 'layernorm':
            self.norm1 = nn.LayerNorm(
                [self.inplanes,
                 int(self.insize),
                 int(self.insize)])
        else:
            self.norm1 = nn.InstanceNorm2d(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.insize /= 2  # max pooling
        self.layer1, self.insize, self.inplanes = make_resnet_layer(
            block,
            block_depth[0],
            layers[0],
            normalization,
            self.insize,
            self.inplanes,
            stride=1,
            filmed=filmed,
            condition_dim=condition_dim)
        self.layer2, self.insize, self.inplanes = make_resnet_layer(
            block,
            block_depth[1],
            layers[1],
            normalization,
            self.insize,
            self.inplanes,
            stride=2,
            filmed=filmed,
            condition_dim=condition_dim)
        self.layer3, self.insize, self.inplanes = make_resnet_layer(
            block,
            block_depth[2],
            layers[2],
            normalization,
            self.insize,
            self.inplanes,
            stride=2,
            filmed=filmed,
            condition_dim=condition_dim)
        self.layer4, self.insize, self.inplanes = make_resnet_layer(
            block,
            block_depth[3],
            layers[3],
            normalization,
            self.insize,
            self.inplanes,
            stride=2,
            filmed=filmed,
            condition_dim=condition_dim)
        self.avgpool = nn.AvgPool2d(7, stride=1)

        init_weights(self.modules())
        self.return_features = return_features
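The insize bookkeeping traces a 224x224 input through the trunk: 112 after conv1 (stride 2), 56 after maxpool, then 56/28/14/7 through layer1..layer4, which is why avgpool uses a 7x7 window. A minimal sketch of the forward pass these modules imply (when filmed=True the real forward also has to thread the condition through the layers, which plain sequential chaining does not do):

    def forward(self, x):
        # hypothetical sketch for a 224x224 input, filmed=False
        x = self.relu(self.norm1(self.conv1(x)))  # (B, 64, 112, 112)
        x = self.maxpool(x)                       # (B, 64, 56, 56)
        x = self.layer1(x)                        # stride 1: 56x56
        x = self.layer2(x)                        # stride 2: 28x28
        x = self.layer3(x)                        # stride 2: 14x14
        x = self.layer4(x)                        # stride 2: 7x7
        x = self.avgpool(x)                       # (B, features_dim, 1, 1)
        return x.flatten(1)                       # (B, features_dim)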