Example #1
    def __init__(self,
                 class_num=31,
                 alpha=1.0,
                 beta=1.0,
                 gamma=1.0,
                 use_gpu=False,
                 writer=None):
        self.use_gpu = use_gpu
        self.alpha = alpha
        self.beta = beta
        self.gamma = gamma
        self.writer = writer

        self.f_net = FeatureExtractor()
        high_feature_dim = self.f_net.output_dim()
        self.bn = BatchNorm1d(high_feature_dim)
        self.c_net_a = FeatureClassifier(feature_dim=high_feature_dim,
                                         class_num=class_num)
        self.c_net_b = FeatureClassifier(feature_dim=high_feature_dim,
                                         class_num=class_num)

        if self.use_gpu:
            self.f_net = self.f_net.cuda()
            self.c_net_a = self.c_net_a.cuda()
            self.c_net_b = self.c_net_b.cuda()
            self.bn = self.bn.cuda()
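
The excerpt shows only the constructor. A hypothetical helper (not part of the original code, and equally applicable to the near-identical Example #7) might push inputs through the feature extractor, the shared BatchNorm1d, and both classifier heads like this:

    def predict(self, inputs):
        # Hypothetical helper: extract features, batch-normalize them,
        # then score them with both classifier heads.
        features = self.bn(self.f_net(inputs))
        return self.c_net_a(features), self.c_net_b(features)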
Example #2
    def __init__(self, input_dim, virtual_batch_size=64, momentum=0.01):
        super(GhostNormalization, self).__init__()

        self.input_dim = input_dim
        self.virtual_batch_size = virtual_batch_size
        self.bn = BatchNorm1d(self.input_dim, momentum=momentum, affine=False)
        self.weight = Parameter(torch.Tensor(self.input_dim))
        self.bias = Parameter(torch.Tensor(self.input_dim))

        init.uniform_(self.bias)
        init.uniform_(self.weight)
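
Only the constructor is shown; a ghost batch normalization layer typically finishes the job in forward() by splitting the batch into virtual batches, normalizing each one independently with the affine=False BatchNorm1d, and then applying the separately held weight and bias. A minimal sketch, not taken from the original source:

    def forward(self, x):
        # Split into virtual batches, normalize each chunk on its own,
        # then apply the shared affine parameters.
        num_chunks = -(-x.size(0) // self.virtual_batch_size)  # ceil division
        x = torch.cat([self.bn(chunk) for chunk in x.chunk(num_chunks, dim=0)], dim=0)
        return x * self.weight + self.bias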
Example #3
    def __init__(self,
                 channels_in=1,
                 channels_out=1,
                 kernel_size=1,
                 use_batch_norm=True,
                 apply_relu=True):
        super(Block, self).__init__()
        self.conv = nn.Conv1d(channels_in, channels_out, kernel_size)
        self.relu = nn.ReLU()
        self.batchnorm = BatchNorm1d(channels_out)
        self.use_batch_norm = use_batch_norm
        self.apply_relu = apply_relu
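
A forward pass matching these flags would presumably be convolution, then optional batch norm and optional ReLU. A minimal sketch, assuming that order:

    def forward(self, x):
        # conv -> (optional) batch norm -> (optional) ReLU
        x = self.conv(x)
        if self.use_batch_norm:
            x = self.batchnorm(x)
        if self.apply_relu:
            x = self.relu(x)
        return x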
Example #4
    def __init__(self, encoder_hidden, decoder_hidden, output_dim,
                 embedding_dim, sos_token, device):
        super().__init__()
        self.decoder_hidden = decoder_hidden
        self.gru = nn.GRU(2 * encoder_hidden + embedding_dim, decoder_hidden)
        self.attn = AttentionNetwork(encoder_hidden, decoder_hidden)
        self.embedding = nn.Embedding(output_dim, embedding_dim)
        self.output_decoder = nn.Linear(decoder_hidden, output_dim)
        self.batch_norm = BatchNorm1d(output_dim)
        self.output_dim = output_dim
        self.device = device
        self.sos_token = sos_token
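
The constructor implies a bidirectional encoder (hence the 2 * encoder_hidden slice of the GRU input) and batch normalization over the output logits. A hypothetical single decoding step, with the AttentionNetwork call signature assumed rather than taken from the source:

    def forward_step(self, input_token, hidden, encoder_outputs):
        # input_token: [batch]; hidden: [1, batch, decoder_hidden]
        # encoder_outputs: [src_len, batch, 2 * encoder_hidden]
        embedded = self.embedding(input_token).unsqueeze(0)       # [1, batch, emb]
        attn_weights = self.attn(hidden, encoder_outputs)         # [batch, src_len] (assumed)
        context = torch.bmm(attn_weights.unsqueeze(1),
                            encoder_outputs.permute(1, 0, 2))     # [batch, 1, 2 * enc]
        rnn_input = torch.cat((embedded, context.permute(1, 0, 2)), dim=2)
        output, hidden = self.gru(rnn_input, hidden)
        logits = self.batch_norm(self.output_decoder(output.squeeze(0)))
        return logits, hidden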
Example #5
    def __init__(self, feature_dim=256, class_num=31):
        super(FeatureClassifier, self).__init__()
        self.classifier_layer_1 = nn.Linear(feature_dim, feature_dim // 2)
        self.classifier_layer_2 = nn.Linear(feature_dim // 2, class_num)
        self.bn = BatchNorm1d(feature_dim // 2)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)
        self.softmax = nn.Softmax()

        ## initialization
        self.classifier_layer_1.weight.data.normal_(0, 0.01)
        self.classifier_layer_1.bias.data.fill_(0.0)
        self.classifier_layer_2.weight.data.normal_(0, 0.01)
        self.classifier_layer_2.bias.data.fill_(0.0)
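
The forward pass is not shown; note also that nn.Softmax() is constructed without a dim argument, so it relies on the deprecated implicit-dimension behaviour. A minimal sketch of how the layers above are likely chained:

    def forward(self, x):
        # linear -> batch norm -> ReLU -> dropout -> linear -> softmax
        x = self.dropout(self.relu(self.bn(self.classifier_layer_1(x))))
        return self.softmax(self.classifier_layer_2(x))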
Example #6
    def __init__(self):
        super(FeatureExtractor, self).__init__()
        ## set base network
        model_resnet50 = models.resnet50(pretrained=True)
        self.conv1 = model_resnet50.conv1
        self.bn1 = model_resnet50.bn1
        self.relu = model_resnet50.relu
        self.maxpool = model_resnet50.maxpool
        self.layer1 = model_resnet50.layer1
        self.layer2 = model_resnet50.layer2
        self.layer3 = model_resnet50.layer3
        self.layer4 = model_resnet50.layer4
        self.avgpool = model_resnet50.avgpool
        self.high_dim = model_resnet50.fc.in_features
        self.bn = BatchNorm1d(self.high_dim)
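
Examples #1 and #7 call output_dim() on this extractor, which presumably just exposes self.high_dim. A sketch of that accessor and of the usual ResNet-50 forward pipeline, ending with the BatchNorm1d over the flattened features (assumed, not copied from the source):

    def output_dim(self):
        return self.high_dim

    def forward(self, x):
        # Standard ResNet-50 feature pipeline, flattened and batch-normalized.
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        x = self.layer4(self.layer3(self.layer2(self.layer1(x))))
        x = self.avgpool(x).view(x.size(0), -1)
        return self.bn(x)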
Example #7
    def __init__(self,
                 class_num=31,
                 trade_off=1.0,
                 use_gpu=False,
                 writer=None):
        self.use_gpu = use_gpu
        self.trade_off = trade_off
        self.writer = writer

        self.f_net = FeatureExtractor()
        high_feature_dim = self.f_net.output_dim()
        # Note: unlike the modules moved to CUDA below, this batch norm is
        # placed on the GPU unconditionally, regardless of the use_gpu flag.
        self.bn = BatchNorm1d(high_feature_dim, affine=False).cuda()
        #feature_dim = adaptor_dim
        self.c_net_a = FeatureClassifier(feature_dim=high_feature_dim,
                                         class_num=class_num)
        self.c_net_b = FeatureClassifier(feature_dim=high_feature_dim,
                                         class_num=class_num)

        if self.use_gpu:
            self.f_net = self.f_net.cuda()
            self.c_net_a = self.c_net_a.cuda()
            self.c_net_b = self.c_net_b.cuda()
Example #8
    def __init__(self, feature_dim=2048, output_dim=256, final_bn=True):
        super(SingleLayerFeatureExtractor, self).__init__()
        self.classifier_layer_1 = nn.Linear(feature_dim, output_dim)
        self.classifier_layer_1.weight.data.normal_(0, 0.005)
        self.classifier_layer_1.bias.data.fill_(0.1)
        self.bn = BatchNorm1d(output_dim)
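
The excerpt never stores final_bn, so presumably that flag gates the batch norm in the forward pass of the full source. A minimal sketch that applies it unconditionally:

    def forward(self, x):
        # Single linear projection followed by batch norm; in the full source
        # the final_bn flag presumably decides whether self.bn is applied.
        return self.bn(self.classifier_layer_1(x))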