def __init__(self, n_classes=7, hidden_layers=256, graph_hidden_layers=512):
    """Build the level-reasoning head: 1x1 conv adapters around a stack of
    three graph-context-reasoning layers, each followed by synchronized BN.

    Args:
        n_classes: number of output classes (stored on the instance).
        hidden_layers: channel width the graph features are projected back to.
        graph_hidden_layers: channel width used inside the graph reasoning stack.
    """
    super(LevelReasoning, self).__init__()

    self.hidden_layers = hidden_layers
    self.nclasses = n_classes

    # 1x1 convolutions act as per-node linear projections into and out of
    # the graph-reasoning feature space.
    self.graph_transfer = nn.Conv1d(
        graph_hidden_layers, graph_hidden_layers, kernel_size=1, stride=1
    )
    self.graph_transfer_back = nn.Conv1d(
        graph_hidden_layers, hidden_layers, kernel_size=1, stride=1
    )

    # Three graph context reasoning layers with ReLU activations, each paired
    # with a synchronized (cross-GPU) batch-norm layer below.
    self.attn1 = gcn.GraphContextReasoning(graph_hidden_layers, 'relu')
    self.attn2 = gcn.GraphContextReasoning(graph_hidden_layers, 'relu')
    self.attn3 = gcn.GraphContextReasoning(graph_hidden_layers, 'relu')

    self.bn1 = SynchronizedBatchNorm1d(graph_hidden_layers)
    self.bn2 = SynchronizedBatchNorm1d(graph_hidden_layers)
    self.bn3 = SynchronizedBatchNorm1d(graph_hidden_layers)
def testSyncBatchNormSyncEval(self):
    """Compare SynchronizedBatchNorm1d (wrapped in DataParallelWithCallback
    across two GPUs) against the reference nn.BatchNorm1d in eval mode."""
    reference = nn.BatchNorm1d(10, eps=1e-5, affine=False)
    candidate = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
    # Replicate the sync-BN module over two devices so the synchronized
    # statistics path is actually exercised.
    candidate = DataParallelWithCallback(candidate, device_ids=[0, 1])
    reference.cuda()
    candidate.cuda()
    # False -> eval mode; cuda=True runs the comparison on GPU tensors.
    self._checkBatchNormResult(
        reference, candidate, torch.rand(16, 10), False, cuda=True
    )
def __init__(self, backbone, out_dim, pretrained_model=None):
    """EfficientNet backbone with a custom linear head.

    Args:
        backbone: EfficientNet variant name passed to `from_name`.
        out_dim: output dimension of the replacement fully-connected head.
        pretrained_model: optional mapping from backbone name to a weights
            path; when given, the matching state dict is loaded.
    """
    super(enetv2, self).__init__()

    self.enet = enet.EfficientNet.from_name(backbone)
    if pretrained_model is not None:
        # Load weights keyed by the requested backbone variant.
        self.enet.load_state_dict(torch.load(pretrained_model[backbone]))

    feature_dim = self.enet._fc.in_features
    self.myfc = nn.Linear(feature_dim, out_dim)
    # Synchronized BN so statistics are shared across GPUs during DataParallel
    # training (replaces a plain nn.BatchNorm1d).
    self.batchnorm = SynchronizedBatchNorm1d(feature_dim)
    # Strip the backbone's own classifier; features flow to `myfc` instead.
    self.enet._fc = nn.Identity()
def __init__(self, backbone, out_dim):
    """EfficientNet backbone with a synchronized-BN + linear replacement head.

    Args:
        backbone: EfficientNet variant name passed to `from_name`.
        out_dim: output dimension of the replacement fully-connected head.
    """
    super(enetv2, self).__init__()

    self.enet = enet.EfficientNet.from_name(backbone)
    feature_dim = self.enet._fc.in_features
    # Synchronized BN keeps batch statistics consistent across GPUs.
    self.batchnorm = SynchronizedBatchNorm1d(feature_dim)
    self.myfc = nn.Linear(feature_dim, out_dim)
    # Disable the backbone's built-in classifier; `myfc` takes over.
    self.enet._fc = nn.Identity()
def testSyncBatchNormNormalEval(self):
    """Check SynchronizedBatchNorm1d matches nn.BatchNorm1d in eval mode
    on a single device (no DataParallel replication)."""
    reference = nn.BatchNorm1d(10)
    candidate = SynchronizedBatchNorm1d(10)
    # False -> eval mode; comparison runs on CPU tensors.
    self._checkBatchNormResult(reference, candidate, torch.rand(16, 10), False)