def __init__(self, args, dropout=0.2):
    """Build the encoder selected by ``args.model_type`` and a self-attention head.

    Args:
        args: experiment namespace; must provide ``model_type`` and ``head``.
            For 'AmdimNet' it must also provide ``ndf``, ``rkhs`` and ``nd``.
        dropout: dropout probability forwarded to ``MultiHeadAttention``.

    Raises:
        ValueError: if ``args.model_type`` is not a supported backbone.
    """
    super().__init__()
    # Backbone imports are kept local so only the selected network is loaded.
    if args.model_type == 'ConvNet':
        from feat.networks.convnet import ConvNet
        self.encoder = ConvNet()
        z_dim = 64
    elif args.model_type == 'ResNet':
        from feat.networks.resnet import ResNet
        self.encoder = ResNet()
        z_dim = 640
    elif args.model_type == 'AmdimNet':
        from feat.networks.amdimnet import AmdimNet
        self.encoder = AmdimNet(ndf=args.ndf, n_rkhs=args.rkhs, n_depth=args.nd)
        z_dim = args.rkhs
    else:
        # Report the offending value instead of raising with an empty message.
        raise ValueError('Unsupported model_type: {!r}'.format(args.model_type))
    self.slf_attn = MultiHeadAttention(args, args.head, z_dim, z_dim, z_dim, dropout=dropout)
    self.z_dim = z_dim
    self.args = args
def __init__(self, args):
    """Build a MatchNet: encoder backbone plus optional bidirectional LSTM.

    Args:
        args: experiment namespace; must provide ``model_type``,
            ``use_bilstm``, ``query`` and ``way``. For 'AmdimNet' it must
            also provide ``ndf``, ``rkhs`` and ``nd``.

    Raises:
        ValueError: if ``args.model_type`` is not a supported backbone.
    """
    # Zero-argument super() — consistent with the other model constructors.
    super().__init__()
    self.use_bilstm = args.use_bilstm
    self.args = args  # carries shot/way/query episode configuration
    # Backbone imports are kept local so only the selected network is loaded.
    if args.model_type == 'ConvNet':
        from feat.networks.convnet import ConvNet
        self.encoder = ConvNet()
        layer_size = 32
    elif args.model_type == 'ResNet':
        from feat.networks.resnet import ResNet
        self.encoder = ResNet()
        layer_size = 320
    elif args.model_type == 'AmdimNet':
        from feat.networks.amdimnet import AmdimNet
        self.encoder = AmdimNet(ndf=args.ndf, n_rkhs=args.rkhs, n_depth=args.nd)
        layer_size = int(args.rkhs / 2)
    else:
        # Report the offending value instead of raising with an empty message.
        raise ValueError('Unsupported model_type: {!r}'.format(args.model_type))
    if self.use_bilstm:
        self.bilstm = BidirectionalLSTM(layer_sizes=[layer_size],
                                        batch_size=args.query * args.way,
                                        vector_dim=layer_size * 2)
def __init__(self, args):
    """Select and build the encoder backbone from ``args.model_type``.

    Args:
        args: experiment namespace; must provide ``model_type``
            ('ConvNet' or 'ResNet').

    Raises:
        ValueError: if ``args.model_type`` is not a supported backbone.
    """
    super().__init__()
    self.args = args
    # Backbone imports are kept local so only the selected network is loaded.
    if args.model_type == 'ConvNet':
        from feat.networks.convnet import ConvNet
        self.encoder = ConvNet()
    elif args.model_type == 'ResNet':
        from feat.networks.resnet import ResNet
        self.encoder = ResNet()
    else:
        # Report the offending value instead of raising with an empty message.
        raise ValueError('Unsupported model_type: {!r}'.format(args.model_type))
def __init__(self, args):
    """Build the encoder backbone plus a linear classification head.

    Args:
        args: experiment namespace; must provide ``model_type``
            ('ConvNet' or 'ResNet') and ``num_class``.

    Raises:
        ValueError: if ``args.model_type`` is not a supported backbone.
    """
    super().__init__()
    self.args = args
    # Backbone imports are kept local so only the selected network is loaded.
    if args.model_type == 'ConvNet':
        hdim = 64
        from feat.networks.convnet import ConvNet
        self.encoder = ConvNet()
    elif args.model_type == 'ResNet':
        hdim = 640
        # Redundant 'as ResNet' alias removed.
        from feat.networks.resnet import ResNet
        self.encoder = ResNet()
    else:
        # Report the offending value instead of raising with an empty message.
        raise ValueError('Unsupported model_type: {!r}'.format(args.model_type))
    # Classification head mapping encoder features to class logits.
    self.fc = nn.Linear(hdim, args.num_class)
def __init__(self, args):
    """Select and build the encoder backbone from ``args.model_type``.

    Args:
        args: experiment namespace; must provide ``model_type``. For
            'AmdimNet' it must also provide ``ndf``, ``rkhs`` and ``nd``.

    Raises:
        ValueError: if ``args.model_type`` is not a supported backbone.
    """
    super().__init__()
    self.args = args
    # Backbone imports are kept local so only the selected network is loaded.
    if args.model_type == 'ConvNet':
        from feat.networks.convnet import ConvNet
        self.encoder = ConvNet()
    elif args.model_type == 'ResNet':
        from feat.networks.resnet import ResNet
        self.encoder = ResNet()
    elif args.model_type == 'AmdimNet':
        from feat.networks.amdimnet import AmdimNet
        # Single-device module; dead commented-out DataParallel code removed.
        self.encoder = AmdimNet(ndf=args.ndf, n_rkhs=args.rkhs, n_depth=args.nd)
    else:
        # Report the offending value instead of raising with an empty message.
        raise ValueError('Unsupported model_type: {!r}'.format(args.model_type))
def __init__(self, args, dropout=0.1):
    """Build the encoder backbone and a single-head self-attention module.

    Args:
        args: experiment namespace; must provide ``model_type``
            ('ConvNet' or 'ResNet').
        dropout: dropout probability forwarded to ``MultiHeadAttention``.

    Raises:
        ValueError: if ``args.model_type`` is not a supported backbone.
    """
    super().__init__()
    # Backbone imports are kept local so only the selected network is loaded.
    if args.model_type == 'ConvNet':
        from feat.networks.convnet import ConvNet
        self.encoder = ConvNet()
        # NOTE(review): 512 differs from the 64-dim ConvNet used by the other
        # constructors in this file — confirm this variant really emits 512-d
        # features before changing anything.
        z_dim = 512
        self.softmax = nn.Softmax(dim=1)
    elif args.model_type == 'ResNet':
        from feat.networks.resnet import ResNet
        self.encoder = ResNet()
        z_dim = 640
    else:
        # Report the offending value instead of raising with an empty message.
        raise ValueError('Unsupported model_type: {!r}'.format(args.model_type))
    self.slf_attn = MultiHeadAttention(1, z_dim, z_dim, z_dim, dropout=dropout)
    self.args = args
def __init__(self, args):
    """Select and build the encoder backbone from ``args.model_type``.

    Args:
        args: experiment namespace; must provide ``model_type``. For
            'AmdimNet' it must also provide ``ndf``, ``rkhs`` and ``nd``.

    Raises:
        ValueError: if ``args.model_type`` is not a supported backbone.
    """
    super().__init__()
    self.args = args
    # Backbone imports are kept local so only the selected network is loaded.
    if args.model_type == 'ConvNet':
        from feat.networks.convnet import ConvNet
        self.encoder = ConvNet()
    elif args.model_type == 'ResNet':
        from feat.networks.resnet import ResNet
        self.encoder = ResNet()
    elif args.model_type == 'AmdimNet':
        from feat.networks.amdimnet import AmdimNet
        self.encoder = AmdimNet(ndf=args.ndf, n_rkhs=args.rkhs, n_depth=args.nd)
    else:
        # Report the offending value instead of raising with an empty message.
        raise ValueError('Unsupported model_type: {!r}'.format(args.model_type))
def __init__(self, use_bilstm, model_type, num_shot, num_way, num_query, temperature=1):
    """Build a MatchNet: encoder backbone plus optional bidirectional LSTM.

    Args:
        use_bilstm: whether to attach a ``BidirectionalLSTM`` on top of the
            encoder features.
        model_type: backbone identifier, 'ConvNet4' or 'ResNet'.
        num_shot: support examples per class (episode configuration).
        num_way: number of classes per episode.
        num_query: query examples per class; with ``num_way`` it sets the
            LSTM batch size.
        temperature: logit scaling factor (kept for interface compatibility).

    Raises:
        ValueError: if ``model_type`` is not a supported backbone.
    """
    # Zero-argument super() — consistent with the other model constructors.
    super().__init__()
    self.use_bilstm = use_bilstm
    # Backbone imports are kept local so only the selected network is loaded.
    if model_type == 'ConvNet4':
        from feat.networks.convnet import ConvNet4
        self.encoder = ConvNet4()
        layer_size = 32
    elif model_type == 'ResNet':
        from feat.networks.resnet import ResNet
        self.encoder = ResNet()
        layer_size = 320
    else:
        # Report the offending value instead of raising with an empty message.
        raise ValueError('Unsupported model_type: {!r}'.format(model_type))
    if self.use_bilstm:
        self.bilstm = BidirectionalLSTM(layer_sizes=[layer_size],
                                        batch_size=num_query * num_way,
                                        vector_dim=layer_size * 2)