def __init__(self, channels, dim):
    """Build the per-level top-down pathway convolutions, deepest level first.

    Args:
        channels: backbone channel counts, ordered shallow-to-deep (iterated
            in reverse so index 0 of each list is the deepest level).
        dim: shared feature dimension of the top-down pathway.
    """
    super().__init__()
    # 1x1 convs project each backbone stage's channels down to `dim`;
    # 3x3 convs refine the merged top-down feature at every level.
    self.td_in = nn.ModuleList(
        [nnlayers.conv_layer(c, dim, ks=1) for c in reversed(channels)])
    self.td_out = nn.ModuleList(
        [nnlayers.conv_layer(dim, dim, ks=3) for _ in channels])
def __init__(self, fpn, classifier, n_obj):
    """Wrap an FPN and classifier with per-dimension embeddings and an output head.

    Args:
        fpn: feature pyramid network module.
        classifier: classification head module.
        n_obj: number of object classes (channels into the output conv).
    """
    super().__init__()
    self.fpn = fpn
    self.classifier = classifier
    # NOTE(review): `mod_dims` is resolved from enclosing module scope — confirm
    # it is a module-level list of embedding dimensions.
    self.embeddings = nn.ModuleList([embedding(1, d) for d in mod_dims])
    out_layers = [
        nnlayers.conv_layer(n_obj, 128),
        nnlayers.conv_layer(128, 128),
    ]
    self.output_conv = nn.Sequential(*out_layers)
    # Non-trainable instruction index, kept on the module's device via a buffer.
    self.register_buffer('inst', torch.zeros(1, dtype=torch.long))
    self.n_obj = n_obj
def resolve_embedding(emb_type, keys, dim):
    """Construct a per-key embedding module of the requested type.

    Args:
        emb_type: either 'conv' (a two-layer conv bottleneck per key) or
            'emb-add' (a shared embedding dictionary applied additively).
        keys: iterable of keys, one embedding per key.
        dim: channel dimension of the features being embedded.

    Returns:
        An ``ApplyModuleDict`` ('conv') or ``ApplyEmbedding`` ('emb-add').

    Raises:
        ValueError: if ``emb_type`` is not one of the supported values.
    """
    if emb_type == 'conv':
        # Expand to 2*dim then project back — a small per-key bottleneck.
        emb = {k: nn.Sequential(nnlayers.conv_layer(dim, 2 * dim),
                                nnlayers.conv_layer(2 * dim, dim))
               for k in keys}
        return ApplyModuleDict(emb)
    if emb_type == 'emb-add':
        return ApplyEmbedding(EmbeddingDict(keys, dim))
    # Fix: the original raised a bare ValueError with no message, hiding
    # which value was rejected and what the valid options are.
    raise ValueError(
        "unknown emb_type: %r (expected 'conv' or 'emb-add')" % (emb_type,))
def __init__(self, origin_layer, target_layer, channels, ks=3, detach=False):
    """Layer connection that refines the routed feature with one conv.

    Args:
        origin_layer: layer the feature is taken from.
        target_layer: layer the feature is injected into.
        channels: channel count (conv maps channels -> channels).
        ks: kernel size of the refinement conv (default 3).
        detach: whether the base connection detaches the routed feature.
    """
    super().__init__(origin_layer, target_layer, detach)
    self.conv = conv_layer(channels, channels, ks=ks)
def __init__(self, body, out_dims, fpn_dim=256, emb_type='conv'):
    """FPN model with per-task prediction heads and a feature embedding.

    Args:
        body: backbone network handed to ``build_fpn``.
        out_dims: mapping of task key -> number of output channels; must
            contain an 'object' entry (used as the FPN output dimension).
        fpn_dim: internal FPN feature dimension.
        emb_type: embedding type forwarded to ``resolve_embedding``.
    """
    super().__init__()
    fpn_parts = build_fpn(body, fpn_dim, bu_in_lateral=True,
                          out_dim=out_dims['object'])
    self.ifn, self.bu, self.td, self.fusion, ch = fpn_parts
    # Embedding operates on the deepest backbone channel count.
    self.embedding = resolve_embedding(emb_type, out_dims.keys(), ch[-1])
    heads = {}
    for key, n_out in out_dims.items():
        heads[key] = nn.Sequential(
            nnlayers.conv_layer(fpn_dim, fpn_dim),
            conv2d(fpn_dim, n_out, ks=1, bias=True))
    self.head = nn.ModuleDict(heads)
def __init__(self, tree, weights_encoder='', weights_decoder=''):
    """Two-branch model: object branch feeding a bottom-up path into a part branch.

    Args:
        tree: label hierarchy providing ``n_obj`` and ``n_parts``.
        weights_encoder: optional encoder checkpoint path for ``get_fpn``.
        weights_decoder: optional decoder checkpoint path for ``get_fpn``.
    """
    super().__init__()
    self.fpn = get_fpn(tree, weights_encoder=weights_encoder,
                       weights_decoder=weights_decoder)
    fpn_dim = 512
    half = fpn_dim // 2
    # Object branch: feature refinement + 1x1 object logits.
    self.obj_branch = nn.ModuleList([
        nnlayers.conv_layer(fpn_dim, fpn_dim),
        conv2d(fpn_dim, tree.n_obj, ks=1, bias=True),
    ])
    # Bottom-up path: lifts object logits to half dim, then fuses at full dim.
    self.bu = nn.ModuleList([
        nnlayers.conv_layer(tree.n_obj, half),
        nnlayers.conv_layer(fpn_dim, fpn_dim),
    ])
    # Part branch: refinement + 1x1 part logits.
    self.part_branch = nn.Sequential(
        nnlayers.conv_layer(fpn_dim, fpn_dim),
        conv2d(fpn_dim, tree.n_parts, ks=1, bias=True))
    self.lateral = nnlayers.conv_layer(fpn_dim, half)
def __init__(self, instructor, tree, weights_encoder='', weights_decoder='',
             emb_op=torch.mul):
    """Top-down model conditioned on an object embedding via ``emb_op``.

    Args:
        instructor: module/object that supplies instructions at forward time.
        tree: label hierarchy providing ``n_obj``.
        weights_encoder: optional encoder checkpoint path for ``get_fpn``.
        weights_decoder: optional decoder checkpoint path for ``get_fpn``.
        emb_op: binary op combining features with the embedding
            (default elementwise multiply).
    """
    super().__init__()
    self.fpn = get_fpn(tree, weights_encoder=weights_encoder,
                       weights_decoder=weights_decoder)
    fpn_dim = 512
    quarter = fpn_dim // 4
    eighth = fpn_dim // 8
    # Top-down head: progressively shrink channels down to a single-channel map.
    self.td = nn.Sequential(
        nnlayers.conv_layer(fpn_dim, quarter),
        nnlayers.conv_layer(quarter, eighth),
        fv.conv2d(eighth, 1, ks=1, bias=True))
    self.embedding = fv.embedding(tree.n_obj, fpn_dim)
    self.instructor = instructor
    self.emb_op = emb_op
def __init__(self, tree, weights_encoder='', weights_decoder='', hidden=2):
    """Multi-head model with stacked top-down and bottom-up hidden layers.

    Args:
        tree: label hierarchy providing ``sections``, ``n_obj`` and
            ``n_obj_with_parts``.
        weights_encoder: optional encoder checkpoint path for ``get_fpn``.
        weights_decoder: optional decoder checkpoint path for ``get_fpn``.
        hidden: number of hidden td/bu stages.
    """
    super().__init__()
    self.fpn = get_fpn(tree, weights_encoder=weights_encoder,
                       weights_decoder=weights_decoder)
    fpn_dim = 512
    half = fpn_dim // 2
    # One extra embedding row reserved for the whole-object instruction
    # (index ``self.obj_inst`` below).
    self.embedding = fv.embedding(tree.n_obj_with_parts + 1, fpn_dim)
    self.td = nn.ModuleList(
        [nnlayers.conv_layer(fpn_dim, fpn_dim) for _ in range(hidden)])
    # One head / bottom-up entry per part section, plus one for objects.
    dims = tree.sections + [tree.n_obj]
    self.heads = nn.ModuleList(
        [fv.conv2d(fpn_dim, d, ks=1, bias=True) for d in dims])
    self.bu_start = nn.ModuleList([fv.conv2d(d, half) for d in dims])
    self.bu_lateral = nn.ModuleList(
        [nnlayers.conv_layer(fpn_dim, half) for _ in range(hidden)])
    # All bottom-up stages halve the dimension except the last, which keeps it.
    bu_layers = [nnlayers.conv_layer(fpn_dim, half) for _ in range(hidden - 1)]
    bu_layers.append(nnlayers.conv_layer(fpn_dim, fpn_dim))
    self.bu = nn.ModuleList(bu_layers)
    self.obj_inst = tree.n_obj_with_parts
    self.tree = tree
def __init__(self, origin_layer, target_layer, ni, nf):
    """Layer connection that normalizes then re-projects the routed feature.

    Args:
        origin_layer: layer the feature is taken from.
        target_layer: layer the feature is injected into.
        ni: input channel count (normalized by BatchNorm2d).
        nf: output channel count of the projection conv.
    """
    super().__init__(origin_layer, target_layer)
    self.bn = nn.BatchNorm2d(ni)
    self.conv = conv_layer(ni, nf)
def __init__(self, instructor, fpn, fpn_dim=512):
    """Specialization that supplies a single-logit classifier to the base class.

    Args:
        instructor: instruction provider forwarded to the base class.
        fpn: feature pyramid network forwarded to the base class.
        fpn_dim: FPN feature dimension of the classifier head.
    """
    # Refinement conv followed by a 1x1 projection to one output channel.
    head = nn.Sequential(
        nnlayers.conv_layer(fpn_dim, fpn_dim),
        conv2d(fpn_dim, 1, ks=1, bias=True))
    super().__init__(instructor, fpn, head)
def __init__(self, n, dim):
    """Fusion conv collapsing n stacked dim-channel features into one.

    Args:
        n: number of input features.
        dim: channel dimension of each feature and of the output.
    """
    super().__init__()
    # assumes the forward pass concatenates n features channel-wise — TODO confirm
    self.conv = nnlayers.conv_layer(n * dim, dim, ks=3)
def __init__(self, modules, channels_out, channels_in):
    """Backbone wrapper with one lateral conv per stage.

    Args:
        modules: backbone stage modules, stored as a ModuleList.
        channels_out: lateral output channels, aligned with ``channels_in``.
        channels_in: per-stage input channels (extra entries in the longer
            list are ignored by ``zip``).
    """
    super().__init__()
    self.bb = nn.ModuleList(modules)
    self.laterals = nn.ModuleList(
        [nnlayers.conv_layer(ci, co)
         for ci, co in zip(channels_in, channels_out)])