def initialize(self, num_heads, dim_per_head, drop):
    """Build the sublayers of one transformer block: attention, FFN, norms."""
    model_dim = dim_per_head * num_heads
    self.att = MultiHeadAtt(num_heads, dim_per_head, drop)
    # position-wise feed-forward: expand 4x, then project back to model dim
    self.l1 = M.Dense(model_dim * 4)
    self.l2 = M.Dense(model_dim)
    self.ln1 = M.LayerNorm(1)
    self.ln2 = M.LayerNorm(1)
    self.drop = drop
def initialize(self, dim, num_heads, attn_drop):
    """Set up fused-QKV projection and output projection for multi-head attention."""
    head_dim = dim // num_heads
    self.num_heads = num_heads
    self.head_dim = head_dim
    # 1/sqrt(d_k) scaling used in scaled dot-product attention
    self.scale = head_dim ** -0.5
    self.attn_drop = attn_drop
    # one dense layer emits Q, K and V concatenated along the channel axis
    self.qkv = M.Dense(dim * 3, usebias=True)
    self.proj = M.Dense(dim)
def initialize(self, num_layers, channel, final_chn):
    """Stack `num_layers` PReLU dense layers and a linear output head."""
    self.layers = nn.ModuleList()
    for _ in range(num_layers):
        # no bias: Laplacian normalization in the previous step handles centering
        self.layers.append(
            M.Dense(channel, activation=M.PARAM_PRELU,
                    usebias=False, batch_norm=True))
    self.layers.append(M.Dense(final_chn))
def initialize(self, D=8, W=256, skips=None):
    """Build the NeRF-style MLP trunk (use_viewdirs = True variant).

    Args:
        D: number of hidden layers in the trunk.
        W: width of each hidden layer.
        skips: layer indices receiving a skip connection; defaults to [4].
            (Was a mutable default argument `skips=[4]`, which is shared
            across calls — replaced with a None sentinel; same effective
            default.)
    """
    self.layers = nn.ModuleList()
    for i in range(D):
        self.layers.append(M.Dense(W, activation=M.PARAM_RELU))
    self.skips = [4] if skips is None else skips
    self.alpha_fc = M.Dense(1)        # density (alpha) head
    self.bottleneck = M.Dense(256)    # feature bottleneck before the view branch
    self.hidden = M.Dense(W // 2)
    self.out_fc = M.Dense(3)          # RGB head
def initialize(self, emb_dim=512):
    """Compose a Res10 CNN backbone, a transformer encoder, and an embedding head."""
    self.backbone = Res10()
    self.trans = TransformerNet(num_enc=4, num_heads=8,
                                dim_per_head=64, latent_token=True)
    # final embedding projection (kept as a separate, optional-feeling last layer)
    self.emb = M.Dense(emb_dim)
def initialize(self, channel_list, blocknum_list, embedding_size, embedding_bn=True):
    """Build a 4-stage residual backbone with a 512-d dense head."""
    self.c1 = M.ConvLayer(3, channel_list[0], activation=M.PARAM_PRELU)
    # stages 1..4 consume channel_list[1:5] and blocknum_list[0:4]
    for i in range(4):
        setattr(self, 'stage%d' % (i + 1),
                Stage(channel_list[i + 1], blocknum_list[i]))
    self.bn1 = M.BatchNorm()
    # NOTE(review): `embedding_size` and `embedding_bn` are accepted but never
    # used — the head is hard-coded to 512. Compare sibling variants in this
    # file that do use `embedding_size`; confirm intent before changing.
    self.fc1 = M.Dense(512)
def initialize(self, channel_list, blocknum_list, embedding_size):
    """Backbone with one or several bias-free embedding heads.

    `embedding_size` may be a single int or a list of ints; one Dense head
    is created per size, in order, inside `self.fcs`.
    """
    self.c1 = M.ConvLayer(3, channel_list[0], 1, usebias=False,
                          activation=M.PARAM_PRELU, batch_norm=True)
    for i in range(4):
        setattr(self, 'stage%d' % (i + 1),
                Stage(channel_list[i + 1], blocknum_list[i]))
    self.bn1 = M.BatchNorm()
    # normalize scalar vs list so one loop handles both cases
    sizes = embedding_size if isinstance(embedding_size, list) else [embedding_size]
    self.fcs = nn.ModuleList()
    for size in sizes:
        self.fcs.append(M.Dense(size, usebias=False))
def initialize(self, channel_list, blocknum_list, embedding_size, embedding_bn=True):
    """Backbone with a single bias-free embedding head of `embedding_size`."""
    self.c1 = M.ConvLayer(3, channel_list[0], 1, usebias=False,
                          activation=M.PARAM_PRELU, batch_norm=True)
    for i in range(4):
        setattr(self, 'stage%d' % (i + 1),
                Stage(channel_list[i + 1], blocknum_list[i]))
    self.bn1 = M.BatchNorm()
    print('Embedding_size:', embedding_size)
    # NOTE(review): `embedding_bn` is accepted but unused here — confirm intent.
    self.fc1 = M.Dense(embedding_size, usebias=False)
def initialize(self, channel_list, blocknum_list, drop_prob):
    """4-stage backbone; `drop_prob` is forwarded to every Stage."""
    self.c1 = M.ConvLayer(3, channel_list[0], usebias=False,
                          batch_norm=True, activation=M.PARAM_PRELU)
    for i in range(4):
        setattr(self, 'stage%d' % (i + 1),
                Stage(channel_list[i + 1], blocknum_list[i], drop_prob))
    self.bn1 = M.BatchNorm()
    self.fc1 = M.Dense(512, usebias=False, batch_norm=True)
def initialize(self, channel_list, blocknum_list):
    """7x7/2 conv stem + max-pool, four residual stages, 1000-way linear head."""
    self.c1 = M.ConvLayer(7, channel_list[0], stride=2, usebias=False,
                          batch_norm=True, activation=M.PARAM_RELU)
    self.maxpool = M.MaxPool2D(3, 2)
    # stage 1 keeps resolution; stages 2-4 each downsample by 2
    strides = (1, 2, 2, 2)
    for i in range(4):
        setattr(self, 'stage%d' % (i + 1),
                Stage(channel_list[i + 1], blocknum_list[i], stride=strides[i]))
    self.fc1 = M.Dense(1000)
def initialize(self):
    """Wrap a Body feature extractor with a bias-free 512-d projection."""
    self.body = Body(5, 32, 3, 3)
    self.fc1 = M.Dense(512, usebias=False)
def initialize(self):
    """Two 256-wide PReLU hidden layers feeding a 49-way linear output."""
    # NOTE: attribute order (f1, f3, f2) mirrors the original declaration order.
    self.f1 = M.Dense(256, activation=M.PARAM_PRELU)
    self.f3 = M.Dense(256, activation=M.PARAM_PRELU)
    self.f2 = M.Dense(49)
def initialize(self):
    """Three 512-wide GELU hidden layers and a 2-way output head."""
    for name in ('fc1', 'fc2', 'fc3'):
        setattr(self, name, M.Dense(512, activation=M.PARAM_GELU))
    self.fc4 = M.Dense(2)
def initialize(self, dim, mlp_ratio):
    """Transformer MLP block: expand by `mlp_ratio` with GELU, project back to `dim`."""
    hidden = dim * mlp_ratio
    self.fc1 = M.Dense(hidden, usebias=True, activation=M.PARAM_GELU)
    self.fc2 = M.Dense(dim)
def initialize(self):
    """FNet mixer (16-px patches, stride 8, 512-d, depth 12) plus a GELU MLP head."""
    self.trans = FNet(patch_size=16, patch_stride=8,
                      emb_dim=512, depth=12)
    self.fc1 = M.Dense(2048, activation=M.PARAM_GELU)
    self.fc2 = M.Dense(512)
def initialize(self):
    """Transformer encoder (8-px patches, stride 8, 512-d, depth 12, 8 heads) + MLP head."""
    self.trans = Transformer(patch_size=8, patch_stride=8,
                             emb_dim=512, depth=12, num_heads=8)
    self.fc1 = M.Dense(2048, activation=M.PARAM_GELU)
    self.fc2 = M.Dense(512)
def initialize(self):
    """Single linear head with 3 * 17 = 51 outputs."""
    # presumably 17 keypoints x 3 coordinates — confirm against caller
    self.f3 = M.Dense(3 * 17)
def initialize(self):
    """Body backbone (block counts [2,2,10,2], widths 64..512) with a 512-d head."""
    self.body = Body([2, 2, 10, 2], [64, 128, 256, 512], 3, 3)
    self.fc1 = M.Dense(512, usebias=False)
def initialize(self):
    """Two shared 512-wide GELU layers with two linear output heads."""
    self.l0 = M.Dense(512, activation=M.PARAM_GELU)
    self.l1 = M.Dense(512, activation=M.PARAM_GELU)
    self.l2 = M.Dense(9)        # 9 outputs — presumably a pose/rotation head; confirm
    self.l3 = M.Dense(17 * 3)   # presumably 17 keypoints x 3 coordinates; confirm