def __init__(self, D_in, D_out, ks, activation=F.elu, fact=factorize,
             resid=residual, bn=True, momentum=0.001):
    """Three-convolution module with optional per-conv BatchNorm3d.

    The first conv collapses the kernel's depth dimension to 1
    (kernel ``(1, ks[1], ks[2])``); the second and third use the full
    3D kernel ``ks``.  When ``bn`` is True the convs are built without
    bias terms and a BatchNorm3d is created for each conv.

    Args:
        D_in, D_out: input / output channel counts.
        ks: 3D kernel size ``(depth, height, width)``.
        activation: activation stored for use in the forward pass.
        fact: use the factorized conv constructor when truthy.
        resid: stored residual-connection flag.
        bn: enable batch normalization.
        momentum: BatchNorm3d momentum.
    """
    nn.Module.__init__(self)

    stride = (1, 1, 1)
    padding = layers.pad_size(ks, "same")

    # Conv layer constructor: factorized or plain.
    conv_constr = layers.FactConv if fact else layers.Conv
    # Bias is redundant when batch norm follows the conv.
    use_bias = not bn

    self.resid = resid
    self.bn = bn
    self.activation = activation

    ks_first = (1, ks[1], ks[2])
    pd_first = layers.pad_size(ks_first, "same")
    self.conv1 = conv_constr(D_in, D_out, ks_first, stride, pd_first, use_bias)
    self.conv2 = conv_constr(D_out, D_out, ks, stride, padding, use_bias)
    self.conv3 = conv_constr(D_out, D_out, ks, stride, padding, use_bias)

    if self.bn:
        self.bn1 = nn.BatchNorm3d(D_out, momentum=momentum)
        self.bn2 = nn.BatchNorm3d(D_out, momentum=momentum)
        self.bn3 = nn.BatchNorm3d(D_out, momentum=momentum)
def __init__(self, D_in, D_out, ks, up=2, activation=F.elu, fact=factorize,
             resid=residual, bn=True, momentum=0.001):
    """Upsampling module: resize-convolution followed by a ConvMod.

    A nearest-neighbor resize-conv is used in place of a transposed
    convolution, with an optional BatchNorm3d between it and the ConvMod.

    NOTE(review): the ``up`` parameter (upsampling factor) is accepted but
    never forwarded to ResizeConv — confirm whether ResizeConv applies a
    fixed scale internally or whether ``up`` should be passed through.

    Args:
        D_in, D_out: input / output channel counts.
        ks: 3D kernel size.
        up: nominal upsampling factor (currently unused here).
        activation: activation stored for use in the forward pass.
        fact, resid, bn, momentum: forwarded configuration flags.
    """
    nn.Module.__init__(self)

    # Resize-conv constructor stands in for a ConvTranspose.
    upsampling_constr = layers.ResizeConv

    self.bn = bn
    self.activation = activation

    padding = layers.pad_size(ks, "same")
    stride = (1, 1, 1)
    # Bias is redundant when batch norm follows the conv.
    use_bias = not bn

    self.upsampling = upsampling_constr(
        D_in, D_out, ks, st=stride, pd=padding, bias=use_bias, mode="nearest")
    if bn:
        self.bn1 = nn.BatchNorm3d(D_out, momentum=momentum)

    self.convmod = ConvMod(D_out, D_out, ks, fact=fact, resid=resid, bn=bn)
def __init__(self, D_in, D_out, ks, st=(1, 1, 1), activation=F.elu,
             fact=factorize):
    """Single biased convolution with an activation stored for forward.

    Args:
        D_in, D_out: input / output channel counts.
        ks: 3D kernel size; padding is computed for "same" output size.
        st: conv stride.
        activation: activation stored for use in the forward pass.
        fact: use the factorized conv constructor when truthy.
    """
    nn.Module.__init__(self)

    padding = layers.pad_size(ks, "same")
    conv_constr = layers.FactConv if fact else layers.Conv

    self.activation = activation
    self.conv = conv_constr(D_in, D_out, ks, st, padding, bias=True)
def __init__(self, D_in, outspec, ks=io_size, st=io_stride):
    """Build one biased output convolution per entry of ``outspec``.

    ``outspec`` should be an OrderedDict mapping output name to its
    channel count.  Each conv is registered on the module as an attribute
    under its name (so nn.Module tracks it as a submodule), and the names
    are recorded, in insertion order, in ``self.output_layers``.

    Args:
        D_in: input channel count shared by all output convs.
        outspec: OrderedDict of ``name -> output channels``.
        ks: kernel size for every output conv.
        st: stride for every output conv.
    """
    nn.Module.__init__(self)

    padding = layers.pad_size(ks, "same")
    self.output_layers = []
    for name, d_out in outspec.items():
        conv = layers.Conv(D_in, d_out, ks, st, padding, bias=True)
        setattr(self, name, conv)
        self.output_layers.append(name)