    # Assumes the usual imports for these snippets: torch.nn as nn,
    # torch.nn.functional as F, torch_scatter's scatter_max/scatter_mean,
    # and the local ResnetBlockFC, FCPlanenet, UNet, UNet3D_latent and
    # positional_encoding modules.
    def __init__(self,
                 c_dim=128,
                 dim=3,
                 hidden_dim=128,
                 scatter_type='max',
                 unet=False,
                 unet_kwargs=None,
                 plane_resolution=None,
                 grid_resolution=None,
                 plane_type='xz',
                 padding=0.1,
                 n_blocks=5,
                 pos_encoding=False,
                 n_channels=3,
                 plane_net='FCPlanenet'):
        super().__init__()
        self.c_dim = c_dim
        self.num_channels = n_channels

        if pos_encoding:
            dim = 60  # the positional encoding expands the 3-D input to 60 dims

        self.fc_pos = nn.Linear(dim, 2 * hidden_dim)
        self.blocks = nn.ModuleList([
            ResnetBlockFC(2 * hidden_dim, hidden_dim) for i in range(n_blocks)
        ])
        self.fc_c = nn.Linear(hidden_dim, c_dim)
        planenet_hidden_dim = hidden_dim
        # NOTE: the plane_net argument is unused; FCPlanenet is hardcoded below
        self.fc_plane_net = FCPlanenet(n_dim=dim, hidden_dim=hidden_dim)

        # Per-plane heads, one pair per channel: the first predicts 3 plane
        # parameters from the PlaneNet feature, the second lifts each
        # 3-vector back to hidden_dim
        self.plane_params = nn.ModuleList(
            [nn.Linear(planenet_hidden_dim, 3) for i in range(n_channels)])

        self.plane_params_hdim = nn.ModuleList(
            [nn.Linear(3, hidden_dim) for i in range(n_channels)])

        self.actvn = nn.ReLU()
        self.hidden_dim = hidden_dim

        if unet:
            self.unet = UNet(c_dim, in_channels=c_dim, **(unet_kwargs or {}))
        else:
            self.unet = None

        self.reso_plane = plane_resolution
        self.reso_grid = grid_resolution
        self.plane_type = plane_type
        self.padding = padding

        if scatter_type == 'max':
            self.scatter = scatter_max
        elif scatter_type == 'mean':
            self.scatter = scatter_mean
        else:
            raise ValueError('incorrect scatter type')

        self.pos_encoding = pos_encoding
        if pos_encoding:
            self.pe = positional_encoding()
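
The positional_encoding module itself is not shown in these snippets, but every example hardcodes dim = 60 when it is enabled, which matches a NeRF-style encoding of a 3-D input over 10 frequency bands (3 coords * 2 functions * 10 bands = 60). A minimal sketch under that assumption (the num_freqs name and the exact frequency schedule are guesses):

import torch
import torch.nn as nn

class positional_encoding(nn.Module):
    """Sketch of a NeRF-style encoding mapping (..., 3) -> (..., 60)."""
    def __init__(self, num_freqs=10):  # 3 * 2 * 10 = 60, matching dim = 60 above
        super().__init__()
        # frequency bands 2^0 * pi, 2^1 * pi, ..., 2^(L-1) * pi
        self.register_buffer('freq_bands', (2.0 ** torch.arange(num_freqs)) * torch.pi)

    def forward(self, p):
        # p: (batch, n_points, 3) -> (batch, n_points, 60)
        feats = [fn(freq * p) for freq in self.freq_bands for fn in (torch.sin, torch.cos)]
        return torch.cat(feats, dim=-1)
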
Example 2
    def __init__(self,
                 c_dim=128,
                 dim=3,
                 hidden_dim=128,
                 scatter_type='max',
                 unet=False,
                 unet_kwargs=None,
                 unet3d=False,
                 unet3d_kwargs=None,
                 plane_resolution=None,
                 grid_resolution=None,
                 plane_type='xz',
                 padding=0.1,
                 n_blocks=5,
                 pos_encoding=False):
        super().__init__()
        self.c_dim = c_dim

        if pos_encoding:
            dim = 60  # the positional encoding expands the 3-D input to 60 dims
        self.fc_pos = nn.Linear(dim, 2 * hidden_dim)
        self.blocks = nn.ModuleList([
            ResnetBlockFC(2 * hidden_dim, hidden_dim) for i in range(n_blocks)
        ])
        self.fc_c = nn.Linear(hidden_dim, c_dim)

        self.actvn = nn.ReLU()
        self.hidden_dim = hidden_dim

        if unet:
            self.unet = UNet(c_dim, in_channels=c_dim, **(unet_kwargs or {}))
        else:
            self.unet = None

        if unet3d:
            # UNet3D_latent is used in place of the earlier (commented-out) UNet3D
            self.unet3d = UNet3D_latent(**(unet3d_kwargs or {}))
        else:
            self.unet3d = None

        self.reso_plane = plane_resolution
        self.reso_grid = grid_resolution
        self.plane_type = plane_type
        self.padding = padding

        if scatter_type == 'max':
            self.scatter = scatter_max
        elif scatter_type == 'mean':
            self.scatter = scatter_mean
        else:
            raise ValueError('incorrect scatter type')

        self.pos_encoding = pos_encoding
        if pos_encoding:
            self.pe = positional_encoding()
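
Both encoders pick their pooling operator from torch_scatter; the scatter_type option decides how the features of points that fall into the same plane/grid cell are reduced. A toy illustration of the difference (shapes and values here are illustrative only):

import torch
from torch_scatter import scatter_max, scatter_mean

feats = torch.randn(6, 4)                 # features of 6 points, 4 dims each
index = torch.tensor([0, 0, 1, 1, 1, 2])  # cell id each point falls into

pooled_mean = scatter_mean(feats, index, dim=0)        # (3, 4): average per cell
pooled_max, argmax = scatter_max(feats, index, dim=0)  # (3, 4); also returns argmax indices
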
Example 3
    def __init__(self,
                 dim=3,
                 z_dim=128,
                 c_dim=128,
                 hidden_size=256,
                 leaky=False,
                 sample_mode='bilinear',
                 n_blocks=5,
                 pos_encoding=False,
                 padding=0.1):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.n_blocks = n_blocks
        
        if pos_encoding:
            dim = 60  # hardcoded: the positional encoding expands the 3-D input to 60 dims

        if z_dim != 0:
            self.fc_z = nn.ModuleList([
                nn.Linear(z_dim, hidden_size) for i in range(n_blocks)
            ])
            self.conv_layers = nn.ModuleList([
                nn.ConvTranspose2d(2, 6, 3, stride=2, padding=1, output_padding=1),
                nn.ConvTranspose2d(6, 12, 3, stride=2, padding=1, output_padding=1),
                nn.ConvTranspose2d(12, 24, 3, stride=2, padding=1, output_padding=1),
                nn.ConvTranspose2d(24, 48, 3, stride=2, padding=1, output_padding=1),
                nn.ConvTranspose2d(48, 96, 3, stride=2, padding=1, output_padding=1),
            ])
        if c_dim != 0:
            self.fc_c = nn.ModuleList([
                nn.Linear(c_dim, hidden_size) for i in range(n_blocks)
            ])

        self.fc_p = nn.Linear(dim, hidden_size)

        self.blocks = nn.ModuleList([
            ResnetBlockFC(hidden_size) for i in range(n_blocks)
        ])

        self.fc_out = nn.Linear(hidden_size, 1)

        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)

        self.sample_mode = sample_mode
        self.padding = padding

        self.pos_encoding = pos_encoding
        if pos_encoding:
            self.pe = positional_encoding()
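
A quick check of the z-branch upsampling arithmetic: with kernel 3, stride 2, padding 1, and output_padding 1, each ConvTranspose2d maps H to (H - 1) * 2 - 2 * 1 + (3 - 1) + 1 + 1 = 2H, so the five-layer stack upsamples 32x while growing channels 2 -> 96 (the input spatial size below is illustrative):

import torch
import torch.nn as nn

up = nn.Sequential(*[
    nn.ConvTranspose2d(c_in, c_out, 3, stride=2, padding=1, output_padding=1)
    for c_in, c_out in [(2, 6), (6, 12), (12, 24), (24, 48), (48, 96)]
])
print(up(torch.randn(1, 2, 4, 4)).shape)  # torch.Size([1, 96, 128, 128])
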
Example 4
    def __init__(self,
                 dim=3,
                 c_dim=128,
                 hidden_size=256,
                 n_blocks=5,
                 leaky=False,
                 sample_mode='bilinear',
                 padding=0.1,
                 pos_encoding=False):
        super().__init__()
        self.c_dim = c_dim
        self.n_blocks = n_blocks

        if c_dim != 0:
            self.fc_c = nn.ModuleList(
                [nn.Linear(c_dim, hidden_size) for i in range(n_blocks)])

        if pos_encoding:
            dim = 60  # the positional encoding expands the 3-D input to 60 dims

        self.fc_p = nn.Linear(dim, hidden_size)

        self.blocks = nn.ModuleList(
            [ResnetBlockFC(hidden_size) for i in range(n_blocks)])

        self.fc_out = nn.Linear(hidden_size, 1)

        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)

        self.sample_mode = sample_mode
        self.padding = padding

        self.pos_encoding = pos_encoding
        if pos_encoding:
            self.pe = positional_encoding()
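
sample_mode and padding are stored but only used in the forward pass, which is not shown. In decoders of this style the per-point conditioning feature is typically gathered from a 2-D feature plane with F.grid_sample; a hypothetical helper illustrating where sample_mode ends up (the function name and the [-1, 1] normalization convention are assumptions):

import torch.nn.functional as F

def sample_plane_feature(p_nor, plane_feat, sample_mode='bilinear'):
    # p_nor:      (batch, n_points, 2) query coordinates normalized to [-1, 1]
    # plane_feat: (batch, c_dim, H, W) feature plane from the encoder
    grid = p_nor.unsqueeze(1)  # (batch, 1, n_points, 2), as grid_sample expects
    feat = F.grid_sample(plane_feat, grid, mode=sample_mode,
                         padding_mode='border', align_corners=True)
    return feat.squeeze(2)     # (batch, c_dim, n_points)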