def __init__(self, t_shift, model="ProgressiveDilated", model_path=None, kernel_size=3,
             train_unet_decoder=False, train_unet=False):
    super().__init__()
    # Select the 2D U-Net backbone.
    if model == "ProgressiveDilated":
        self.unet = UNet_ProgressiveDilated(in_channels=1, out_channels=3)
    elif model == "Dilated":
        self.unet = UNet_Dilated(in_channels=1, out_channels=3)
    elif model == "Baseline":
        self.unet = UNet_Baseline(in_channels=1, out_channels=3)
    elif model == "Original_with_BatchNorm":
        self.unet = UNet_Original_with_BatchNorm(in_channels=1, out_channels=3)
    else:
        self.unet = UNet_Original(in_channels=1, out_channels=3)

    # Optionally initialise the backbone from pretrained weights.
    if model_path:
        print("load model -- mode: {}".format(model))
        self.unet.load_state_dict(torch.load(model_path))

    self.train_unet_decoder = train_unet_decoder
    self.train_unet = train_unet

    # Temporal aggregation over the 32-channel U-Net features: two 3D convolutions
    # with kernel (t_shift, k, k). The (size - 1) // 2 padding preserves the time,
    # height, and width dimensions (t_shift and kernel_size are assumed to be odd).
    self.conv_temp_block = nn.Sequential(
        nn.Conv3d(in_channels=32, out_channels=32,
                  kernel_size=(t_shift, kernel_size, kernel_size),
                  padding=((t_shift - 1) // 2, (kernel_size - 1) // 2, (kernel_size - 1) // 2)),
        nn.BatchNorm3d(32),
        nn.PReLU(),
        nn.Conv3d(in_channels=32, out_channels=32,
                  kernel_size=(t_shift, kernel_size, kernel_size),
                  padding=((t_shift - 1) // 2, (kernel_size - 1) // 2, (kernel_size - 1) // 2)),
    )
    self.last_conv = nn.Conv2d(in_channels=32, out_channels=3, kernel_size=3, padding=1)
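# The (size - 1) // 2 padding above is the standard "same" scheme for odd kernels.
# Minimal standalone check (not from the original source; the feature shapes are
# assumed) that such a 3D convolution leaves (time, height, width) unchanged:
import torch
import torch.nn as nn

t_shift, kernel_size = 3, 3  # assumed odd, as the padding formula requires
conv3d = nn.Conv3d(32, 32, kernel_size=(t_shift, kernel_size, kernel_size),
                   padding=((t_shift - 1) // 2, (kernel_size - 1) // 2, (kernel_size - 1) // 2))
x = torch.randn(2, 32, 5, 64, 64)  # (batch, channels, time, height, width), dummy sizes
assert conv3d(x).shape == x.shape  # stride 1 with "same" padding preserves all dims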
def __init__(self, model="ProgressiveDilated", model_path=None, kernel_size=3, num_layers=1,
             train_unet_decoder=False, train_unet=False):
    super().__init__()
    # Select the 2D U-Net backbone.
    if model == "ProgressiveDilated":
        self.unet = UNet_ProgressiveDilated(in_channels=1, out_channels=3)
    elif model == "Dilated":
        self.unet = UNet_Dilated(in_channels=1, out_channels=3)
    elif model == "Baseline":
        self.unet = UNet_Baseline(in_channels=1, out_channels=3)
    elif model == "Original_with_BatchNorm":
        self.unet = UNet_Original_with_BatchNorm(in_channels=1, out_channels=3)
    else:
        self.unet = UNet_Original(in_channels=1, out_channels=3)

    # Optionally initialise the backbone from pretrained weights.
    if model_path:
        print("load model -- mode: {}".format(model))
        self.unet.load_state_dict(torch.load(model_path))

    self.train_unet_decoder = train_unet_decoder
    self.train_unet = train_unet

    # Bidirectional temporal aggregation: one ConvLSTM per time direction over
    # the 32-channel, 256x256 U-Net feature maps.
    self.convlstm_forward = ConvLSTM(input_size=(256, 256), input_dim=32, hidden_dim=32,
                                     kernel_size=(kernel_size, kernel_size),
                                     num_layers=num_layers, batch_first=False,
                                     bias=True, return_all_layers=False)
    self.convlstm_backward = ConvLSTM(input_size=(256, 256), input_dim=32, hidden_dim=32,
                                      kernel_size=(kernel_size, kernel_size),
                                      num_layers=num_layers, batch_first=False,
                                      bias=True, return_all_layers=False)
    # The two directions are fused channel-wise, hence 32 * 2 input channels.
    self.last_conv = nn.Conv2d(in_channels=32 * 2, out_channels=3, kernel_size=3, padding=1)
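# last_conv's 32 * 2 input channels imply that the forward and backward ConvLSTM
# hidden states are concatenated along the channel dimension. Hypothetical fusion
# sketch with dummy tensors standing in for the two ConvLSTM outputs (the shapes
# and the concatenation order are assumptions, not taken from the original source):
import torch
import torch.nn as nn

b, h, w = 2, 256, 256
h_fwd = torch.randn(b, 32, h, w)  # assumed: final hidden state, forward direction
h_bwd = torch.randn(b, 32, h, w)  # assumed: final hidden state, backward direction
last_conv = nn.Conv2d(in_channels=32 * 2, out_channels=3, kernel_size=3, padding=1)
logits = last_conv(torch.cat([h_fwd, h_bwd], dim=1))  # 32 + 32 = 64 channels in
assert logits.shape == (b, 3, h, w)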
def __init__(self, n_class):
    super().__init__()
    self.model = UNet_Dilated(1, n_class)
    # Replace the first convolution of the first three encoder blocks with large
    # 10x10 kernels; the dilated 4x4 convolution after each one is kept. The
    # paddings are chosen so that block 0 preserves the spatial size and the two
    # stride-2 blocks halve it.
    self.model.model.conv_down[0].conv_layers = nn.ModuleList([
        nn.Conv2d(1, 32, 10, padding=4, dilation=1, bias=False),
        nn.Conv2d(32, 32, 4, padding=5, dilation=3),
    ])
    self.model.model.conv_down[1].conv_layers = nn.ModuleList([
        nn.Conv2d(32, 128, 10, stride=2, dilation=2, padding=5 + 4 - 1, bias=False),
        nn.Conv2d(128, 128, 4, padding=5, dilation=3),
    ])
    self.model.model.conv_down[2].conv_layers = nn.ModuleList([
        nn.Conv2d(128, 256, 10, stride=2, dilation=4, padding=5 + 12, bias=False),
        nn.Conv2d(256, 256, 4, padding=5, dilation=3),
    ])
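# Standalone shape check for the padding arithmetic above (the input resolution
# is an assumption). The even 10x10 kernel shrinks each spatial dim by one, the
# dilated 4x4 kernel (effective extent 10, padding 5) grows it back, and the
# stride-2 block lands exactly on half resolution:
import torch
import torch.nn as nn

block0 = nn.Sequential(nn.Conv2d(1, 32, 10, padding=4, dilation=1, bias=False),
                       nn.Conv2d(32, 32, 4, padding=5, dilation=3))
block1 = nn.Sequential(nn.Conv2d(32, 128, 10, stride=2, dilation=2, padding=5 + 4 - 1, bias=False),
                       nn.Conv2d(128, 128, 4, padding=5, dilation=3))
x = torch.randn(1, 1, 256, 256)
y0 = block0(x)
assert y0.shape[-2:] == (256, 256)          # block 0 preserves resolution
assert block1(y0).shape[-2:] == (128, 128)  # stride-2 block halves it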
def __init__(self, n_class):
    super().__init__()
    self.model = UNet_Dilated(1, n_class)
    self.model.model.conv_down[0].conv_layers = nn.ModuleList([
        Conv2d_with_GivenKernel(padding=None),
        nn.Conv2d(32, 32, 4, padding=5, dilation=3),
    ])
def __init__(self, n_class, kernel_size=20):
    super().__init__()
    self.model = UNet_Dilated(1, n_class)
    self.model.model.conv_down[0].conv_layers = nn.ModuleList([
        nn.Conv2d(1, 32, kernel_size, padding=kernel_size // 2 - 1, dilation=1, bias=False),
        nn.Conv2d(32, 32, 4, padding=5, dilation=3),
    ])
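# With an even kernel there is no exact "same" padding at stride 1: for any even
# kernel_size, padding kernel_size // 2 - 1 gives out = in + (kernel_size - 2)
# - kernel_size + 1 = in - 1, i.e. one pixel lost per spatial dimension, which
# the dilated 4x4 convolution then restores (as checked above). Quick standalone
# verification of the shrink-by-one (the input resolution is an assumption):
import torch
import torch.nn as nn

kernel_size = 20
conv = nn.Conv2d(1, 32, kernel_size, padding=kernel_size // 2 - 1, bias=False)
x = torch.randn(1, 1, 256, 256)
assert conv(x).shape[-2:] == (255, 255)  # even kernel: shrinks each dim by one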