def __init__(self, encoder: nn.Module, n_classes: int, blur: bool = False,
             blur_final: bool = True, self_attention: bool = False,
             y_range: Optional[Tuple[float, float]] = None,
             last_cross: bool = True, bottle: bool = False, **kwargs):
    """Build a dynamic U-Net with hypercolumns on top of `encoder`.

    A dummy forward pass through `encoder` probes the feature-map sizes at
    each resolution change; one `UnetBlock` is created per change, and a
    `Hook` records each block's output so `Hcolumns` can concatenate all
    scales before the final 1x1 classifier conv.

    Args:
        encoder: backbone whose intermediate activations feed the skip paths.
        n_classes: number of output channels of the final 1x1 conv.
        blur: enable blur upsampling in the decoder blocks.
        blur_final: also blur in the last decoder block.
        self_attention: add self-attention in the third-from-last block.
        y_range: if given, squash outputs with `SigmoidRange(*y_range)`.
        last_cross: add a dense cross-connection with the raw input.
        bottle: use a bottleneck in the final res block.
    """
    # NOTE(review): relies on a module-level `args` object for the input
    # size — confirm `args.size` is always defined before construction.
    imsize = (args.size, args.size)
    sfs_szs = model_sizes(encoder, size=imsize)
    sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
    self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
    x = dummy_eval(encoder, imsize).detach()
    ni = sfs_szs[-1][1]
    middle_conv = nn.Sequential(conv_layer(ni, ni * 2, **kwargs),
                                conv_layer(ni * 2, ni, **kwargs)).eval()
    x = middle_conv(x)
    layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv]
    # Hypercolumn hooks: capture every decoder-scale activation (detach=False
    # so gradients flow through the hooked tensors).
    self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
    hc_c = [x.shape[1]]
    for i, idx in enumerate(sfs_idxs):
        not_final = i != len(sfs_idxs) - 1
        up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1])
        do_blur = blur and (not_final or blur_final)
        sa = self_attention and (i == len(sfs_idxs) - 3)
        # BUG FIX: pass the computed `do_blur` (previously the raw `blur`
        # flag was forwarded, leaving `do_blur`/`blur_final` without effect).
        unet_block = UnetBlock(up_in_c, x_in_c, self.sfs[i],
                               final_div=not_final, blur=do_blur,
                               self_attention=sa, **kwargs).eval()
        layers.append(unet_block)
        x = unet_block(x)
        self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
        hc_c.append(x.shape[1])
    ni = x.shape[1]
    # Extra upscale if the encoder's first feature map is below input size.
    if imsize != sfs_szs[0][-2:]:
        layers.append(PixelShuffle_ICNR(ni, **kwargs))
    if last_cross:
        layers.append(MergeLayer(dense=True))
        ni += in_channels(encoder)
        layers.append(res_block(ni, bottle=bottle, **kwargs))
    hc_c.append(ni)
    layers.append(Hcolumns(self.hc_hooks, hc_c))
    # Final 1x1 conv consumes the concatenation of all hypercolumn scales.
    layers += [conv_layer(ni * len(hc_c), n_classes, ks=1,
                          use_activ=False, **kwargs)]
    if y_range is not None:
        layers.append(SigmoidRange(*y_range))
    super().__init__(*layers)
def __init__(self, vol_size, num_layers, ni=1, nf=16, hidden=200,
             num_classes=2, drop_conv=0, drop_out=0, separable_convs=False,
             concat_pool=False, self_attention=False, do_pooling=True):
    """Stack `num_layers` conv_pool stages over a 3D volume, then classify.

    Channel count doubles at every stage. Per-axis strides are 2 for axes
    still larger than half the current largest dimension, else 1; the last
    stage uses stride 1 everywhere.
    """

    def _axis_strides(dims):
        # Downsample only the axes that are still "large" for this volume.
        limit = max(dims) // 2
        return [2 if d > limit else 1 for d in dims]

    blocks = [conv_pool(ni, nf, stride=_axis_strides(vol_size),
                        do_pooling=do_pooling, separable=separable_convs)]
    # Probe tensor tracks the spatial shape through the network.
    probe = dummy_eval(blocks[0], vol_size).detach()
    for stage in range(num_layers - 1):
        is_last = stage == num_layers - 2
        stride = 1 if is_last else _axis_strides(probe.shape[-3:])
        use_sa = self_attention and (stage == num_layers - 4)
        blocks.append(conv_pool(nf, nf * 2, stride=stride,
                                do_pooling=do_pooling,
                                separable=separable_convs,
                                self_attention=use_sa))
        nf *= 2
        probe = blocks[-1].eval()(probe)
    if concat_pool:
        pool = AdaptiveConcatPool3d()
        nf *= 2  # concat pooling doubles the feature count
    else:
        pool = nn.AdaptiveMaxPool3d(1)
    blocks += [pool, Flatten(),
               ClassifierHead([nf, hidden, num_classes],
                              [drop_conv, drop_out])]
    super().__init__(*blocks)
def __init__(self, vol_size, growth_rate=32, num_blocks=[2, 2, 2, 2], ni=1,
             nf=16, bn_size=4, num_classes=2, drop_rate=0, drop_out=0,
             concat_pool=False, final_pool=nn.AdaptiveAvgPool3d):
    """DenseNet-style 3D classifier: conv stem, dense blocks, pooled head.

    Each non-final dense block is followed by a transition that halves the
    channel count; the probe tensor is pushed through it to track shapes.
    (`num_blocks` default is a list but is never mutated, so it is safe.)
    """
    # Stem: strided 7x7x7 conv then overlapping max-pool.
    modules = [conv_pool(ni, nf, ks=7, stride=2, do_pooling=False),
               nn.MaxPool3d(kernel_size=3, stride=2, padding=1)]
    probe = dummy_eval(modules[0], vol_size).detach()
    probe = modules[-1].eval()(probe)
    last = len(num_blocks) - 1
    for block_idx, n_layers in enumerate(num_blocks):
        is_last = block_idx == last
        if is_last:
            stride = 1
        else:
            dims = probe.shape[-3:]
            limit = max(dims) // 2
            stride = [2 if d > limit else 1 for d in dims]
        modules.append(self.make_dense_layer(nf, n_layers, stride=stride,
                                             bn_size=bn_size,
                                             growth_rate=growth_rate,
                                             drop_rate=drop_rate,
                                             transition=not is_last))
        nf += n_layers * growth_rate
        if not is_last:
            nf //= 2  # transition halves the channels
            # Run the transition's last sub-module to update the probe shape.
            probe = modules[-1][-1][-1].eval()(probe)
    if concat_pool:
        pool = AdaptiveConcatPool3d()
        nf *= 2  # concat pooling doubles the feature count
    else:
        pool = final_pool(1)
    modules += [pool, Flatten(),
                ClassifierHead([nf, num_classes], [drop_out])]
    super().__init__(*modules)
def __init__(self, encoder=None, n_classes=2, last_filters=32,
             imsize=(256, 256), y_range=None, **kwargs):
    """U-Net with `DecoderBlock` decoder and hypercolumn head.

    Probes `encoder` feature sizes, builds one decoder block per resolution
    change (skipping the last index), and concatenates all hooked decoder
    outputs via `Hcolumns` before the final block.
    """
    self.n_classes = n_classes
    layers = nn.ModuleList()
    # --- Encoder ---
    sfs_szs = model_sizes(encoder, size=imsize)
    sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
    self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
    layers.append(encoder)
    x = dummy_eval(encoder, imsize).detach()
    self.hc_hooks = []
    hc_c = []
    ni = sfs_szs[-1][1]
    middle_conv = nn.Sequential(conv_layer(ni, ni * 2),
                                conv_layer(ni * 2, ni)).eval()
    x = middle_conv(x)
    layers.extend([batchnorm_2d(ni), nn.ReLU(), middle_conv])
    # --- Decoder ---
    n_filters = [64, 128, 256, 512]
    n = len(n_filters)
    is_deconv = True
    for i, idx in enumerate(sfs_idxs[:-1]):
        # Input channels: mean of the two neighbouring filter widths;
        # output channels: the hooked encoder stage's channel count.
        in_c = int(n_filters[n - i - 1] + n_filters[n - i - 2]) // 2
        out_c = int(sfs_szs[idx][1])
        dec_bloc = DecoderBlock(in_c, out_c, self.sfs[i],
                                is_deconv, True).eval()
        layers.append(dec_bloc)
        x = dec_bloc(x)
        self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
        hc_c.append(x.shape[1])
    ni = x.shape[1]
    layers.append(PixelShuffle_ICNR(n_filters[0], scale=2))
    layers.append(Hcolumns(self.hc_hooks, hc_c))
    fin_block = FinalBlock(ni * (len(hc_c) + 1), last_filters, n_classes)
    layers.append(fin_block)
    if y_range is not None:
        layers.append(SigmoidRange(*y_range))
    super().__init__(*layers)
def __init__(self, encoder: nn.Module, n_classes: int, blur: bool = False,
             blur_final=True, self_attention: bool = False,
             y_range: Optional[Tuple[float, float]] = None,
             last_cross: bool = True, bottle: bool = False,
             norm_type: Optional[NormType] = NormType.Batch,
             nf_factor: int = 1, **kwargs):
    """Build a wide dynamic U-Net (`UnetBlockWide` decoder) over `encoder`.

    Decoder width is `512 * nf_factor` (halved for the final block). When
    `norm_type` is spectral, an extra batch-norm is inserted in the custom
    conv layers (`extra_bn`).

    Args:
        encoder: backbone providing skip activations (hooked, detach=False).
        n_classes: output channels of the final 1x1 conv.
        blur/blur_final: blur upsampling controls for decoder blocks.
        self_attention: self-attention in the third-from-last block.
        y_range: optional `SigmoidRange` output squashing.
        last_cross: dense cross-connection with the raw input + res block.
        bottle: bottleneck in the final res block.
        norm_type: normalization for the custom conv layers.
        nf_factor: multiplier on the fixed 512-channel decoder width.
    """
    nf = 512 * nf_factor
    extra_bn = norm_type == NormType.Spectral
    imsize = (256, 256)
    sfs_szs = model_sizes(encoder, size=imsize)
    sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
    self.sfs = hook_outputs([encoder[i] for i in sfs_idxs], detach=False)
    x = dummy_eval(encoder, imsize).detach()
    ni = sfs_szs[-1][1]
    middle_conv = nn.Sequential(
        custom_conv_layer(ni, ni * 2, norm_type=norm_type,
                          extra_bn=extra_bn, **kwargs),
        custom_conv_layer(ni * 2, ni, norm_type=norm_type,
                          extra_bn=extra_bn, **kwargs),
    ).eval()
    x = middle_conv(x)
    layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv]
    for i, idx in enumerate(sfs_idxs):
        not_final = i != len(sfs_idxs) - 1
        up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1])
        do_blur = blur and (not_final or blur_final)
        sa = self_attention and (i == len(sfs_idxs) - 3)
        n_out = nf if not_final else nf // 2
        # BUG FIX: pass the computed `do_blur` (previously the raw `blur`
        # flag was forwarded, leaving `do_blur`/`blur_final` without effect).
        unet_block = UnetBlockWide(up_in_c, x_in_c, n_out, self.sfs[i],
                                   final_div=not_final, blur=do_blur,
                                   self_attention=sa, norm_type=norm_type,
                                   extra_bn=extra_bn, **kwargs).eval()
        layers.append(unet_block)
        x = unet_block(x)
    ni = x.shape[1]
    # Extra upscale if the encoder's first feature map is below input size.
    if imsize != sfs_szs[0][-2:]:
        layers.append(PixelShuffle_ICNR(ni, **kwargs))
    if last_cross:
        layers.append(MergeLayer(dense=True))
        ni += in_channels(encoder)
        layers.append(res_block(ni, bottle=bottle,
                                norm_type=norm_type, **kwargs))
    layers += [custom_conv_layer(ni, n_classes, ks=1,
                                 use_activ=False, norm_type=norm_type)]
    if y_range is not None:
        layers.append(SigmoidRange(*y_range))
    super().__init__(*layers)
# NOTE(review): this span is a whitespace-collapsed fragment, not a complete
# definition: it contains the tail of `Hcolumns.forward` (bilinear-upsample
# each hooked activation — optionally passed through `self.factorization[i]`
# — to the input's scale, then channel-concat with `x`), followed by the
# header of `DynamicUnet_Hcolumns.__init__`, which is cut off mid-loop
# before the `UnetBlock` construction. Left byte-identical; reformat and
# review once the full definition is in view.
out = [F.interpolate(self.hooks[i].stored if self.factorization is None else self.factorization[i](self.hooks[i].stored), scale_factor=2**(self.n-i), mode='bilinear',align_corners=False) for i in range(self.n)] + [x] return torch.cat(out, dim=1) class DynamicUnet_Hcolumns(SequentialEx): "Create a U-Net from a given architecture." def __init__(self, encoder:nn.Module, n_classes:int, blur:bool=False, blur_final=True, self_attention:bool=False, y_range:Optional[Tuple[float,float]]=None, last_cross:bool=True, bottle:bool=False, **kwargs): imsize = (args.size, args.size) sfs_szs = model_sizes(encoder, size=imsize) sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs))) self.sfs = hook_outputs([encoder[i] for i in sfs_idxs]) x = dummy_eval(encoder, imsize).detach() ni = sfs_szs[-1][1] middle_conv = nn.Sequential(conv_layer(ni, ni*2, **kwargs), conv_layer(ni*2, ni, **kwargs)).eval() x = middle_conv(x) layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv] self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)] hc_c = [x.shape[1]] for i,idx in enumerate(sfs_idxs): not_final = i!=len(sfs_idxs)-1 up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1]) do_blur = blur and (not_final or blur_final) sa = self_attention and (i==len(sfs_idxs)-3)
def __init__(self, encoder: nn.Module, n_classes: int, blur: bool = False,
             blur_final=True, self_attention: bool = False,
             y_range: Optional[Tuple[float, float]] = None,
             last_cross: bool = True, bottle: bool = False, small=True,
             **kwargs):
    """Dynamic U-Net with hypercolumns, with a memory-saving `small` mode.

    In `small` mode the middle block is a single channel-halving conv,
    `UnetBlockSmall` decoders are used, and the middle activation is not
    hooked for the hypercolumn concat; otherwise this mirrors the regular
    hypercolumn U-Net builder.

    Args:
        encoder: backbone providing skip activations.
        n_classes: output channels of the final 1x1 conv.
        blur/blur_final: blur upsampling controls for decoder blocks.
        self_attention: self-attention in the third-from-last block.
        y_range: optional `SigmoidRange` output squashing.
        last_cross: dense cross-connection with the raw input + res block.
        bottle: bottleneck in the final res block.
        small: use the reduced-memory variant described above.
    """
    imsize = (256, 256)
    sfs_szs = model_sizes(encoder, size=imsize)
    sfs_idxs = list(reversed(_get_sfs_idxs(sfs_szs)))
    self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
    x = dummy_eval(encoder, imsize).detach()
    ni = sfs_szs[-1][1]
    if small:
        # Single conv that halves the channel count (memory saving).
        middle_conv_size_down_scale = 2
        middle_conv = conv_layer(ni, ni // middle_conv_size_down_scale,
                                 **kwargs).eval()
    else:
        middle_conv_size_scale = 2
        middle_conv = nn.Sequential(
            conv_layer(ni, ni * middle_conv_size_scale, **kwargs),
            conv_layer(ni * middle_conv_size_scale, ni, **kwargs),
        ).eval()
    x = middle_conv(x)
    layers = [encoder, batchnorm_2d(ni), nn.ReLU(), middle_conv]
    if small:
        self.hc_hooks = []
        hc_c = []
    else:
        self.hc_hooks = [Hook(layers[-1], _hook_inner, detach=False)]
        hc_c = [x.shape[1]]
    for i, idx in enumerate(sfs_idxs):
        final_unet_flag = i == len(sfs_idxs) - 1
        up_in_c, x_in_c = int(x.shape[1]), int(sfs_szs[idx][1])
        # NOTE(review): sibling builders use `not_final` for both flags
        # below; this variant inverts them — confirm that is intentional.
        do_blur = blur and (final_unet_flag or blur_final)
        sa = self_attention and (i == len(sfs_idxs) - 3)
        unet_block_class = UnetBlockSmall if small else UnetBlock
        # BUG FIX: pass the computed `do_blur` (previously the raw `blur`
        # flag was forwarded, leaving `do_blur`/`blur_final` without effect).
        # Also removed a stray debug `print(unet_block)`.
        unet_block = unet_block_class(up_in_c, x_in_c, self.sfs[i],
                                      final_div=final_unet_flag,
                                      blur=do_blur, self_attention=sa,
                                      **kwargs).eval()
        layers.append(unet_block)
        x = unet_block(x)
        # added for hypercolumns (two lines)
        self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
        hc_c.append(x.shape[1])
    ni = x.shape[1]
    # Extra upscale if the encoder's first feature map is below input size.
    if imsize != sfs_szs[0][-2:]:
        layers.append(PixelShuffle_ICNR(ni, **kwargs))
    if last_cross:
        layers.append(MergeLayer(dense=True))
        ni += in_channels(encoder)
        layers.append(res_block(ni, bottle=bottle, **kwargs))
    # added for hypercolumns (two lines)
    hc_c.append(ni)
    layers.append(Hcolumns(self.hc_hooks, hc_c))
    layers += [conv_layer(ni * len(hc_c), n_classes, ks=1,
                          use_activ=False, **kwargs)]
    if y_range is not None:
        layers.append(SigmoidRange(*y_range))
    super().__init__(*layers)
def __init__(self, encoder=None, n_classes=2, last_filters=32,
             imsize=(256, 256), y_range=None, **kwargs):
    """U-Net decoder over a fixed five-stage encoder, with hypercolumns.

    Hooks encoder stages 4..0 (reverse order); the first decoder takes raw
    bottleneck channels, later ones take doubled channels because skip
    features are concatenated; the last decoder has no skip connection.
    """
    self.n_classes = n_classes
    layers = nn.ModuleList()
    # --- Encoder ---
    sfs_idxs = [4, 3, 2, 1, 0]
    self.sfs = hook_outputs([encoder[i] for i in sfs_idxs])
    layers.append(encoder)
    x = dummy_eval(encoder, imsize).detach()
    self.hc_hooks = []
    hc_c = []
    # --- Decoder ---
    n_filters = [128, 256, 512, 1024, 2048]
    n = len(n_filters)
    is_deconv = True
    for i, idx in enumerate(sfs_idxs[:-1]):
        in_c = (1 if i == 0 else 2) * n_filters[n - i - 1]
        out_c = n_filters[n - i - 2]
        scale = i != 3  # the fourth decoder stage keeps its resolution
        dec_bloc = DecoderBlock(in_c, out_c, self.sfs[i + 1],
                                is_deconv, scale).eval()
        layers.append(dec_bloc)
        x = dec_bloc(x)
        self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
        hc_c.append(x.shape[1])
    # Last decoder: no skip connection, always upscales.
    dec_bloc = DecoderBlock(n_filters[0] + 64, n_filters[0], None,
                            is_deconv, True).eval()
    layers.append(dec_bloc)
    x = dec_bloc(x)
    self.hc_hooks.append(Hook(layers[-1], _hook_inner, detach=False))
    hc_c.append(x.shape[1])
    ni = x.shape[1]
    layers.append(PixelShuffle_ICNR(n_filters[0], scale=2))
    layers.append(Hcolumns(self.hc_hooks, hc_c))
    fin_block = FinalBlock(ni * (len(hc_c) + 1), last_filters, n_classes)
    layers.append(fin_block)
    if y_range is not None:
        layers.append(SigmoidRange(*y_range))
    super().__init__(*layers)