def __init__(self, **kwargs):
    """Layer taking an RGB image and a 7-channel decoder output (both NHWC)."""
    super().__init__(**kwargs)
    image_spec = layers.InputSpec(ndim=4, axes={-1: 3})    # image
    decoder_spec = layers.InputSpec(ndim=4, axes={-1: 7})  # decoder output
    self.input_spec = [image_spec, decoder_spec]
    # Fixed mixing/regularization factor — presumably a lambda used later
    # in call(); TODO confirm against the layer body.
    self.la = 0.1
def build(self, input_shape):
    """Create the guide network and box filter once channel counts are known."""
    channels = [shape[-1] for shape in input_shape]
    if None in channels:
        raise ValueError(
            'Channel dimension of the inputs should be defined. Found `None`.'
        )
    self.input_spec = [
        layers.InputSpec(ndim=4, dtype='uint8', axes={-1: channels[0]}),
        layers.InputSpec(ndim=4, axes={-1: channels[1]})
    ]

    # First stage is optionally normalized, depending on configuration.
    if self.normalize:
        first_stage = ConvNormRelu(
            self.filters, self.kernel_size,
            activation=self.activation, standardized=self.standardized)
    else:
        first_stage = layers.Conv2D(
            self.filters, self.kernel_size,
            padding='same', activation=self.activation)

    # Project back to the decoder's channel count.
    self.guide = models.Sequential([
        first_stage,
        layers.Conv2D(channels[1], self.kernel_size, padding='same')
    ])
    self.box = BoxFilter(self.radius)
    super().build(input_shape)
def __init__(self, filters, depths, **kwargs):
    """Layer taking a uint8 RGB image and a uint8 single-channel trimap."""
    super().__init__(**kwargs)
    image_spec = layers.InputSpec(ndim=4, axes={-1: 3}, dtype='uint8')   # image
    trimap_spec = layers.InputSpec(ndim=4, axes={-1: 1}, dtype='uint8')  # trimap
    self.input_spec = [image_spec, trimap_spec]
    self.filters = filters
    self.depths = depths
def __init__(self, num_layers, out_features, **kwargs):
    """Layer taking two 4D inputs: main features and a skip connection."""
    super().__init__(**kwargs)
    features_spec = layers.InputSpec(ndim=4)  # features
    skip_spec = layers.InputSpec(ndim=4)      # skip
    self.input_spec = [features_spec, skip_spec]
    self.num_layers = num_layers
    self.out_features = out_features
def __init__(self, low_filters, decoder_filters, **kwargs):
    """Decoder layer fusing low-level and high-level 4D feature maps."""
    super().__init__(**kwargs)
    low_spec = layers.InputSpec(ndim=4)   # low level features
    high_spec = layers.InputSpec(ndim=4)  # high level features
    self.input_spec = [low_spec, high_spec]
    self.low_filters = low_filters
    self.decoder_filters = decoder_filters
def __init__(self, filters, main, **kwargs):
    """Two-branch layer; `main` selects which branch (0 or 1) is primary.

    Args:
        filters: output filter count for the main branch.
        main: index of the main branch; must be 0 or 1.

    Raises:
        ValueError: if `main` is not 0 or 1.
    """
    super().__init__(**kwargs)
    # Validate before setting up any state, so a bad config fails fast.
    # (Fixed grammar in the user-facing message: "equals" -> "equal".)
    if main not in {0, 1}:
        raise ValueError('Parameter "main" should equal 0 or 1')
    self.input_spec = [layers.InputSpec(ndim=4), layers.InputSpec(ndim=4)]
    self.filters = filters
    self.main = main
def __init__(self, filters, **kwargs):
    """Layer accepting three 4D feature inputs."""
    super().__init__(**kwargs)
    self.input_spec = [layers.InputSpec(ndim=4) for _ in range(3)]
    self.filters = filters
def __init__(self, kernel_size, denoise, **kwargs):
    """Layer taking an encoder map and a single-channel decoder map."""
    super().__init__(**kwargs)
    encoder_spec = layers.InputSpec(ndim=4)                 # encoder map
    decoder_spec = layers.InputSpec(ndim=4, axes={-1: 1})   # decoder map
    self.input_spec = [encoder_spec, decoder_spec]
    self.kernel_size = kernel_size
    self.denoise = denoise
def __init__(self, psp_sizes, **kwargs):
    """Layer taking three uint8 NHWC inputs: image, mask, previous prediction."""
    super().__init__(**kwargs)
    # Channel counts: 3 (image), 1 (mask), 1 (previous prediction).
    self.input_spec = [
        layers.InputSpec(ndim=4, axes={-1: c}, dtype='uint8')
        for c in (3, 1, 1)
    ]
    self.psp_sizes = psp_sizes
def __init__(self, filters, deformable_groups=8, **kwargs):
    """Layer fusing a coarse and a fine 4D feature map."""
    super().__init__(**kwargs)
    coarse_spec = layers.InputSpec(ndim=4)  # coarse
    fine_spec = layers.InputSpec(ndim=4)    # fine
    self.input_spec = [coarse_spec, fine_spec]
    self.filters = filters
    self.deformable_groups = deformable_groups
def __init__(self, bone_arch, bone_init, pool_scales, **kwargs):
    """Model taking three uint8 NHWC inputs: image, twomap and distance."""
    super().__init__(**kwargs)
    # Channel counts: 3 (image), 2 (twomap), 6 (distance).
    self.input_spec = [
        layers.InputSpec(ndim=4, axes={-1: c}, dtype='uint8')
        for c in (3, 2, 6)
    ]
    self.pool_scales = pool_scales
    self.bone_arch = bone_arch
    self.bone_init = bone_init
def build(self, input_shape):
    """Create the two-branch (high/low resolution) sublayers.

    Expects two 4D inputs; which branch produces the final output is
    selected by `self.main` (0 = high branch, 1 = low branch), set in
    `__init__`.
    """
    self.channels_h = input_shape[0][-1]
    self.channels_l = input_shape[1][-1]
    if self.channels_h is None or self.channels_l is None:
        raise ValueError(
            'Channel dimension of the inputs should be defined. Found `None`.'
        )
    # Pin the discovered channel counts for subsequent calls.
    self.input_spec = [
        layers.InputSpec(ndim=4, axes={-1: self.channels_h}),
        layers.InputSpec(ndim=4, axes={-1: self.channels_l})
    ]
    # Both branches work at the smaller of the two channel counts.
    min_channels = min(self.channels_h, self.channels_l)
    self.relu = layers.ReLU()
    self.pool = layers.AveragePooling2D(2, strides=2, padding='same')
    # stage 0
    self.cbr_hh0 = ConvNormRelu(min_channels, 3)
    self.cbr_ll0 = ConvNormRelu(min_channels, 3)
    # stage 1: cross-connections between high (h) and low (l) branches
    self.conv_hh1 = SameConv(min_channels, 3)
    self.conv_hl1 = SameConv(min_channels, 3)
    self.conv_lh1 = SameConv(min_channels, 3)
    self.conv_ll1 = SameConv(min_channels, 3)
    self.bn_l1 = layers.BatchNormalization()
    self.bn_h1 = layers.BatchNormalization()
    if self.main == 0:
        # Main output comes from the high-resolution branch.
        # stage 2
        self.conv_hh2 = SameConv(min_channels, 3)
        self.conv_lh2 = SameConv(min_channels, 3)
        self.bn_h2 = layers.BatchNormalization()
        # stage 3
        self.conv_hh3 = SameConv(self.filters, 3)
        self.bn_h3 = layers.BatchNormalization()
        self.identity = SameConv(self.filters, 1)
    elif self.main == 1:
        # Main output comes from the low-resolution branch.
        # stage 2
        self.conv_hl2 = SameConv(min_channels, 3)
        self.conv_ll2 = SameConv(min_channels, 3)
        self.bn_l2 = layers.BatchNormalization()
        # stage 3
        self.conv_ll3 = SameConv(self.filters, 3)
        self.bn_l3 = layers.BatchNormalization()
        self.identity = SameConv(self.filters, 1)
    super().build(input_shape)
def __init__(self, classes, units, fines, residual=False, **kwargs):
    """Head refining coarse class features with fine-grained feature maps.

    Args:
        classes: channel count of the coarse (class logits) input.
        units: hidden unit configuration.
        fines: number of fine-grained feature inputs; must be >= 1.
        residual: whether to use a residual connection.

    Raises:
        ValueError: if `fines` is less than 1.
    """
    super().__init__(**kwargs)
    # Validate before `fines` is used to size the input-spec list,
    # so a bad config fails fast instead of after partial setup.
    if fines < 1:
        raise ValueError('At least one fine grained feature map required')
    self.input_spec = [layers.InputSpec(ndim=3, axes={-1: classes})]  # coarse features
    self.input_spec += [layers.InputSpec(ndim=3) for _ in range(fines)]  # fine grained features
    self.classes = classes
    self.units = units
    self.fines = fines
    self.residual = residual
def __init__(self, method=tf.image.ResizeMethod.BILINEAR, antialias=False, **kwargs):
    """Layer taking target and sample 4D tensors; autocasting is disabled."""
    kwargs['autocast'] = False
    super().__init__(**kwargs)
    targets_spec = layers.InputSpec(ndim=4)  # targets
    samples_spec = layers.InputSpec(ndim=4)  # samples
    self.input_spec = [targets_spec, samples_spec]
    self.method = method
    self.antialias = antialias
def __init__(self, align_corners, mode='bilinear', **kwargs):
    """Sampling layer; takes a 4D feature map and a 3D 2-channel input.

    The second input is presumably per-point sampling coordinates —
    confirm against `call`.
    """
    kwargs['autocast'] = False
    super().__init__(**kwargs)
    feature_spec = layers.InputSpec(ndim=4)
    coords_spec = layers.InputSpec(ndim=3, axes={-1: 2})
    self.input_spec = [feature_spec, coords_spec]
    if mode not in {'bilinear', 'nearest'}:
        raise ValueError(
            'Wrong interpolation mode. Only "bilinear" and "nearest" supported'
        )
    self.align_corners = align_corners
    self.mode = mode
def __init__(self, classes, bone_arch, bone_init, bone_train, **kwargs):
    """Model over a uint8 NHWC image batch; stores backbone configuration."""
    super().__init__(**kwargs)
    self.input_spec = layers.InputSpec(ndim=4, dtype='uint8')
    self.classes = classes
    # Backbone architecture name, weight init scheme and trainability flag.
    self.bone_arch = bone_arch
    self.bone_init = bone_init
    self.bone_train = bone_train
def build(self, input_shape):
    """Instantiate the backbone with the requested feature scales.

    `self._config` maps architecture name to (model factory, per-scale
    feature endpoints); `self._scales` lists the scales those endpoints
    correspond to. A `None` endpoint means the scale is unavailable for
    the chosen architecture.
    """
    if 'channels_last' != backend.image_data_format():
        raise ValueError('Only NHWC mode (channels last) supported')
    channel_size = input_shape[-1]
    if channel_size is None:
        raise ValueError(
            'Channel dimension of the inputs should be defined. Found `None`.'
        )
    self.input_spec = layers.InputSpec(ndim=4, axes={-1: channel_size}, dtype='uint8')
    bone_model, default_feats = self._config[self.arch]
    if self.scales is None:
        # No explicit scales requested: take every available endpoint.
        use_feats = list(filter(None, default_feats))
    else:
        # Map each requested scale to its endpoint; raises ValueError
        # (from list.index) if a scale is not known at all.
        feats_idx = [self._scales.index(sc) for sc in self.scales]
        use_feats = [default_feats[fi] for fi in feats_idx]
    if None in use_feats:
        # Known scale but no endpoint for this architecture: report which
        # of the requested scales cannot be served.
        bad_idx = [fi for fi, uf in enumerate(use_feats) if uf is None]
        bad_scales = [self.scales[sc] for sc in bad_idx]
        raise ValueError(
            'Some scales are unavailable: {}'.format(bad_scales))
    self.bone = bone_model(channel_size, use_feats, self.init, self.trainable)
    super().build(input_shape)
def build(self, input_shape):
    """Create weights for a complex-valued convolution.

    The input carries real and imaginary parts concatenated along the
    channel axis, hence the `input_dim * 2` constraint in the input spec.

    Fix: `tf.ifft`/`tf.ifft2d`/`tf.ifft3d` were removed in TensorFlow 2;
    the `tf.signal.*` equivalents exist in both TF1 (>= 1.8) and TF2.
    """
    # input_shapes = input_shape
    # assert (input_shapes[0] == input_shapes[1])
    # input_shape = input_shapes[0]
    # assert len(input_shape) >= 2
    if self.data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1
    if input_shape[channel_axis] is None:
        raise ValueError('The channel dimension of the inputs '
                         'should be defined. Found `None`.')
    input_dim = input_shape[channel_axis] // 2
    kernel_shape = self.kernel_size + (input_dim, self.filters)
    self.kernel_shape = kernel_shape
    self.kernel = self.add_weight(shape=kernel_shape,
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)
    # NOTE(review): real/imaginary parts are taken as slices along the
    # FIRST spatial kernel dimension, which only partitions the weight
    # cleanly when kernel_size[0] == 2 — confirm this matches the
    # intended design (a (2,) + kernel_size + (in, out) shape would be
    # the more conventional split).
    self.kernel_real = self.kernel[0]
    self.kernel_image = self.kernel[1]
    self.kernel_complex = tf.complex(self.kernel_real, self.kernel_image)
    if self.use_bias:
        self.bias_real = self.add_weight(shape=(self.filters,),
                                         initializer=self.bias_initializer,
                                         name='bias_real',
                                         regularizer=self.bias_regularizer,
                                         constraint=self.bias_constraint)
        self.bias_image = self.add_weight(shape=(self.filters,),
                                          initializer=self.bias_initializer,
                                          name='bias_image',
                                          regularizer=self.bias_regularizer,
                                          constraint=self.bias_constraint)
    else:
        self.bias_real = None
        self.bias_image = None
    self.input_spec = KL.InputSpec(ndim=self.rank + 2,
                                   axes={channel_axis: input_dim * 2})
    self.built = True
    self.convArgs = {
        'strides': self.strides[0] if self.rank == 1 else self.strides,
        'padding': self.padding,
        'data_format': self.data_format,
        'dilation_rate': self.dilation_rate[0] if self.rank == 1 else self.dilation_rate
    }
    self.convFunc = {1: K.conv1d, 2: K.conv2d, 3: K.conv3d}[self.rank]
    # tf.signal.* replaces the removed tf.ifft* aliases (TF2 compatible).
    self.ifftFunc = {1: tf.signal.ifft,
                     2: tf.signal.ifft2d,
                     3: tf.signal.ifft3d}[self.rank]
def __init__(self, layer, scales=((0.5, ), (0.25, 0.5, 2.0)), filters=256,
             dropout=0., standardized=False, **kwargs):
    """Wrapper applying the inner layer at several train/eval scales.

    `scales` is a pair of scale collections: one used in training, one in
    evaluation. Scale 1.0 is always included implicitly.
    """
    super().__init__(layer, **kwargs)
    self.input_spec = layers.InputSpec(ndim=4)
    self.scales = scales
    self.filters = filters
    self.dropout = dropout
    self.standardized = standardized

    pair_of_sequences = (
        len(scales) == 2
        and all(isinstance(s, (list, tuple)) for s in scales))
    if not pair_of_sequences:
        raise ValueError(
            'Expecting `scales` to be a train/eval pair of scale lists/tuples.'
        )

    # Deduplicate, force-include 1.0, and sort descending.
    self.train_scales = sorted(set(scales[0]) | {1.0}, reverse=True)
    self.eval_scales = sorted(set(scales[1]) | {1.0}, reverse=True)
    if min(len(self.train_scales), len(self.eval_scales)) < 2:
        raise ValueError(
            'Expecting `scales` to have at least one more scale except `1`.'
        )
def __init__(self, filters, kernel_size, strides=(1, 1), depth_multiplier=1,
             data_format=None, dilation_rate=(1, 1), groups=1, activation=None,
             use_bias=True, depthwise_initializer='glorot_uniform',
             kernel_initializer='glorot_uniform', bias_initializer='zeros',
             depthwise_regularizer=None, kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             depthwise_constraint=None, kernel_constraint=None,
             bias_constraint=None, standardized=False, **kwargs):
    """Separable-style convolution layer over 4D inputs.

    Mirrors the usual Keras convolution configuration surface: geometry,
    activation/bias, and per-weight initializers/regularizers/constraints.
    """
    super().__init__(activity_regularizer=activity_regularizer, **kwargs)
    self.input_spec = layers.InputSpec(ndim=4)

    # Convolution geometry.
    self.filters = filters
    self.kernel_size = kernel_size
    self.strides = strides
    self.depth_multiplier = depth_multiplier
    self.data_format = data_format
    self.dilation_rate = dilation_rate
    self.groups = groups
    self.standardized = standardized

    # Activation and bias.
    self.activation = activations.get(activation)
    self.use_bias = use_bias

    # Weight initializers.
    self.depthwise_initializer = initializers.get(depthwise_initializer)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)

    # Weight regularizers.
    self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)

    # Weight constraints.
    self.depthwise_constraint = constraints.get(depthwise_constraint)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)
def __init__(self, bone_arch, bone_init, **kwargs):
    """Model over a fixed 11-channel 4D input; stores backbone config."""
    super().__init__(**kwargs)
    # The expected channel count is kept as an attribute so build/call
    # logic can reference it instead of a bare literal.
    self.input_channels = 11
    self.input_spec = layers.InputSpec(ndim=4, axes={-1: self.input_channels})
    self.bone_arch = bone_arch
    self.bone_init = bone_init
def build(self, input_shape):
    """Create the high/low (h/l) branch sublayers for a single 4D input.

    Channel-preserving paths use the discovered input channel count;
    the remaining paths project to `self.filters`.
    """
    self.channels = input_shape[-1]
    if self.channels is None:
        raise ValueError(
            'Channel dimension of the inputs should be defined. Found `None`.'
        )
    # Pin the discovered channel count for subsequent calls.
    self.input_spec = layers.InputSpec(ndim=4, axes={-1: self.channels})
    self.relu = layers.ReLU()
    self.pool = layers.AveragePooling2D(2, strides=2, padding='same')
    # stage 0
    self.cbr_hh0 = ConvNormRelu(self.channels, 3)
    self.cbr_hl0 = ConvNormRelu(self.filters, 3)
    # stage 1: cross-connections between the two branches
    self.conv_hh1 = SameConv(self.channels, 3)
    self.conv_hl1 = SameConv(self.filters, 3)
    self.conv_lh1 = SameConv(self.channels, 3)
    self.conv_ll1 = SameConv(self.filters, 3)
    self.bn_l1 = layers.BatchNormalization()
    self.bn_h1 = layers.BatchNormalization()
    # stage 2
    self.conv_hh2 = SameConv(self.channels, 3)
    self.conv_lh2 = SameConv(self.channels, 3)
    self.bn_h2 = layers.BatchNormalization()
    super().build(input_shape)
def __init__(self, filters, **kwargs):
    """Layer taking five 4D feature inputs, one filter value per input.

    Args:
        filters: list/tuple of exactly 5 filter counts.

    Raises:
        ValueError: if `filters` is not a list/tuple of length 5.
    """
    super().__init__(**kwargs)
    # Validate before building any state, so a bad config fails fast.
    if not isinstance(filters, (list, tuple)) or len(filters) != 5:
        raise ValueError('Parameter "filters" should contain 5 filter values')
    self.input_spec = [layers.InputSpec(ndim=4) for _ in range(5)]
    self.filters = filters
def __init__(self, filters, stride, **kwargs):
    """Convolution-style block over a 4D input with stride 1 or 2."""
    super().__init__(**kwargs)
    self.input_spec = layers.InputSpec(ndim=4)
    if stride not in {1, 2}:
        raise ValueError('Unsupported stride')
    self.filters = filters
    self.stride = stride
def __init__(self, points, **kwargs):
    """Layer parameterized by a point fraction in [0, 1]; no autocasting."""
    kwargs['autocast'] = False
    super().__init__(**kwargs)
    self.input_spec = layers.InputSpec(ndim=4)
    within_unit_interval = 0. <= points <= 1.
    if not within_unit_interval:
        raise ValueError('Parameter "points" should be in range [0; 1]')
    self.points = float(points)
def __init__(self, classes, kernel_size=1,
             kernel_initializer='glorot_uniform', **kwargs):
    """Classification head over a 4D feature input."""
    super().__init__(**kwargs)
    self.input_spec = layers.InputSpec(ndim=4)
    self.classes = classes
    self.kernel_size = kernel_size
    # Resolve the initializer spec (string or instance) up front.
    self.kernel_initializer = initializers.get(kernel_initializer)
def __init__(self, bone_arch, bone_init, bone_train, aspp_filters,
             aspp_stride, add_strides=None, **kwargs):
    """DeepLab-style model over a uint8 NHWC image batch.

    Stores backbone and ASPP configuration; `low_stride` is the fixed
    stride of the low-level feature tap.
    """
    super().__init__(**kwargs)
    self.input_spec = layers.InputSpec(ndim=4, dtype='uint8')
    # Backbone configuration.
    self.bone_arch = bone_arch
    self.bone_init = bone_init
    self.bone_train = bone_train
    # ASPP configuration.
    self.aspp_filters = aspp_filters
    self.aspp_stride = aspp_stride
    self.low_stride = 4
    self.add_strides = add_strides
def __init__(self, classes, weighted=False, reduction=Reduction.AUTO, **kwargs):
    """Point-wise loss layer; optionally takes a per-point weight input."""
    kwargs['autocast'] = False
    super().__init__(**kwargs)
    specs = [
        layers.InputSpec(ndim=3, axes={-1: classes}),  # point logits
        layers.InputSpec(ndim=3, axes={-1: 2}),        # point coords
        layers.InputSpec(ndim=4, axes={-1: 1}),        # labels
    ]
    if weighted:
        specs.append(layers.InputSpec(ndim=4, axes={-1: 1}))  # weights
    self.input_spec = specs
    self.classes = classes
    self.weighted = weighted
    self.reduction = reduction
def build(self, input_shape):
    """Record the input channel count and pin it in the input spec."""
    self.channels = input_shape[-1]
    if self.channels is None:
        raise ValueError(
            'Channel dimension of the inputs should be defined. Found `None`.'
        )
    spec = layers.InputSpec(min_ndim=2, axes={-1: self.channels})
    self.input_spec = spec
    super().build(input_shape)
def __init__(self, classes, bone_arch, bone_init, bone_train, dropout,
             dec_filters, psp_sizes, **kwargs):
    """Segmentation model over a uint8 NHWC image batch.

    Stores the class count plus backbone and decoder configuration.
    """
    super().__init__(**kwargs)
    self.input_spec = layers.InputSpec(ndim=4, dtype='uint8')
    self.classes = classes
    self.dropout = dropout
    # Backbone configuration.
    self.bone_arch = bone_arch
    self.bone_init = bone_init
    self.bone_train = bone_train
    # Decoder configuration.
    self.dec_filters = dec_filters
    self.psp_sizes = psp_sizes