def f(x):
    """Pre-activation residual block: [BN] -> act -> conv -> [BN] -> act
    -> [dropout] -> conv, with an identity or 1x1-projection shortcut.

    Free variables (`filters`, `strides`, `batch_norm`, `dropout`,
    `residual`, `activation`, `kwargs`, `channel_index`) come from the
    enclosing factory's scope.
    """
    in_channels = K.int_shape(x)[channel_index]
    same_shape = in_channels == filters
    if same_shape:
        # Identity shortcut: pre-activate only the residual branch and
        # leave `x` untouched so it can be added back unchanged.
        y = x
        if batch_norm:
            y = BatchNormalization()(y)
        y = make_activation(activation, **kwargs)(y)
    else:
        # Projection shortcut: the shared pre-activation is applied to
        # `x` itself so both the branch and the 1x1 projection see it.
        if batch_norm:
            x = BatchNormalization()(x)
        x = make_activation(activation, **kwargs)(x)
        y = x
    y = make_conv(filters, activation=activation, strides=strides, **kwargs)(y)
    if batch_norm:
        y = BatchNormalization()(y)
    y = make_activation(activation, **kwargs)(y)
    if dropout != 0:
        y = make_dropout(dropout, activation=activation)(y)
    y = make_conv(filters, activation=activation, **kwargs)(y)
    if not residual:
        return y
    if same_shape:
        shortcut = x
    else:
        # Channel/stride mismatch: project the (pre-activated) input.
        shortcut = make_conv(filters, shape=(1, 1), strides=strides, **kwargs)(x)
    return Add()([y, shortcut])
def f(x):
    """Two-convolution branch: act -> conv(strided) -> [BN] -> act ->
    conv -> [BN]. `filters`, `strides`, `batch_norm`, `activation` and
    `kwargs` are closed over from the enclosing factory.
    """
    # Build the layer stack first (construction order matches the
    # original apply-as-you-go sequence), then thread the tensor through.
    stack = [
        make_activation(activation, **kwargs),
        make_conv(filters, strides=strides),
    ]
    if batch_norm:
        stack.append(BatchNormalization())
    stack.append(make_activation(activation, **kwargs))
    stack.append(make_conv(filters))
    if batch_norm:
        stack.append(BatchNormalization())
    y = x
    for layer in stack:
        y = layer(y)
    return y
def make_wrn(activation, input_shape, output_size, depth=4, k=2, dropout=True,
             batch_norm=True, residual=True, **kwargs):
    """Build a Wide Residual Network (WRN-depth-k) classifier.

    Three stages of `n = (depth - 4) // 6` blocks each, widened by `k`,
    with spatial downsampling (stride 2) at the first block of stages 2
    and 3, finished by global max pooling and a softmax dense head.

    `dropout=True` maps to the paper's rate of 0.3; extra `kwargs` are
    forwarded to the layer factories.
    """
    assert (depth - 4) % 6 == 0
    # Bug fix: '/' is true division in Python 3 and would hand a float
    # to range(); '//' keeps the per-stage block count an int.
    n = (depth - 4) // 6
    dropout = 0.3 if dropout else 0
    x = Input(shape=input_shape)
    y = x
    y = make_conv(16, activation=activation, **kwargs)(y)
    # Stage 1: 16*k channels, no downsampling.
    for _ in range(n):
        y = _make_block(16 * k, activation=activation, batch_norm=batch_norm,
                        dropout=dropout, residual=residual, **kwargs)(y)
    # Stage 2: 32*k channels, first block downsamples.
    y = _make_block(32 * k, activation=activation, batch_norm=batch_norm,
                    dropout=dropout, strides=(2, 2), residual=residual,
                    **kwargs)(y)
    for _ in range(n - 1):
        y = _make_block(32 * k, activation=activation, batch_norm=batch_norm,
                        dropout=dropout, residual=residual, **kwargs)(y)
    # Stage 3: 64*k channels, first block downsamples.
    y = _make_block(64 * k, activation=activation, batch_norm=batch_norm,
                    dropout=dropout, strides=(2, 2), residual=residual,
                    **kwargs)(y)
    for _ in range(n - 1):
        y = _make_block(64 * k, activation=activation, batch_norm=batch_norm,
                        dropout=dropout, residual=residual, **kwargs)(y)
    # Final pre-activation before pooling (pre-act blocks end on a conv).
    if batch_norm:
        y = BatchNormalization()(y)
    y = make_activation(activation, **kwargs)(y)
    y = GlobalMaxPooling2D()(y)
    y = make_dense(output_size, activation='softmax', **kwargs)(y)
    return Model(inputs=x, outputs=y)
def make_shakeshake(activation, input_shape, output_size, depth=26, k=32,
                    batch_norm=True, residual=True, **kwargs):
    """Build a shake-shake style ResNet classifier.

    Three stages of `n = (depth - 2) // 6` blocks with base width `k`,
    doubling the width and halving the spatial size at stages 2 and 3,
    finished by global average pooling and a softmax dense head.

    A fixed batch shape is used (`kwargs['batch_size']` must be set),
    as shake-shake regularization needs a known batch dimension.
    """
    assert (depth - 2) % 6 == 0
    # Bug fix: '/' is true division in Python 3 and would hand a float
    # to range(); '//' keeps the per-stage block count an int.
    n = (depth - 2) // 6
    x = Input(batch_shape=(kwargs['batch_size'],) + input_shape)
    y = make_conv(16)(x)
    y = BatchNormalization()(y)
    # Stage 1: width k, no downsampling.
    for _ in range(n):
        y = _make_block(k, activation=activation, batch_norm=batch_norm,
                        residual=residual, **kwargs)(y)
    # Stage 2: width 2k, first block downsamples.
    y = _make_block(2 * k, activation=activation, batch_norm=batch_norm,
                    residual=residual, strides=(2, 2), **kwargs)(y)
    for _ in range(n - 1):
        y = _make_block(2 * k, activation=activation, batch_norm=batch_norm,
                        residual=residual, **kwargs)(y)
    # Stage 3: width 4k, first block downsamples.
    y = _make_block(4 * k, activation=activation, batch_norm=batch_norm,
                    residual=residual, strides=(2, 2), **kwargs)(y)
    for _ in range(n - 1):
        y = _make_block(4 * k, activation=activation, batch_norm=batch_norm,
                        residual=residual, **kwargs)(y)
    y = make_activation(activation, **kwargs)(y)
    y = GlobalAveragePooling2D()(y)
    y = make_dense(output_size, activation='softmax', **kwargs)(y)
    return Model(inputs=x, outputs=y)
def f(x):
    """Downsampling shortcut path: split the input into two half-width
    1x1-conv branches (one spatially offset by a pad/crop shift) and
    concatenate them along the channel axis, optionally batch-normed.

    Free variables (`filters`, `strides`, `batch_norm`, `activation`,
    `kwargs`, `channel_index`) come from the enclosing factory's scope.
    """
    in_channels = K.int_shape(x)[channel_index]
    if in_channels == filters:
        y = x
    else:
        y = make_activation(activation, **kwargs)(x)
    # Bug fix: `filters / 2` is a float in Python 3 and Conv2D requires
    # an int filter count — use floor division. NOTE(review): if
    # `filters` is odd, the concatenated output has filters - 1
    # channels; presumably widths are always even here — confirm.
    y1 = AveragePooling2D((1, 1), strides=strides, padding='same')(y)
    y1 = make_conv(filters // 2, shape=(1, 1))(y1)
    # Second branch: shift the feature map by one pixel (pad top-left,
    # crop bottom-right) so the strided pooling samples offset positions.
    y2 = ZeroPadding2D(padding=((1, 0), (1, 0)))(y)
    y2 = Cropping2D(cropping=((0, 1), (0, 1)))(y2)
    y2 = AveragePooling2D((1, 1), strides=strides, padding='same')(y2)
    y2 = make_conv(filters // 2, shape=(1, 1))(y2)
    y = Concatenate(axis=channel_index)([y1, y2])
    if batch_norm:
        y = BatchNormalization()(y)
    return y