def get_embeddings(feature_dim_dict, embedding_size, init_std, seed, l2_rev_V):
    if embedding_size == "auto":
        sparse_embedding = [
            Embedding(feature_dim_dict["sparse"][feat],
                      6 * int(pow(feature_dim_dict["sparse"][feat], 0.25)),
                      embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                      embeddings_regularizer=l2(l2_rev_V),
                      name='sparse_emb_' + str(i) + '-' + feat)
            for i, feat in enumerate(feature_dim_dict["sparse"])
        ]
        print("Using auto embedding size, the concatenated vector dimension is",
              sum(6 * int(pow(dim, 0.25)) for dim in feature_dim_dict["sparse"].values()))
    else:
        sparse_embedding = [
            Embedding(feature_dim_dict["sparse"][feat],
                      embedding_size,
                      embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                      embeddings_regularizer=l2(l2_rev_V),
                      name='sparse_emb_' + str(i) + '-' + feat)
            for i, feat in enumerate(feature_dim_dict["sparse"])
        ]
    return sparse_embedding
def default_classification_model(num_classes,
                                 num_anchors,
                                 pyramid_feature_size=256,
                                 prior_probability=0.01,
                                 classification_feature_size=256,
                                 name='classification_submodel'):
    """Creates the default classification submodel.

    Args:
        num_classes: Number of classes to predict a score for at each feature level.
        num_anchors: Number of anchors to predict classification scores for at each feature level.
        pyramid_feature_size: The number of filters to expect from the feature pyramid levels.
        prior_probability: Prior probability used to initialize the bias of the final layer.
        classification_feature_size: The number of filters to use in the layers in the classification submodel.
        name: The name of the submodel.

    Returns:
        A keras.models.Model that predicts classes for each anchor.
    """
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
    }

    if K.image_data_format() == 'channels_first':
        inputs = Input(shape=(pyramid_feature_size, None, None, None))
    else:
        inputs = Input(shape=(None, None, None, pyramid_feature_size))
    outputs = inputs
    for i in range(4):
        outputs = Conv3D(filters=classification_feature_size,
                         activation='relu',
                         name='pyramid_classification_{}'.format(i),
                         kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),
                         bias_initializer='zeros',
                         **options)(outputs)

    outputs = Conv3D(filters=num_classes * num_anchors,
                     kernel_initializer=RandomNormal(mean=0.0, stddev=0.01, seed=None),
                     bias_initializer=PriorProbability(probability=prior_probability),
                     name='pyramid_classification',
                     **options)(outputs)

    # reshape output and apply sigmoid
    if K.image_data_format() == 'channels_first':
        # a 5D tensor has four non-batch axes, so the permutation needs four entries
        # to move channels last before flattening the anchors
        outputs = Permute((2, 3, 4, 1), name='pyramid_classification_permute')(outputs)
    outputs = Reshape((-1, num_classes), name='pyramid_classification_reshape')(outputs)
    outputs = Activation('sigmoid', name='pyramid_classification_sigmoid')(outputs)

    return Model(inputs=inputs, outputs=outputs, name=name)
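# Hedged usage sketch (my addition, not from the original source): instantiate the
# submodel and run a dummy 3D feature map through it to check the output shape.
# Assumes channels_last data format and that numpy is available.
import numpy as np

clf = default_classification_model(num_classes=3, num_anchors=9)
dummy_features = np.zeros((1, 8, 8, 8, 256), dtype='float32')
scores = clf.predict(dummy_features)  # shape (1, 8 * 8 * 8 * 9, 3): per-anchor class probabilities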
def get_embeddings(feature_dim_dict, embedding_size, init_std, seed, l2_rev_V, l2_reg_w):
    sparse_embedding = {
        j.name: {
            feat.name: Embedding(j.dimension,
                                 embedding_size,
                                 embeddings_initializer=RandomNormal(mean=0.0, stddev=0.0001, seed=seed),
                                 embeddings_regularizer=l2(l2_rev_V),
                                 name='sparse_emb_' + str(j.name) + '_' + str(i) + '-' + feat.name)
            for i, feat in enumerate(feature_dim_dict["sparse"] + feature_dim_dict['dense'])
        }
        for j in feature_dim_dict["sparse"]
    }
    dense_embedding = {
        j.name: {
            feat.name: Dense(embedding_size,
                             kernel_initializer=RandomNormal(mean=0.0, stddev=0.0001, seed=seed),
                             use_bias=False,
                             kernel_regularizer=l2(l2_rev_V),
                             name='sparse_emb_' + str(j.name) + '_' + str(i) + '-' + feat.name)
            for i, feat in enumerate(feature_dim_dict["sparse"] + feature_dim_dict["dense"])
        }
        for j in feature_dim_dict["dense"]
    }
    linear_embedding = {
        feat.name: Embedding(feat.dimension,
                             1,
                             embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                             embeddings_regularizer=l2(l2_reg_w),
                             name='linear_emb_' + str(i) + '-' + feat.name)
        for i, feat in enumerate(feature_dim_dict["sparse"])
    }
    return sparse_embedding, dense_embedding, linear_embedding
def build_generator(input_shape=(256, 256, 3), num_blocks=9):
    """Generator network architecture"""
    x0 = layers.Input(input_shape)

    x = ReflectionPadding2D(padding=(3, 3))(x0)
    x = layers.Conv2D(filters=64, kernel_size=7, strides=1,
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # downsample
    x = layers.Conv2D(filters=128, kernel_size=3, strides=2, padding='same',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2D(filters=256, kernel_size=3, strides=2, padding='same',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # residual
    for _ in range(num_blocks):
        x = _resblock(x)

    # upsample
    x = layers.Conv2DTranspose(filters=128, kernel_size=3, strides=2, padding='same',
                               kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Conv2DTranspose(filters=64, kernel_size=3, strides=2, padding='same',
                               kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)

    # final
    x = ReflectionPadding2D(padding=(3, 3))(x)
    x = layers.Conv2D(filters=3, kernel_size=7, activation='tanh',
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)

    return Model(inputs=x0, outputs=x)
def build(self, input_shape):
    self.routing_logits = self.add_weight(shape=[1, self.k_max, self.max_len],
                                          initializer=RandomNormal(stddev=self.init_std),
                                          trainable=False,
                                          name="B",
                                          dtype=tf.float32)
    self.bilinear_mapping_matrix = self.add_weight(shape=[self.input_units, self.out_units],
                                                   initializer=RandomNormal(stddev=self.init_std),
                                                   name="S",
                                                   dtype=tf.float32)
    super(CapsuleLayer, self).build(input_shape)
def get_pooling(input, feature_dim_dict, embedding_size, init_std, seed, l2_rev_V, l2_reg_w):
    # Applying raw tensor ops to Keras tensors raises: "ValueError: Output tensors to a Model
    # must be the output of a TensorFlow `Layer` (thus holding past layer metadata). Found:
    # Tensor("prediction_layer/Reshape_1:0", shape=(?, 1), dtype=float32)" — so all slicing
    # and tf ops below are wrapped in Lambda layers.
    # Example input layout:
    # movie_tags = [[0, 1, 2, 0, 0, 0, 0],   # movie1 has 3 tags: 0, 1, 2
    #               [0, 1, 2, 3, 4, 0, 0]]   # movie2 has 5 tags: 0, 1, 2, 3, 4
    # tags_len = [[3], [5]]                  # remember to pass the valid length of each variable-length feature
    # model_input = [movie_tags, tags_len]
    # From the input we can look up the corresponding embedding matrix tag_embedding, then
    # feed the embedding matrix and the lengths as the API requires:
    # tags_pooling = SequencePoolingLayer(seq_len_max=7, mode='mean')([tag_embedding, tags_len_input])
    # This yields a fixed-length representation of the variable-length multi-value feature,
    # ready for further processing.
    multi_val_embedding = []
    multi_val_linear_embedding = []
    for i, feat in enumerate(feature_dim_dict['multi_val']):
        max_len = feature_dim_dict['multi_val'][feat]
        # embedding_input = [input[i][:, 0:max_len], input[i][:, -1]]
        # the first max_len columns are the padded ids, the last column is the valid length;
        # the default argument binds max_len per iteration inside the lambdas
        tag_input = Lambda(lambda x, m=max_len: x[:, 0:m])(input[i])
        tag_len = Lambda(lambda x, m=max_len: tf.expand_dims(x[:, m], -1))(input[i])

        tag_embedding = Embedding(feature_dim_dict["multi_val"][feat],
                                  embedding_size,
                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                  embeddings_regularizer=l2(l2_rev_V),
                                  name='multi_val_emb_' + str(i) + '-' + feat)(tag_input)
        multi_val_pooling = SequencePoolingLayer(seq_len_max=max_len, mode='max',
                                                 name='multi_val_pool_' + str(i) + '-' + feat)(
                                                     [tag_embedding, tag_len])
        multi_val_embedding.append(multi_val_pooling)

        linear_embedding = Embedding(feature_dim_dict["multi_val"][feat],
                                     1,
                                     embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                     embeddings_regularizer=l2(l2_reg_w),
                                     name='linear_emb_' + str(i) + '-' + feat)(tag_input)
        multi_val_linear_pooling = SequencePoolingLayer(seq_len_max=max_len, mode='max',
                                                        name='linear_pool_' + str(i) + '-' + feat)(
                                                            [linear_embedding, tag_len])
        multi_val_linear_embedding.append(multi_val_linear_pooling)
    return multi_val_embedding, multi_val_linear_embedding
def get_embeddings(feature_dim_dict, embedding_size, init_std, seed, l2_rev_V, l2_reg_w):
    sparse_embedding = [Embedding(feature_dim_dict["sparse"][feat],
                                  embedding_size,
                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                  embeddings_regularizer=l2(l2_rev_V),
                                  name='sparse_emb_' + str(i) + '-' + feat)
                        for i, feat in enumerate(feature_dim_dict["sparse"])]
    linear_embedding = [Embedding(feature_dim_dict["sparse"][feat],
                                  1,
                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                  embeddings_regularizer=l2(l2_reg_w),
                                  name='linear_emb_' + str(i) + '-' + feat)
                        for i, feat in enumerate(feature_dim_dict["sparse"])]
    return sparse_embedding, linear_embedding
def build(self, input_shape):
    # Routing logits of shape [1, k_max, max_len] (e.g. [1, 2, 50]): each logit corresponds
    # one-to-one to an input capsule, and with two groups of output capsules we need two
    # groups of B here.
    self.routing_logits = self.add_weight(shape=[1, self.k_max, self.max_len],
                                          initializer=RandomNormal(stddev=self.init_std),
                                          trainable=False,
                                          name='B',
                                          dtype=tf.float32)
    # Bilinear mapping matrix of shape [input capsule dim, output capsule dim],
    # which performs the mapping between the two capsule spaces.
    self.bilinear_mapping_matrix = self.add_weight(shape=[self.input_units, self.out_units],
                                                   initializer=RandomNormal(stddev=self.init_std),
                                                   name="S",
                                                   dtype=tf.float32)
    super(CapsuleLayer, self).build(input_shape)
def define_generator(latent_dim, n_classes=10):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # label input
    in_label = Input(shape=(1,))
    # embedding for categorical input
    li = Embedding(n_classes, 50)(in_label)
    # linear multiplication
    n_nodes = 7 * 7
    li = Dense(n_nodes, kernel_initializer=init)(li)
    # reshape to additional channel
    li = Reshape((7, 7, 1))(li)
    # image generator input
    in_lat = Input(shape=(latent_dim,))
    # foundation for 7x7 image
    n_nodes = 384 * 7 * 7
    gen = Dense(n_nodes, kernel_initializer=init)(in_lat)
    gen = Activation('relu')(gen)
    gen = Reshape((7, 7, 384))(gen)
    # merge image gen and label input
    merge = Concatenate()([gen, li])
    # upsample to 14x14
    gen = Conv2DTranspose(192, (5, 5), strides=(2, 2), padding='same', kernel_initializer=init)(merge)
    gen = BatchNormalization()(gen)
    gen = Activation('relu')(gen)
    # upsample to 28x28
    gen = Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', kernel_initializer=init)(gen)
    out_layer = Activation('tanh')(gen)
    # define model
    model = Model([in_lat, in_label], out_layer)
    return model
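# Hypothetical usage (not part of the original snippet): draw one class-conditional
# sample from the generator. Assumes latent_dim=100 and the 28x28x1 tanh output above.
import numpy as np

generator = define_generator(latent_dim=100)
z = np.random.randn(1, 100).astype('float32')  # latent vector
label = np.array([[3]])                        # class index in [0, n_classes)
fake_image = generator.predict([z, label])     # shape (1, 28, 28, 1), values in [-1, 1]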
def __init__(self, n_filters, kernel_size, stride, padding='valid', data_format='channels_first'): """ This layer performs a transposed 2D convolution, batch normalization followed by leaky relu activation. Note: 1. The transposed convolution has no activation function. 2. The batch norm layer has no scale parameter :param n_filters: Number of filters for the convolution layer :param kernel_size: The kernel size for the convolution layer :param stride: The stride size for the convolution layer :param padding: The padding type for the convolution layer :param data_format: The data_format of the input tensor to be passed to this layer """ bn_axis = 3 if data_format == 'channels_last' else 1 self.conv_transpose = Conv2DTranspose(filters=n_filters, kernel_size=kernel_size, strides=stride, padding=padding, activation='linear', kernel_initializer=RandomNormal( mean=0, stddev=0.02), data_format=data_format) # TODO: Figure out why can leave out the scale parameter in batch norm if the following layer is a relu self.batch_norm = BatchNormalization(axis=bn_axis, scale=False, center=True) self.leaky_relu = LeakyReLU(alpha=1e-2)
def _add_generator_block(old_model, config):
    # get the end of the last block
    block_end = old_model.layers[-2].output
    # weights init
    w_init = RandomNormal(stddev=0.02)
    w_const = max_norm(1.0)
    # upsample, and define new block
    upsampling = UpSampling2D()(block_end)
    # conv layers (two 3x3 convolutions; the loop values are kernel sizes, not strides)
    x = upsampling
    for kernel_size in [3, 3]:
        x = Conv2D(config['filters'], kernel_size, padding='same',
                   kernel_initializer=w_init, kernel_constraint=w_const)(x)
        x = PixelNormalization()(x)
        x = LeakyReLU(alpha=0.2)(x)
    # add new output layer
    out_image = Conv2D(config['n_channels'], 1, padding='same')(x)
    # define model
    model1 = Model(old_model.input, out_image)
    # get the output layer from old model
    out_old = old_model.layers[-1]
    # connect the upsampling to the old output layer
    out_image2 = out_old(upsampling)
    # define new output image as the weighted sum of the old and new models
    merged = WeightedSum()([out_image2, out_image])
    # define model
    model2 = Model(old_model.input, merged)
    return [model1, model2]
def __init__(self, n_filters, kernel_size, stride, padding='valid', data_format='channels_first'):
    param_initializer = RandomNormal(mean=0, stddev=0.002)
    self.mean = Conv2D(filters=n_filters,
                       kernel_size=kernel_size,
                       strides=stride,
                       padding=padding,
                       activation='linear',
                       data_format=data_format,
                       kernel_initializer=param_initializer,
                       bias_initializer=param_initializer,
                       name='conv2d_mean')
    self.standard_deviation = Conv2D(filters=n_filters,
                                     kernel_size=kernel_size,
                                     strides=stride,
                                     padding=padding,
                                     activation='linear',
                                     data_format=data_format,
                                     kernel_initializer=param_initializer,
                                     bias_initializer=param_initializer,
                                     name='conv2d_sd')
    # The standard deviation goes through a softplus activation because it must be > 0
    self.soft_plus = Activation('softplus', name='softplus_sd')
def creative_sparse_embedding_dict(sparse_feature_columns,
                                   feature_names,
                                   embedding_size,
                                   l2_reg_embedding=0.00001,
                                   init_std=0.0001,
                                   seed=1024):
    """
    :param sparse_feature_columns: metadata of the sparse features
    :param feature_names: features for which embeddings are needed
    :param embedding_size: length of the embedding vectors
    :param l2_reg_embedding: L2 regularization coefficient
    :param init_std: stddev for the random-normal initializer
    :param seed: random seed
    :return: {"embedding_name": Embedding, ......}
    """
    sparse_embedding_dict = {
        feat.name: Embedding(feat.vocabulary_size,
                             embedding_size,
                             embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                             embeddings_regularizer=l2(l2_reg_embedding),
                             name='emb_' + feat.name)
        for feat in sparse_feature_columns if feat.name in feature_names
    }
    return sparse_embedding_dict
def __init__(self,
             rank: int,
             head_size: int,
             head_count: int,
             depth: int,
             kernel_size: Union[int, Tuple, List],
             strides: Union[int, Tuple, List],
             dilation_rate: Union[int, Tuple, List],
             kernel_regularizer: Optional[Union[Dict, AnyStr, Callable]],
             bias_regularizer: Optional[Union[Dict, AnyStr, Callable]],
             activity_regularizer: Optional[Union[Dict, AnyStr, Callable]],
             kernel_constraint: Optional[Union[Dict, AnyStr, Callable]],
             bias_constraint: Optional[Union[Dict, AnyStr, Callable]],
             **kwargs):
    filters = head_count * head_size
    self.head_size = head_size
    self.head_count = head_count
    self.embeddings_initializer = RandomNormal(stddev=1.0)
    super(ResSASABasicBlock, self).__init__(rank=rank,
                                            filters=filters,
                                            depth=depth,
                                            kernel_size=kernel_size,
                                            strides=strides,
                                            data_format=None,
                                            dilation_rate=dilation_rate,
                                            kernel_regularizer=kernel_regularizer,
                                            bias_regularizer=bias_regularizer,
                                            activity_regularizer=activity_regularizer,
                                            kernel_constraint=kernel_constraint,
                                            bias_constraint=bias_constraint,
                                            **kwargs)
def __new__(cls, name, vocabulary_size, embedding_dim=4, use_hash=False, dtype="int32",
            embeddings_initializer=None, embedding_name=None,
            group_name=DEFAULT_GROUP_NAME, trainable=True):
    if embedding_dim == "auto":
        embedding_dim = 6 * int(pow(vocabulary_size, 0.25))
    if embeddings_initializer is None:
        embeddings_initializer = RandomNormal(mean=0.0, stddev=0.0001, seed=2020)
    if embedding_name is None:
        embedding_name = name
    return super(SparseFeat, cls).__new__(cls, name, vocabulary_size, embedding_dim,
                                          use_hash, dtype, embeddings_initializer,
                                          embedding_name, group_name, trainable)
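# Illustrative check (my addition): with embedding_dim="auto" the width follows the
# heuristic 6 * vocabulary_size ** 0.25, so a 10000-item vocabulary gets 60 dimensions.
feat = SparseFeat('item_id', vocabulary_size=10000, embedding_dim='auto')
assert feat.embedding_dim == 6 * int(pow(10000, 0.25))  # == 60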
def __init__(self):
    super(DiscriminatorNet, self).__init__()
    init = RandomNormal(stddev=0.02)
    self.conv1 = Conv2D(64, (4, 4), strides=2, padding="same", kernel_initializer=init)
    self.conv2 = Conv2D(128, (4, 4), strides=2, padding="same", kernel_initializer=init)
    self.in2 = InstanceNormalization()
    self.conv3 = Conv2D(256, (4, 4), strides=2, padding="same", kernel_initializer=init)
    self.in3 = InstanceNormalization()
    self.conv4 = Conv2D(512, (4, 4), strides=1, padding="same", kernel_initializer=init)
    self.in4 = InstanceNormalization()
    self.conv5 = Conv2D(1, (4, 4), strides=1, padding="same", kernel_initializer=init)
    self.lrelu = LeakyReLU(alpha=0.2)
def _resblock(x0, num_filter=256, kernel_size=3):
    """Residual block architecture"""
    x = ReflectionPadding2D()(x0)
    x = layers.Conv2D(filters=num_filter, kernel_size=kernel_size,
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.ReLU()(x)
    x = ReflectionPadding2D()(x)
    x = layers.Conv2D(filters=num_filter, kernel_size=kernel_size,
                      kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
    x = InstanceNormalization()(x)
    x = layers.Add()([x, x0])
    return x
def create_embedding_dict(sparse_feature_columns, varlen_sparse_feature_columns,
                          multi_value_sparse_feature_columns, init_std, seed, l2_reg,
                          prefix='sparse_', seq_mask_zero=True):
    sparse_embedding = {
        feat.embedding_name: Embedding(feat.vocabulary_size,
                                       feat.embedding_dim,
                                       embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                       embeddings_regularizer=l2(l2_reg),
                                       name=prefix + '_emb_' + feat.embedding_name)
        for feat in sparse_feature_columns
    }
    if varlen_sparse_feature_columns and len(varlen_sparse_feature_columns) > 0:
        for feat in varlen_sparse_feature_columns:
            # if feat.name not in sparse_embedding:
            sparse_embedding[feat.embedding_name] = Embedding(
                feat.vocabulary_size,
                feat.embedding_dim,
                embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                embeddings_regularizer=l2(l2_reg),
                name=prefix + '_seq_emb_' + feat.name,
                mask_zero=seq_mask_zero)
    if multi_value_sparse_feature_columns and len(multi_value_sparse_feature_columns) > 0:
        for feat in multi_value_sparse_feature_columns:
            sparse_embedding[feat.embedding_name] = Embedding(
                feat.vocabulary_size,
                feat.embedding_dim,
                embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                embeddings_regularizer=l2(l2_reg),
                name=prefix + '_multivalue_emb_' + feat.name,
                mask_zero=seq_mask_zero)
    return sparse_embedding
def create_embedding_dict(feature_dim_dict, embedding_size, init_std, seed, l2_reg,
                          prefix='sparse', seq_mask_zero=True):
    # Build a dict of Embedding layers for single-value and variable-length multi-value inputs.
    if embedding_size == 'auto':
        print("Notice: do not use auto embedding in models other than DCN")
        sparse_embedding = {feat.name: Embedding(feat.dimension,
                                                 6 * int(pow(feat.dimension, 0.25)),
                                                 embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                 embeddings_regularizer=l2(l2_reg),
                                                 name=prefix + '_emb_' + str(i) + '-' + feat.name)
                            for i, feat in enumerate(feature_dim_dict["sparse"])}
    else:
        sparse_embedding = {feat.name: Embedding(feat.dimension,
                                                 embedding_size,
                                                 embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                 embeddings_regularizer=l2(l2_reg),
                                                 name=prefix + '_emb_' + str(i) + '-' + feat.name)
                            for i, feat in enumerate(feature_dim_dict["sparse"])}

    if 'sequence' in feature_dim_dict:
        count = len(sparse_embedding)
        # multi-value inputs other than the single-value sparse features,
        # e.g. variable-length text
        sequence_dim_list = feature_dim_dict['sequence']
        for feat in sequence_dim_list:
            # if feat.name not in sparse_embedding:
            if embedding_size == "auto":
                sparse_embedding[feat.name] = Embedding(feat.dimension,
                                                        6 * int(pow(feat.dimension, 0.25)),
                                                        embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                        embeddings_regularizer=l2(l2_reg),
                                                        name=prefix + '_emb_' + str(count) + '-' + feat.name,
                                                        mask_zero=seq_mask_zero)
            else:
                sparse_embedding[feat.name] = Embedding(feat.dimension,
                                                        embedding_size,
                                                        embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                        embeddings_regularizer=l2(l2_reg),
                                                        name=prefix + '_emb_' + str(count) + '-' + feat.name,
                                                        mask_zero=seq_mask_zero)
            count += 1
    return sparse_embedding
def build(self, input_shape):
    self.W = self.add_weight(name='W',
                             shape=input_shape[1:],
                             # initializer='uniform',
                             initializer=RandomNormal(),
                             trainable=True)
    super(iLayer, self).build(input_shape)
def __init__(self):
    self.sparse_dict = {'embedding_dim': 4,
                        'use_hash': False,
                        'dtype': "int32",
                        'feat_cat': 'sparse',
                        'embeddings_initializer': RandomNormal(mean=0.0, stddev=0.0001, seed=2020),
                        'embedding_name': None,
                        'group_name': "default_group",
                        'trainable': True}
    self.dense_dict = {'dimension': 1,
                       'dtype': "float32",
                       'feat_cat': 'dense'}
def build_discriminator(input_shape=(256, 256, 3)): """Returns the discriminator network of the GAN. Args: input_shape (tuple, optional): shape of the input image. Defaults to (256, 256, 3). Returns: 'Model' object: GAN discriminator. """ x0 = layers.Input(input_shape) x = layers.Conv2D(filters=64, kernel_size=4, strides=2, padding='same', kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x0) x = layers.LeakyReLU(0.2)(x) x = layers.Conv2D(filters=128, kernel_size=4, strides=2, padding='same', kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x) x = InstanceNormalization()(x) x = layers.LeakyReLU(0.2)(x) x = layers.Conv2D(filters=256, kernel_size=4, strides=2, padding='same', kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x) x = InstanceNormalization()(x) x = layers.LeakyReLU(0.2)(x) x = ReflectionPadding2D()(x) x = layers.Conv2D(filters=512, kernel_size=4, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x) x = InstanceNormalization()(x) x = layers.LeakyReLU(0.2)(x) x = ReflectionPadding2D()(x) x = layers.Conv2D(filters=1, kernel_size=4, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x) return Model(inputs=x0, outputs=x)
def __init__(self, channels, kernel_size=3, strides=1):
    super(ConvLayer, self).__init__()
    init = RandomNormal(stddev=0.02)
    reflection_padding = kernel_size // 2
    self.reflection_pad = ReflectionPadding2D(reflection_padding)
    self.conv2d = Conv2D(channels, kernel_size, strides=strides, kernel_initializer=init)
def build_discriminator(input_shape=(256, 256, 3)): """Discriminator network architecture""" x0 = layers.Input(input_shape) x = layers.Conv2D(filters=64, kernel_size=4, strides=2, padding='same', kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x0) x = layers.LeakyReLU(0.2)(x) x = layers.Conv2D(filters=128, kernel_size=4, strides=2, padding='same', kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x) x = InstanceNormalization()(x) x = layers.LeakyReLU(0.2)(x) x = layers.Conv2D(filters=256, kernel_size=4, strides=2, padding='same', kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x) x = InstanceNormalization()(x) x = layers.LeakyReLU(0.2)(x) x = ReflectionPadding2D()(x) x = layers.Conv2D(filters=512, kernel_size=4, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x) x = InstanceNormalization()(x) x = layers.LeakyReLU(0.2)(x) x = ReflectionPadding2D()(x) x = layers.Conv2D(filters=1, kernel_size=4, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x) return Model(inputs=x0, outputs=x)
def _add_discriminator_block(old_model, config):
    # new shape is double the size of previous one
    old_input_shape = list(old_model.input.shape)
    new_input_shape = (old_input_shape[-2] * 2, old_input_shape[-2] * 2, old_input_shape[-1])
    model_input = Input(shape=new_input_shape, name="doubled_dis_input")
    # weights init
    w_init = RandomNormal(stddev=0.02)
    w_const = max_norm(1.0)
    # conv layers (a 1x1 conv followed by two 3x3 convs; the loop values are kernel sizes, not strides)
    x = model_input
    for kernel_size in [1, 3, 3]:
        x = Conv2D(config['filters'], kernel_size, padding='same',
                   kernel_initializer=w_init, kernel_constraint=w_const)(x)
        x = LeakyReLU()(x)
    x = AveragePooling2D()(x)
    new_block = x
    # skip the input, 1x1 and activation for the old model
    for i in range(config['num_input_layers'], len(old_model.layers)):
        x = old_model.layers[i](x)
    # define straight-through model
    model1 = Model(model_input, x)
    # compile model
    model1.compile(loss=wasserstein_loss, optimizer=ProGan.get_optimizer(config))
    # downsample the new larger image
    downsample = AveragePooling2D()(model_input)
    # connect old input processing to downsampled new input
    old_block = old_model.layers[1](downsample)
    old_block = old_model.layers[2](old_block)
    # fade in output of old model input layer with new input
    x = WeightedSum()([old_block, new_block])
    # skip the input, 1x1 and activation for the old model
    for i in range(config['num_input_layers'], len(old_model.layers)):
        x = old_model.layers[i](x)
    # define fade-in model
    model2 = Model(model_input, x)
    # compile model
    model2.compile(loss=wasserstein_loss, optimizer=ProGan.get_optimizer(config))
    return [model1, model2]
def kdd_create_embedding_dict(sparse_feature_columns, varlen_sparse_feature_columns,
                              init_std, seed, l2_reg, prefix='sparse_', seq_mask_zero=True):
    global user_embed_np, item_embed_np
    sparse_embedding = {}
    for feat in sparse_feature_columns:
        embed_initializer = RandomNormal(mean=0.0, stddev=init_std, seed=seed)
        if feat.embedding_name == 'user_id':
            print('init user embed')
            embed_initializer = Constant(user_embed_np)
        if feat.embedding_name == 'item_id':
            print('init item embed')
            embed_initializer = Constant(item_embed_np)
        sparse_embedding[feat.embedding_name] = Embedding(
            feat.vocabulary_size,
            feat.embedding_dim,
            embeddings_initializer=embed_initializer,
            name=prefix + '_emb_' + feat.embedding_name)

    if varlen_sparse_feature_columns and len(varlen_sparse_feature_columns) > 0:
        for feat in varlen_sparse_feature_columns:
            embed_initializer = RandomNormal(mean=0.0, stddev=init_std, seed=seed)
            if feat.embedding_name == 'user_id':
                print('init user embed')
                embed_initializer = Constant(user_embed_np)
            if feat.embedding_name == 'item_id':
                print('init item embed')
                embed_initializer = Constant(item_embed_np)
            sparse_embedding[feat.embedding_name] = Embedding(
                feat.vocabulary_size,
                feat.embedding_dim,
                embeddings_initializer=embed_initializer,
                name=prefix + '_seq_emb_' + feat.name,
                mask_zero=seq_mask_zero)
    return sparse_embedding
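# Hedged sketch (an assumption, not from the source): how the global user_embed_np /
# item_embed_np arrays might be prepared so Constant(...) can seed the embedding tables.
# Row k must hold the pretrained vector for id k, and each array's shape must match the
# corresponding (vocabulary_size, embedding_dim). The .npy paths are hypothetical.
import numpy as np

user_embed_np = np.load('user_pretrained.npy')  # shape (n_users, embedding_dim)
item_embed_np = np.load('item_pretrained.npy')  # shape (n_items, embedding_dim)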
def create_embedding_dict(feature_dim_dict, embedding_size, init_std, seed, l2_reg,
                          prefix='sparse', seq_mask_zero=True):
    if embedding_size == 'auto':
        sparse_embedding = {feat.name: Embedding(feat.dimension,
                                                 6 * int(pow(feat.dimension, 0.25)),
                                                 embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                 embeddings_regularizer=l2(l2_reg),
                                                 name=prefix + '_emb_' + str(i) + '-' + feat.name)
                            for i, feat in enumerate(feature_dim_dict["sparse"])}
    else:
        sparse_embedding = {feat.name: Embedding(feat.dimension,
                                                 embedding_size,
                                                 embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                 embeddings_regularizer=l2(l2_reg),
                                                 name=prefix + '_emb_' + str(i) + '-' + feat.name)
                            for i, feat in enumerate(feature_dim_dict["sparse"])}

    if 'sequence' in feature_dim_dict:
        count = len(sparse_embedding)
        sequence_dim_list = feature_dim_dict['sequence']
        for feat in sequence_dim_list:
            # if feat.name not in sparse_embedding:
            if embedding_size == "auto":
                sparse_embedding[feat.name] = Embedding(feat.dimension,
                                                        6 * int(pow(feat.dimension, 0.25)),
                                                        embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                        embeddings_regularizer=l2(l2_reg),
                                                        name=prefix + '_emb_' + str(count) + '-' + feat.name,
                                                        mask_zero=seq_mask_zero)
            else:
                sparse_embedding[feat.name] = Embedding(feat.dimension,
                                                        embedding_size,
                                                        embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                        embeddings_regularizer=l2(l2_reg),
                                                        name=prefix + '_emb_' + str(count) + '-' + feat.name,
                                                        mask_zero=seq_mask_zero)
            count += 1
    return sparse_embedding
def create_embedding_dict(sparse_feature_columns, varlen_sparse_feature_columns, embedding_size,
                          init_std, seed, l2_reg, prefix='sparse_', seq_mask_zero=True):
    if embedding_size == 'auto':
        print("Notice: do not use auto embedding in models other than DCN")
        sparse_embedding = {feat.embedding_name: Embedding(feat.dimension,
                                                           6 * int(pow(feat.dimension, 0.25)),
                                                           embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                           embeddings_regularizer=l2(l2_reg),
                                                           name=prefix + '_emb_' + feat.name)
                            for feat in sparse_feature_columns}
    else:
        sparse_embedding = {feat.embedding_name: Embedding(feat.dimension,
                                                           embedding_size,
                                                           embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                           embeddings_regularizer=l2(l2_reg),
                                                           name=prefix + '_emb_' + feat.name)
                            for feat in sparse_feature_columns}

    if varlen_sparse_feature_columns and len(varlen_sparse_feature_columns) > 0:
        for feat in varlen_sparse_feature_columns:
            # if feat.name not in sparse_embedding:
            if embedding_size == "auto":
                sparse_embedding[feat.embedding_name] = Embedding(feat.dimension,
                                                                  6 * int(pow(feat.dimension, 0.25)),
                                                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                                  embeddings_regularizer=l2(l2_reg),
                                                                  name=prefix + '_seq_emb_' + feat.name,
                                                                  mask_zero=seq_mask_zero)
            else:
                sparse_embedding[feat.embedding_name] = Embedding(feat.dimension,
                                                                  embedding_size,
                                                                  embeddings_initializer=RandomNormal(mean=0.0, stddev=init_std, seed=seed),
                                                                  embeddings_regularizer=l2(l2_reg),
                                                                  name=prefix + '_seq_emb_' + feat.name,
                                                                  mask_zero=seq_mask_zero)
    return sparse_embedding
def __init__(self):
    """
    Initialize the neural network layers
    that we need to use.
    """
    super(myModel, self).__init__()
    self.rows = None
    self.cols = None
    self.kernel = (3, 3)
    self.init = RandomNormal(stddev=0.01)
    self.model = Sequential()
def generator(self):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # image input
    in_image = Input(shape=self.input_shape)
    # c7s1-64
    g = Conv2D(self.n_filters, (11, 11), padding='same', kernel_initializer=init)(in_image)
    g = Activation('tanh')(g)
    g = Conv2D(2 * self.n_filters, (7, 7), strides=(2, 2), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2D(4 * self.n_filters, (5, 5), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2D(4 * self.n_filters, (3, 3), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2D(4 * self.n_filters, (5, 5), strides=(2, 2), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2D(4 * self.n_filters, (3, 3), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2D(4 * self.n_filters, (3, 3), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    # for _ in range(self.n_resblocks):
    #     g = self.resnet_block(g)
    g = Conv2DTranspose(4 * self.n_filters, (3, 3), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2DTranspose(4 * self.n_filters, (3, 3), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2DTranspose(4 * self.n_filters, (5, 5), strides=(2, 2), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2DTranspose(4 * self.n_filters, (3, 3), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2DTranspose(4 * self.n_filters, (5, 5), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    g = Conv2DTranspose(2 * self.n_filters, (7, 7), strides=(2, 2), padding='same', kernel_initializer=init)(g)
    g = Activation('tanh')(g)
    # c7s1-3
    g = Conv2D(1, (11, 11), padding='same', kernel_initializer=init)(g)
    # g = InstanceNormalization(axis=-1)(g)
    out_image = Activation('tanh')(g)
    # define model
    model = Model(in_image, out_image)
    return model