def build(self, input_shape): """ The only function that is overloaded from the layer class. We Set bias to be trainable. :param input_shape: input tensor shape :return: none """ if self.lambda_single: self.scalar = self.add_weight( shape=(1, ), name="lambda", initializer="ones", dtype="float32", trainable=True, constraint=non_neg(), ) else: self.scalar = self.add_weight( shape=(self.num_conv, ), name="lambda", initializer="ones", dtype="float32", trainable=True, constraint=non_neg(), ) self.built = True
def __init__(self, latent_dim=100, nb_rows=28, nb_columns=28,
             nb_input_channels=1, one_channel_output=True, dropout_rate=None):
    """
    Create and initialize an autoencoder.
    """
    self.latent_dim = latent_dim
    self.nb_input_channels = nb_input_channels
    self.nb_rows = nb_rows
    self.nb_columns = nb_columns
    if one_channel_output:
        self.nb_output_channels = 1
    else:
        self.nb_output_channels = nb_input_channels

    # Adapt this if using the `channels_first` image data format.
    input_img = Input(shape=(self.nb_rows, self.nb_columns, nb_input_channels))
    x = Flatten()(input_img)
    encoded = Dense(latent_dim, activation='sigmoid')(x)
    self.encoder = Model(input_img, encoded, name='encoder')

    encoded_img = Input(shape=(self.latent_dim,))
    if dropout_rate is None:
        x = MaxPlusDense(self.nb_rows * self.nb_columns * self.nb_output_channels,
                         use_bias=False,
                         kernel_constraint=constraints.non_neg())(encoded_img)
    else:
        x = Dropout(dropout_rate)(encoded_img)
        x = MaxPlusDense(self.nb_rows * self.nb_columns * self.nb_output_channels,
                         use_bias=False,
                         kernel_constraint=constraints.non_neg())(x)
    decoded = Reshape((self.nb_rows, self.nb_columns, self.nb_output_channels))(x)
    self.decoder = Model(encoded_img, decoded, name='decoder')

    encoded = self.encoder(input_img)
    decoded = self.decoder(encoded)
    self.autoencoder = Model(input_img, decoded)
    self.autoencoder.compile(optimizer='adadelta',
                             loss='mean_squared_error',
                             metrics=['mse'])
def Recmand_model(num_user, num_item, k):
    input_uer = Input(shape=[None, ], dtype="int32")
    model_uer = Embedding(
        num_user + 1,
        k,
        input_length=1,
        embeddings_regularizer=regularizers.l2(0.001),  # regularization, same below
        embeddings_constraint=non_neg()  # non-negativity, same below
    )(input_uer)
    model_uer = Dense(k, activation="relu", use_bias=True)(model_uer)  # activation function
    model_uer = Dropout(0.1)(model_uer)  # Dropout randomly drops some nodes to prevent overfitting
    model_uer = Reshape((k, ))(model_uer)

    input_item = Input(shape=[None, ], dtype="int32")
    model_item = Embedding(num_item + 1,
                           k,
                           input_length=1,
                           embeddings_regularizer=regularizers.l2(0.001),
                           embeddings_constraint=non_neg())(input_item)
    model_item = Dense(k, activation="relu", use_bias=True)(model_item)
    model_item = Dropout(0.1)(model_item)
    model_item = Reshape((k, ))(model_item)

    out = Dot(1)([model_uer, model_item])  # dot product
    model = Model(inputs=[input_uer, input_item], outputs=out)
    model.compile(loss='mse', optimizer='Adam')
    model.summary()
    return model
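# A minimal usage sketch for Recmand_model (hypothetical toy data, an
# assumption for illustration: user/item IDs are integer-encoded and the
# targets are float ratings):
import numpy as np

model = Recmand_model(num_user=100, num_item=200, k=32)
user_ids = np.array([[1], [2], [3]])
item_ids = np.array([[10], [20], [30]])
ratings = np.array([4.0, 3.5, 5.0])
model.fit([user_ids, item_ids], ratings, epochs=1, batch_size=2)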
def NMF_image(n_users, n_items, n_factors):
    item_input = Input(shape=[1])
    item_embedding = Embedding(n_items,
                               n_factors,
                               embeddings_regularizer=l2(1e-5),
                               embeddings_constraint=non_neg())(item_input)
    item_vec = Flatten()(item_embedding)

    image_input = Input(shape=(224, 224, 3))
    imgflow = tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu')(image_input)
    imgflow = tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu')(imgflow)
    imgflow = MaxPooling2D(pool_size=(4, 4))(imgflow)
    imgflow = Dropout(0.25)(imgflow)
    imgflow = tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu')(imgflow)
    imgflow = MaxPooling2D(pool_size=(4, 4))(imgflow)
    imgflow = Dropout(0.25)(imgflow)
    imgflow = tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu')(imgflow)
    imgflow = Dropout(0.25)(imgflow)
    imgflow = tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu')(imgflow)
    imgflow = Dropout(0.25)(imgflow)
    imgflow = Flatten()(imgflow)
    imgflow = Dense(512, activation='relu')(imgflow)
    imgflow = BatchNormalization()(imgflow)
    imgflow = Dense(256, activation='relu')(imgflow)
    imgflow = BatchNormalization()(imgflow)
    imgflow = Dense(128, activation='relu')(imgflow)

    Concat_1 = tf.keras.layers.concatenate(inputs=[item_vec, imgflow], axis=1)
    Concat_1 = Dense(n_factors, kernel_initializer='glorot_normal')(Concat_1)

    user_input = Input(shape=[1])
    user_embedding = Embedding(n_users,
                               n_factors,
                               embeddings_regularizer=l2(1e-5),
                               embeddings_constraint=non_neg())(user_input)
    user_vec = Flatten()(user_embedding)

    DotProduct = Dot(axes=1)([Concat_1, user_vec])
    model = Model([user_input, item_input, image_input], DotProduct)
    model.compile(loss='mean_squared_error', optimizer="adam")
    return model
def gen_model(n_users, n_items, latent_dim, normalize):
    userInputLayer = layers.Input(shape=[1])
    itemInputLayer = layers.Input(shape=[1])
    if normalize is True:
        userVec = layers.Embedding(n_users,
                                   latent_dim,
                                   embeddings_initializer='random_normal',
                                   name='User_Embedding')(userInputLayer)
        itemVec = layers.Embedding(n_items,
                                   latent_dim,
                                   embeddings_initializer='random_normal',
                                   name='Movie_Embedding')(itemInputLayer)
    else:  # non-negative matrix factorization
        userVec = layers.Embedding(
            n_users,
            latent_dim,
            embeddings_initializer='random_normal',
            name='User_Embedding',
            embeddings_constraint=non_neg())(userInputLayer)
        itemVec = layers.Embedding(
            n_items,
            latent_dim,
            embeddings_initializer='random_normal',
            name='Movie_Embedding',
            embeddings_constraint=non_neg())(itemInputLayer)
    userBias = layers.Embedding(n_users, 1, embeddings_initializer='zeros')(userInputLayer)
    itemBias = layers.Embedding(n_items, 1, embeddings_initializer='zeros')(itemInputLayer)
    userVec = layers.Flatten()(userVec)
    userBias = layers.Flatten()(userBias)
    itemVec = layers.Flatten()(itemVec)
    itemBias = layers.Flatten()(itemBias)
    r_hat = layers.Dot(name='Dot', axes=1)([userVec, itemVec])
    r_hat = layers.Add(name='Bias')([r_hat, userBias, itemBias])
    #outputLayer = layers.Concatenate()([inputLayer_a, inputLayer_b])
    #keras.layers.Concatenate(axis=-1)
    model = models.Model(inputs=[userInputLayer, itemInputLayer], outputs=r_hat)
    model.summary()
    model.compile(loss='mse', optimizer='adam')
    plot_model(model, to_file='tmp/model.png', show_shapes=True,
               show_layer_names=True)
    return model
def get_model(self):
    item_input = Input(shape=[1], name='Item')
    item_embedding = Embedding(self.num_tweets,
                               self.n_latent_factors_item,
                               name='Item-Embedding',
                               embeddings_constraint=non_neg())(item_input)
    item_vec = Flatten(name='FlattenItem')(item_embedding)

    user_input = Input(shape=[1], name='User')
    user_vec = Flatten(name='FlattenUsers')(
        Embedding(self.num_users,
                  self.n_latent_factors_user,
                  name='User-Embedding',
                  embeddings_constraint=non_neg())(user_input))

    prod = dot([item_vec, user_vec], axes=1, name='DotProduct')
    # `input`/`output` are removed in modern Keras; use `inputs`/`outputs`.
    return Model(inputs=[user_input, item_input], outputs=prod)
def build(self, input_shape):
    # Set the input dimensions as the output dimension of the conv layer
    assert len(input_shape) >= 2
    self.num_data = input_shape[0]
    self.input_dim = self.tied_layer.output_shape[-1]
    self.input_spec = [InputSpec(min_ndim=2, axes={-1: self.input_dim})]

    # Set kernel from the tied layer: flip both spatial axes (a 180-degree
    # rotation), then reshape to a single-output-channel conv kernel.
    self.kernel = K.reverse(self.tied_layer.kernel, axes=0)
    self.kernel = K.reverse(self.kernel, axes=1)
    self.kernel = K.reshape(
        self.kernel,
        (
            self.kernel_size[0],
            self.kernel_size[1],
            self.tied_layer.output_shape[-1],
            1,
        ),
    )

    # Set bias from the lambda_value
    if self.lambda_single:
        self.bias = self.add_weight(
            shape=(1,),
            initializer=self.bias_initializer,
            name="lambda",
            regularizer=self.bias_regularizer,
            trainable=self.lambda_trainable,
            constraint=non_neg(),
        )
    else:
        self.bias = self.add_weight(
            shape=(self.tied_layer.output_shape[3],),
            initializer=self.bias_initializer,
            name="lambda",
            regularizer=self.bias_regularizer,
            trainable=self.lambda_trainable,
            constraint=non_neg(),
        )

    # noiseSTD
    self.noiseSTD = self.add_weight(
        shape=(1,),
        initializer=self.bias_initializer,
        name="noiseSTD",
        regularizer=self.bias_regularizer,
        trainable=False,
        constraint=non_neg(),
    )

    # Mark the layer as built
    self.built = True
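# A small sketch (plain NumPy, hypothetical 3x3 kernel) of what the two
# K.reverse calls above compute: reversing both spatial axes is a
# 180-degree rotation, the flip that relates cross-correlation and true
# convolution when tying a decoder's kernel to its encoder's.
import numpy as np

k = np.arange(9).reshape(3, 3)
flipped = k[::-1, ::-1]  # reverse axis 0, then axis 1
assert np.array_equal(flipped, np.rot90(k, 2))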
def build(self, input_shape):
    # Create trainable weight variables for this layer.
    self.wp = self.add_weight(name='positive_weights',
                              shape=(input_shape[1], self.output_dim),
                              initializer=RandomUniform(minval=0, maxval=0.2),
                              trainable=True,
                              constraint=constraints.non_neg())
    self.wn = self.add_weight(name='negative_weights',
                              shape=(input_shape[1], self.output_dim),
                              initializer=RandomUniform(minval=0, maxval=0.2),
                              trainable=True,
                              constraint=constraints.non_neg())
    super(RandomLayer, self).build(input_shape)
def build_stream(monotonic):
    nfts = self.num_monotonic if monotonic else self.num_unconstrained
    input_ = KL.Input((self.num_features,))
    n = self.num_unconstrained
    # Split the input: the first n features are unconstrained, the rest monotonic.
    if monotonic:
        last_ = KL.Lambda(lambda x: x[:, n:])(input_)
    else:
        last_ = KL.Lambda(lambda x: x[:, :n])(input_)
    if nfts > 0:
        constraint = non_neg() if monotonic else None
        num_dense = (self.dense_monotonic_num if monotonic
                     else self.dense_unconstrained_num)
        for d in range(num_dense):
            last_ = KL.Dense(self.dense_width,
                             activation=self.dense_activation,
                             use_bias=True,
                             kernel_constraint=constraint,
                             #kernel_regularizer=l2(self.l2),
                             )(last_)
            if self.dropout is not None and self.dropout > 0.:
                last_ = KL.Dropout(self.dropout)(last_)
    submodel = Model([input_], [last_])
    #submodel.summary()
    return submodel
def train_network_convex(self):
    x_train = (self.df_price - 50) / 100
    y_train = 90 - self.revenue
    model = Sequential()
    model.add(Dense(units=100, input_dim=self.product_number))
    model.add(Activation("sigmoid"))
    # Add kernel_constraint=non_neg() so the output is a non-negative
    # combination of the hidden units. Note this guarantees convexity only
    # when the hidden activation is itself convex (e.g. ReLU); sigmoid is
    # neither convex nor concave.
    model.add(Dense(units=1, kernel_constraint=non_neg()))
    model.add(Activation("linear"))
    model.summary()
    model.compile(optimizer='adam', loss='mse', metrics=['mae'])
    model.fit(x_train, y_train, validation_split=0.2, epochs=300, batch_size=32)

    price_new = (np.random.randn(10, 10) * 10 + 25) / 100
    # s = model.predict(price_new)
    # print(s)
    self.weights = np.array(model.get_weights())
    self.weights[0] = self.weights[0].T
    price = np.ones(self.product_number)
    s = np.array([self.weights[2][i][0] for i in range(len(self.weights[2]))])
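# A minimal numeric convexity check (a standalone sketch, independent of the
# class above): for f(x) = sum_i w_i * g(a_i * x + b_i) with w_i >= 0, f is
# convex whenever g is convex, since non-negative sums of convex functions
# are convex. ReLU is convex, so the midpoint inequality
# f((x + y) / 2) <= (f(x) + f(y)) / 2 holds for every x, y.
import numpy as np

rng = np.random.default_rng(0)
a, b = rng.normal(size=10), rng.normal(size=10)
w = np.abs(rng.normal(size=10))  # non-negative output weights

def f(x):
    return np.sum(w * np.maximum(a * x + b, 0.0))  # ReLU hidden layer

for x, y in rng.normal(size=(100, 2)):
    assert f((x + y) / 2) <= (f(x) + f(y)) / 2 + 1e-9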
def test_some_pairwise(self):
    feature_names = [
        "f1", "f2", "f3", "f5", "f6", "f7", "f8", "f9", "f10", "f11"
    ]
    fm = DeepFM(model_features=[["f1"], ["f2"], [10], [10], [10], [10],
                                [10], [10], [10], [10]],
                feature_dimensions=[
                    100, 1, 100, 100, 100, 100, 100, 100, 100, 100
                ],
                realval=[
                    False, True, False, False, False, False, False, False,
                    False, False
                ],
                mask_zero=True,
                feature_names=feature_names,
                obj="nce")
    # Materialize the zip so it prints usefully and can be iterated again.
    groups = list(zip(["f1"] * (len(feature_names) - 1), feature_names[1:]))
    print(groups)
    model = fm.build_model(10,
                           dropout_layer=0.5,
                           deep_out=True,
                           deep_out_bias=False,
                           deep_weight_groups=[groups],
                           deep_kernel_constraint=non_neg())
    try:
        from keras.utils import plot_model
        plot_model(model, to_file="some_pairwise.png")
    except Exception:
        pass
def __init__(self, latent_dim=100, nb_rows=28, nb_columns=28,
             nb_input_channels=1, one_channel_output=True,
             sparsity_weight=0.1, sparsity_objective=0.1):
    """
    Create a sparse shallow AE with a custom KL-divergence regularizer,
    enforcing weight non-negativity with the Keras NonNeg constraint.
    Arguments:
        sparsity_weight: positive float - the weight of the sparsity cost.
        sparsity_objective: float between 0 and 1 - the sparsity parameter.
    """
    self.latent_dim = latent_dim
    self.nb_rows = nb_rows
    self.nb_columns = nb_columns
    self.nb_input_channels = nb_input_channels
    if one_channel_output:
        self.nb_output_channels = 1
    else:
        self.nb_output_channels = nb_input_channels
    self.sparsity_weight = sparsity_weight
    self.sparsity_objective = sparsity_objective

    # Adapt this if using the `channels_first` image data format.
    input_img = Input(shape=(self.nb_rows, self.nb_columns, nb_input_channels))
    x = Flatten()(input_img)
    encoded = Dense(latent_dim, activation='sigmoid',
                    activity_regularizer=custom_regularizers.KL_divergence_sum(
                        beta=self.sparsity_weight,
                        rho=self.sparsity_objective))(x)
    self.encoder = Model(input_img, encoded, name='encoder')

    encoded_img = Input(shape=(self.latent_dim,))
    x = Dense(self.nb_rows * self.nb_columns * self.nb_output_channels,
              kernel_constraint=constraints.non_neg())(encoded_img)
    x = LeakyReLU(alpha=0.1)(x)
    decoded = Reshape((self.nb_rows, self.nb_columns, self.nb_output_channels))(x)
    self.decoder = Model(encoded_img, decoded, name='decoder')

    encoded = self.encoder(input_img)
    decoded = self.decoder(encoded)
    self.autoencoder = Model(input_img, decoded)
    self.autoencoder.compile(optimizer='adadelta',
                             loss='mean_squared_error',
                             metrics=['mse'])
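# `custom_regularizers.KL_divergence_sum` is not shown here; below is a
# hedged sketch of what such a KL sparsity penalty typically computes (an
# assumption for illustration, not the project's actual implementation):
# the summed Bernoulli KL divergence between the target activation rate rho
# and each latent unit's mean activation rho_hat, scaled by beta.
from keras import backend as K
from keras.regularizers import Regularizer

class KLDivergenceSumSketch(Regularizer):
    def __init__(self, beta=0.1, rho=0.1):
        self.beta = beta  # weight of the sparsity cost
        self.rho = rho    # target mean activation

    def __call__(self, activations):
        # Mean activation of each unit over the batch, kept away from 0 and 1.
        rho_hat = K.clip(K.mean(activations, axis=0),
                         K.epsilon(), 1 - K.epsilon())
        kl = (self.rho * K.log(self.rho / rho_hat)
              + (1 - self.rho) * K.log((1 - self.rho) / (1 - rho_hat)))
        return self.beta * K.sum(kl)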
def convex_model2(input_dim, output_dim):
    input = keras.layers.Input(shape=(input_dim, ))
    x0 = keras.layers.Dense(150, kernel_constraint=non_neg(), activation='relu')(input)
    x1 = keras.layers.Dense(150, kernel_constraint=non_neg(), activation='relu')(x0)
    # Unconstrained skip connection reading the raw input ("passthrough" path).
    direct1 = keras.layers.Dense(150, activation='relu')(input)
    x2 = keras.layers.Add()([x1, direct1])
    x2 = keras.layers.Dense(30, kernel_constraint=non_neg(), activation='relu')(x2)
    out = keras.layers.Dense(output_dim, kernel_constraint=non_neg())(x2)
    model = keras.models.Model(inputs=input, outputs=out)
    return model
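# Why convex_model2 is convex in its input, sketched: ReLU of an affine map
# is convex; a non-negative weighted sum of convex functions is convex; and
# ReLU (convex and non-decreasing) of a convex function stays convex. The
# unconstrained `direct1` branch is allowed because it reads the raw input,
# so each of its units is still ReLU-of-affine. A quick midpoint check
# (hypothetical shapes; the kernels are first projected to be non-negative,
# since the constraint is only enforced during training updates):
import keras
import numpy as np

m = convex_model2(input_dim=4, output_dim=1)
for layer in m.layers:
    if isinstance(layer, keras.layers.Dense) and layer.kernel_constraint is not None:
        w, b = layer.get_weights()
        layer.set_weights([np.maximum(w, 0.0), b])
x, y = np.random.randn(1, 4), np.random.randn(1, 4)
assert (m.predict((x + y) / 2) <= (m.predict(x) + m.predict(y)) / 2 + 1e-5).all()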
def build(self, input_shape):
    # Create a trainable weight variable for this layer.
    self.W = self.add_weight(name='highway',
                             shape=(1, self.output_dim),
                             initializer='uniform',
                             constraint=non_neg(),
                             trainable=True)
    # Be sure to call this at the end.
    super(HighwayWeights, self).build(input_shape)
def create_model(self, n_users, n_items):
    movie_input = keras.layers.Input(shape=[1], name='Item')
    if self.nonneg:
        movie_embedding = keras.layers.Embedding(
            n_items + 1,
            self.n_latent_factors_movie,
            name='Movie-Embedding',
            embeddings_constraint=non_neg())(movie_input)
    else:
        movie_embedding = keras.layers.Embedding(
            n_items + 1,
            self.n_latent_factors_movie,
            name='Movie-Embedding')(movie_input)
    movie_vec = keras.layers.Flatten(name='FlattenMovies')(movie_embedding)
    movie_vec = keras.layers.Dropout(0.2)(movie_vec)

    user_input = keras.layers.Input(shape=[1], name='User')
    if self.nonneg:
        user_vec = keras.layers.Flatten(name='FlattenUsers')(
            keras.layers.Embedding(
                n_users + 1,
                self.n_latent_factors_user,
                name='User-Embedding',
                embeddings_constraint=non_neg())(user_input))
    else:
        user_vec = keras.layers.Flatten(name='FlattenUsers')(
            keras.layers.Embedding(n_users + 1,
                                   self.n_latent_factors_user,
                                   name='User-Embedding')(user_input))
    user_vec = keras.layers.Dropout(0.2)(user_vec)

    concat = keras.layers.concatenate([movie_vec, user_vec], name='Concat')
    concat_dropout = keras.layers.Dropout(0.2)(concat)
    # Chain each dense block through the preceding dropout so the dropout
    # layers are actually part of the graph; names must also be unique.
    dense = keras.layers.Dense(200, name='FullyConnected')(concat_dropout)
    dropout_1 = keras.layers.Dropout(0.2, name='Dropout-1')(dense)
    dense_2 = keras.layers.Dense(100, name='FullyConnected-1')(dropout_1)
    dropout_2 = keras.layers.Dropout(0.2, name='Dropout-2')(dense_2)
    dense_3 = keras.layers.Dense(50, name='FullyConnected-2')(dropout_2)
    dropout_3 = keras.layers.Dropout(0.2, name='Dropout-3')(dense_3)
    dense_4 = keras.layers.Dense(20, name='FullyConnected-3',
                                 activation='relu')(dropout_3)
    result = keras.layers.Dense(1, activation='relu', name='Activation')(dense_4)

    adam = Adam(lr=0.005)
    self.model = keras.Model([user_input, movie_input], result)
    self.model.compile(optimizer=adam, loss='mean_absolute_error')
def _create_embedding_user(self):
    self.user_input = keras.layers.Input(shape=[1], name='User')
    self.user_vec = keras.layers.Flatten(name='FlattenUsers')(
        keras.layers.Embedding(self.n_user + 1,
                               self.n_latent_ftr,
                               name='User-Embedding',
                               embeddings_constraint=non_neg())(self.user_input))
    self.user_vec = keras.layers.Dropout(0.2)(self.user_vec)
def build(self, input_shape):
    nb_feats = input_shape[-1]
    std_shape = (1, 1, 1, nb_feats)
    self.min_std = self.add_weight(shape=std_shape,
                                   initializer=Constant(self.min_std_val),
                                   name='min_std',
                                   constraint=non_neg())
    self.built = True
def build(self, unit, input_shape=None):
    self.prop_weights = self.add_weight(
        name='proportion-weights',
        shape=(unit,),
        initializer=RND_UNI,
        trainable=True,
        constraint=non_neg()
    )
    super(Multiply, self).build(input_shape)
def normal_model(input_dim, output_dim):
    input = keras.layers.Input(shape=(input_dim, ))
    x0 = keras.layers.Dense(80, activation='relu')(input)
    x1 = keras.layers.Dense(50, activation='relu')(x0)
    x2 = keras.layers.Dense(30, activation='relu')(x1)
    out = keras.layers.Dense(output_dim, kernel_constraint=non_neg())(x2)
    model = keras.models.Model(inputs=input, outputs=out)
    return model
def arch8():
    model = Sequential()
    model.add(Dense(512, input_dim=122, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(32, activation='sigmoid'))
    model.add(Dropout(0.3))
    model.add(Dense(1, kernel_constraint=non_neg(), activation='sigmoid'))
    return model
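# A minimal usage sketch for arch8 (synthetic data; the 122-feature input
# and binary target below are assumptions for illustration only):
import numpy as np

model = arch8()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
X = np.random.rand(64, 122)
y = np.random.randint(0, 2, size=(64, 1))
model.fit(X, y, epochs=1, batch_size=16)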
def build(self, input_shape):
    assert isinstance(input_shape, list)
    # Create a trainable weight variable for this layer.
    self.kernel1 = self.add_weight(name="modality_weight_1",
                                   shape=(1, ),
                                   initializer=constant(value=0.0),
                                   trainable=True,
                                   constraint=non_neg())
    super(Linear, self).build(input_shape)
def create_model(self, n_users, n_items):
    user_id_input = keras.layers.Input(shape=[1], name='user')
    item_id_input = keras.layers.Input(shape=[1], name='item')

    if self.nonneg:
        user_embedding = keras.layers.Embedding(
            output_dim=100,
            input_dim=n_users + 1,
            input_length=1,
            name='user_embedding',
            embeddings_constraint=non_neg())(user_id_input)
        item_embedding = keras.layers.Embedding(
            output_dim=100,
            input_dim=n_items + 1,
            input_length=1,
            name='item_embedding',
            embeddings_constraint=non_neg())(item_id_input)
    else:
        user_embedding = keras.layers.Embedding(
            output_dim=100,
            input_dim=n_users + 1,
            input_length=1,
            name='user_embedding')(user_id_input)
        item_embedding = keras.layers.Embedding(
            output_dim=100,
            input_dim=n_items + 1,
            input_length=1,
            name='item_embedding')(item_id_input)

    user_vecs = keras.layers.Flatten()(user_embedding)
    item_vecs = keras.layers.Flatten()(item_embedding)
    user_dropout = keras.layers.Dropout(0.2, name="user_dropout")(user_vecs)
    item_dropout = keras.layers.Dropout(0.2, name="item_dropout")(item_vecs)

    y = keras.layers.dot([user_dropout, item_dropout], axes=1)
    self.model = keras.models.Model(inputs=[user_id_input, item_id_input],
                                    outputs=[y])
    self.model.compile(optimizer='adam', loss='mae')
def _create_embedding_item(self):
    self.item_input = keras.layers.Input(shape=[1], name='Item')
    self.item_embedding = keras.layers.Embedding(
        self.n_item + 1,
        self.n_latent_ftr,
        name='Item-Embedding',
        embeddings_constraint=non_neg())(self.item_input)
    self.item_vec = keras.layers.Flatten(name='FlattenItems')(
        self.item_embedding)
    self.item_vec = keras.layers.Dropout(0.2)(self.item_vec)
def build(self, input_shape):
    assert len(input_shape[0]) >= 2
    input_dim = input_shape[0][-1]
    self.mu = self.add_weight(shape=(self.num_comp, input_dim),
                              initializer=initializers.TruncatedNormal(
                                  mean=0.0, stddev=0.1),
                              name='mu')
    self.s = self.add_weight(shape=(self.num_comp, ),
                             initializer='ones',
                             name='s',
                             constraint=constraints.non_neg())
def __init__(self,
             alpha_initializer='ones',
             alpha_regularizer=None,
             alpha_constraint=constraints.non_neg(),
             beta_initializer='ones',
             beta_regularizer=None,
             beta_constraint=constraints.non_neg(),
             shared_axes=None,
             **kwargs):
    super(PTReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.alpha_initializer = initializers.get(alpha_initializer)
    self.alpha_regularizer = regularizers.get(alpha_regularizer)
    self.alpha_constraint = constraints.get(alpha_constraint)
    self.beta_initializer = initializers.get(beta_initializer)
    self.beta_regularizer = regularizers.get(beta_regularizer)
    self.beta_constraint = constraints.get(beta_constraint)
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)
def build(self, input_shape):
    # Create trainable weights for this layer.
    # This layer simulates a mosaic (color filter array) function by
    # applying a per-pixel dot product on the input channels, so the total
    # number of weights equals the input image shape
    # (img_width * img_height * img_channels).
    print("input_shape: " + str(input_shape))
    self.cfa = self.add_weight(name='colorFilter',
                               shape=(input_shape[1], input_shape[2],
                                      input_shape[3]),
                               initializer=constant(value=0.5),
                               trainable=True,
                               constraint=non_neg())
    super(MosaicLayer, self).build(input_shape)
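# The layer's `call` is not shown above; here is a hedged sketch of what a
# per-pixel mosaic might compute (an assumption, not the project's actual
# code): weight each pixel's channels by the learned CFA and sum over
# channels, producing a single-channel mosaiced image.
from keras import backend as K

def call_sketch(self, inputs):
    # inputs: (batch, height, width, channels); self.cfa: (height, width, channels)
    weighted = inputs * self.cfa  # broadcasts over the batch axis
    return K.sum(weighted, axis=-1, keepdims=True)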
def test_all_pairwise(self):
    feature_names = [
        "f1", "f2", "f3", "f5", "f6", "f7", "f8", "f9", "f10", "f11"
    ]
    fm = DeepFM(model_features=[["f1"], ["f2"], [1, 2], [1, 2], [1, 2],
                                [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]],
                feature_dimensions=[
                    100, 1, 100, 100, 100, 100, 100, 100, 100, 100
                ],
                realval=[
                    False, True, False, False, False, False, False, False,
                    False, False
                ],
                mask_zero=True,
                feature_names=feature_names,
                obj="ns")
    model = fm.build_model(10,
                           dropout_layer=0.5,
                           deep_out=True,
                           deep_out_bias=False,
                           deep_kernel_constraint=non_neg())
    model.compile(loss='binary_crossentropy',
                  metrics=['accuracy'],
                  optimizer=tf.train.AdamOptimizer())
    model.fit(x=[
        np.array([0]),
        np.array([0]),
        np.array([[51, 2]]),
        np.array([[0, 0]]),
        np.array([[25, 1]]),
        np.array([[0, 0]]),
        np.array([[17, 1]]),
        np.array([[1, 1]]),
        np.array([[1, 1]]),
        np.array([[0, 0]])
    ], y=np.array([0]))
    try:
        from keras.utils import plot_model
        plot_model(model, to_file="all_pairwise.png")
    except Exception:
        pass
def build(self, input_shape): """ The only function that is overloaded from the Dense layer class. We Set bias to be trainable. :param input_shape: input tensor shape :return: none """ # Create a trainable weight variable for this layer. self.bias = self.add_weight( shape=(self.num_conv, ), initializer=self.bias_initializer, name="bias", regularizer=self.bias_regularizer, trainable=True, constraint=non_neg(), ) self.built = True
def __init__(self,
             latent_dim=100,
             nb_rows=28,
             nb_columns=28,
             nb_input_channels=1,
             one_channel_output=True):
    """
    Create a shallow AE with the Keras non-negativity constraint.
    """
    self.latent_dim = latent_dim
    self.nb_input_channels = nb_input_channels
    self.nb_rows = nb_rows
    self.nb_columns = nb_columns
    if one_channel_output:
        self.nb_output_channels = 1
    else:
        self.nb_output_channels = nb_input_channels

    # Adapt this if using the `channels_first` image data format.
    input_img = Input(shape=(self.nb_rows, self.nb_columns,
                             self.nb_input_channels))
    x = Conv2D(64, (4, 4), strides=(2, 2), padding='same')(input_img)
    x = LeakyReLU(alpha=0.1)(x)
    x = Conv2D(128, (4, 4), strides=(2, 2), padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    x = Flatten()(x)
    x = Dense(1024)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    encoded = Dense(self.latent_dim, activation='sigmoid')(x)
    self.encoder = Model(input_img, encoded, name='encoder')

    encoded_img = Input(shape=(self.latent_dim, ))
    x = Dense(self.nb_rows * self.nb_columns * self.nb_output_channels,
              kernel_constraint=constraints.non_neg())(encoded_img)
    x = LeakyReLU(alpha=0.1)(x)
    decoded = Reshape(
        (self.nb_rows, self.nb_columns, self.nb_output_channels))(x)
    self.decoder = Model(encoded_img, decoded, name='decoder')

    encoded = self.encoder(input_img)
    decoded = self.decoder(encoded)
    self.autoencoder = Model(input_img, decoded)
    self.autoencoder.compile(optimizer='adadelta',
                             loss='mean_squared_error',
                             metrics=['mse'])
def make_model(self):
    x = Input(shape=(self.look_back, ))
    ar_output = Dense(units=1,
                      kernel_initializer='uniform',
                      kernel_constraint=unit_norm(),
                      name='ar-weights')(x)
    # Keep the most recent point of the input window.
    pre_point = Lambda(lambda k: k[:, -1:])(x)
    merged_output = concatenate([ar_output, pre_point])
    outputs = Dense(units=1,
                    kernel_initializer=RND_UNI,
                    use_bias=False,
                    kernel_constraint=non_neg(),
                    name='contrib-weights')(merged_output)
    model = Model(inputs=x, outputs=outputs)
    model.compile('Adam', 'mae')
    return model
def test_non_neg():
    non_neg_instance = constraints.non_neg()
    normed = non_neg_instance(K.variable(get_example_array()))
    assert np.all(np.min(K.eval(normed), axis=1) == 0.)
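# What non_neg does, on a concrete value (a standalone sketch): negative
# entries are zeroed out, non-negative entries pass through unchanged.
from keras import backend as K
from keras import constraints
import numpy as np

clipped = K.eval(constraints.non_neg()(K.variable(np.array([[-1.5, 0.0, 2.0]]))))
# -> [[0., 0., 2.]]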
rate_num = len(rate)
user_num = len(user_dict)
book_num = len(book_dict)
if args.use_implicit:
    feedback_u, feedback_b = get_feedback(user_all, book_all, user_num, book_num)
    feedback_u, feedback_b = pad_sequences(feedback_u), pad_sequences(feedback_b)
print('Data prepared')

# Model
u_input = Input(shape=[1], name='user')
if not args.nmf:
    U = Embedding(user_num,
                  args.emb_dim,
                  input_length=1,
                  embeddings_initializer="random_normal",
                  name='user_embed')(u_input)
else:
    U = Embedding(user_num,
                  args.emb_dim,
                  input_length=1,
                  embeddings_initializer="random_normal",
                  embeddings_constraint=non_neg(),
                  name='user_embed')(u_input)
U = Dropout(0.3)(U)
U = Flatten()(U)

b_input = Input(shape=[1], name='book')
if not args.nmf:
    B = Embedding(book_num,
                  args.emb_dim,
                  input_length=1,
                  embeddings_initializer="random_normal",
                  name='book_embed')(b_input)
else:
    B = Embedding(book_num,
                  args.emb_dim,
                  input_length=1,
                  embeddings_initializer="random_normal",
                  embeddings_constraint=non_neg(),
                  name='book_embed')(b_input)
B = Dropout(0.3)(B)
B = Flatten()(B)

U_bias = Embedding(user_num,
                   1,
                   input_length=1,
                   embeddings_initializer="zeros",
                   name='user_embed_bias')(u_input)