# Example #1
    def EmbeddingNet_classification(n1_features,
                                    n2_features,
                                    n_users,
                                    n_latent_factors_user=8,
                                    n_latent_factors_item=8,
                                    k=200,
                                    alpha=0.15,
                                    dropout=0.2,
                                    lr=0.005):
        """Build and compile a two-tower embedding classifier.

        Each tower embeds integer ids into a latent space; the flattened
        towers are concatenated and fed through a shrinking MLP head that
        ends in a 2-way softmax. Compiled with Adam and
        categorical crossentropy.

        Args:
            n1_features / n2_features: input lengths of the two towers.
            n_users: vocabulary size for both embedding tables (+1 row).
            n_latent_factors_user / n_latent_factors_item: embedding dims.
            k: width of the first hidden layer; later layers are k/2, k/4, k/10.
            alpha: LeakyReLU negative slope.
            dropout: dropout rate used throughout.
            lr: Adam learning rate.

        Returns:
            A compiled Keras ``Model`` taking [usera, userb] inputs.
        """
        def _branch(n_features, latent_dim, input_name):
            # One tower: ids -> embedding -> flat vector with dropout.
            branch_in = Input(shape=(n_features, ), name=input_name)
            vec = Embedding(input_dim=n_users + 1,
                            output_dim=latent_dim)(branch_in)
            vec = Dropout(dropout)(Flatten()(vec))
            return branch_in, vec

        usera_in, usera_vec = _branch(n1_features, n_latent_factors_user,
                                      'useraInput')
        userb_in, userb_vec = _branch(n2_features, n_latent_factors_item,
                                      'userbInput')

        # Merge the two towers.
        x = concatenate([usera_vec, userb_vec], axis=-1)
        x = Dropout(dropout)(LeakyReLU(alpha=alpha)(x))

        # MLP head; no dropout after the final hidden layer (as in the
        # original layer sequence).
        widths = (k, int(k / 2), int(k / 4), int(k / 10))
        for depth, units in enumerate(widths):
            x = LeakyReLU(alpha=alpha)(Dense(units)(x))
            if depth < len(widths) - 1:
                x = Dropout(dropout)(x)
        x = Dense(2, activation='softmax')(x)

        net = Model([usera_in, userb_in], x)
        net.compile(optimizer=Adam(lr=lr),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
        return net
# Example #2
 def create_discriminator():
     """Build and compile a binary real/fake discriminator for 28x28x1 images.

     Three strided 5x5 conv stages (each followed by LeakyReLU and 40%
     dropout) downsample the image, then a dense sigmoid head emits the
     real-probability. Compiled with Adam and binary crossentropy; prints
     a summary before returning.
     """
     img_in = Input(shape=(28, 28, 1))
     x = img_in
     # NOTE(review): 65 filters in the first stage looks like a typo for the
     # conventional 64 — confirm with the author; preserved here as-is.
     for n_filters in (65, 256, 512):
         x = Conv2D(n_filters, 5, strides=(2, 2), padding='same')(x)
         x = LeakyReLU()(x)
         x = Dropout(0.4)(x)
     real_prob = Dense(1, activation='sigmoid')(Flatten()(x))
     net = Model(img_in, real_prob)
     net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
     net.summary()
     return net
# Example #3
    def EmbeddingNetV3(n1_features,
                       n2_features,
                       n_latent_factors_user=5,
                       n_latent_factors_item=8,
                       n_users=743):
        """Build and compile a user/item embedding classifier.

        Embeds the user and item id sequences, concatenates the flattened
        embeddings, and runs them through a 200-100-50-20 MLP ending in a
        2-way softmax. Compiled with Adam(lr=0.005) and categorical
        crossentropy.
        """
        user_in = Input(shape=(n1_features, ), name='userInput')
        user_vec = Embedding(input_dim=n_users + 1,
                             output_dim=n_latent_factors_user)(user_in)
        user_vec = Dropout(0.2)(Flatten()(user_vec))

        item_in = Input(shape=(n2_features, ), name='itemInput')
        item_vec = Embedding(input_dim=n_users + 1,
                             output_dim=n_latent_factors_item)(item_in)
        item_vec = Dropout(0.2)(Flatten()(item_vec))

        hidden = concatenate([user_vec, item_vec], axis=-1)
        hidden = Dropout(0.2)(LeakyReLU(alpha=0.15)(hidden))
        # Hidden stack; the final 20-unit layer gets no dropout,
        # matching the original layer sequence.
        for units in (200, 100, 50):
            hidden = LeakyReLU(alpha=0.15)(Dense(units)(hidden))
            hidden = Dropout(0.2)(hidden)
        hidden = LeakyReLU(alpha=0.15)(Dense(20)(hidden))

        probs = Dense(2, activation='softmax')(hidden)
        net = Model([user_in, item_in], probs)
        net.compile(optimizer=Adam(lr=0.005),
                    loss='categorical_crossentropy',
                    metrics=['accuracy'])
        return net
# Example #4
 def build_discriminator(self):
     """Build and compile the pix2pix PatchGAN discriminator.

     Takes a (source, target) image pair — both shaped ``self.image_shape`` —
     concatenated channel-wise, and outputs a per-patch sigmoid map scoring
     whether the pair looks real. Compiled with binary crossentropy, a 0.5
     loss weight (halves the discriminator's learning relative to the
     generator), and Adam(lr=0.0002, beta_1=0.5).
     """
     # One shared weight initializer for every conv layer.
     init = RandomNormal(stddev=0.02)
     # Source image input
     source_img_in = Input(shape=self.image_shape)
     # Target image input
     target_img_in = Input(shape=self.image_shape)
     # Concatenate the pair channel-wise
     merged = Concatenate()([source_img_in, target_img_in])
     # C64 — no batch norm on the first block, per the pix2pix recipe
     model = Conv2D(64, (4, 4), strides=(2, 2), padding="same", kernel_initializer=init)(merged)
     model = LeakyReLU(alpha=0.2)(model)
     # C128 / C256 / C512: strided conv -> batch norm -> LeakyReLU
     for n_filters in (128, 256, 512):
         model = Conv2D(n_filters, (4, 4), strides=(2, 2), padding="same", kernel_initializer=init)(model)
         model = BatchNormalization()(model)
         model = LeakyReLU(alpha=0.2)(model)
     # Last (unstrided) feature layer
     model = Conv2D(256, (4, 4), padding="same", kernel_initializer=init)(model)
     model = BatchNormalization()(model)
     model = LeakyReLU(alpha=0.2)(model)
     # Patch output: one sigmoid score per receptive-field patch
     model = Conv2D(1, (4, 4), padding="same", kernel_initializer=init)(model)
     patch_out = Activation("sigmoid")(model)
     # Define model
     model = Model([source_img_in, target_img_in], patch_out)
     # BUGFIX: the original passed beta_2=0.5, which cripples Adam's
     # second-moment estimate (default 0.999). The pix2pix optimizer
     # reduces the *first* moment: beta_1=0.5.
     opt = Adam(lr=0.0002, beta_1=0.5)
     model.compile(loss="binary_crossentropy", optimizer=opt, loss_weights=[0.5])

     return model