Example #1
def task4(learning_rate=0.001, momentum=0.9, epochs=1, batch_size=None):
    label_tr = pd.read_csv("fairface_label_train.csv")
    label_te = pd.read_csv("fairface_label_val.csv")

    # Load image dataset
    x_train, yg_train = load_img_dataset_with_label(label_tr, "gender")
    x_test, yg_test = load_img_dataset_with_label(label_te, "gender")

    # Race labels as integer category codes
    yr_train = label_tr['race'].astype('category').cat.codes
    yr_test = label_te['race'].astype('category').cat.codes

    # Normalize using min-max scaling
    x_tr = MinMaxScaling(x_train)
    x_te = MinMaxScaling(x_test)

    # Add another dimension for channel
    x_te = np.expand_dims(x_te, axis=3)
    x_tr = np.expand_dims(x_tr, axis=3)

    # Label encoding: one encoder per task so neither overwrites the other
    gender_encoder = LabelEncoder()
    gender_encoder.fit(yg_train)
    yg_tr = gender_encoder.transform(yg_train)
    yg_te = gender_encoder.transform(yg_test)

    race_encoder = LabelEncoder()
    race_encoder.fit(yr_train)
    yr_tr = race_encoder.transform(yr_train)
    yr_te = race_encoder.transform(yr_test)

    # Task 4: Your own ConvNet on both tasks simultaneously
    inputs = layers.Input(shape=(32, 32, 1))
    conv1 = layers.Conv2D(filters=32, kernel_size=7, strides=1,
                          padding='same', activation='relu',
                          name='conv1')(inputs)
    max1 = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                            padding='same', name='pool1')(conv1)
    lrn1 = tf.keras.layers.Lambda(
        tf.nn.local_response_normalization)(max1)
    conv2 = layers.Conv2D(filters=64, kernel_size=(1, 1),
                          padding='same', strides=1, activation='relu')(lrn1)
    conv3 = layers.Conv2D(filters=192, kernel_size=(3, 3),
                          padding='same', strides=1, activation='relu')(conv2)
    max2 = layers.MaxPool2D(pool_size=(3, 3), strides=(2, 2),
                            padding='same')(conv3)
Example #2
def task5(learning_rate=0.001, momentum=0.9, epochs=1, batch_size=None):
    label_tr = pd.read_csv("fairface_label_train.csv")
    label_te = pd.read_csv("fairface_label_val.csv")

    # Load image dataset
    x_train, y_train = load_img_dataset_with_label(label_tr, "gender")
    x_test, y_test = load_img_dataset_with_label(label_te, "gender")

    # Normalize using min-max scaling
    x_tr = MinMaxScaling(x_train)
    x_te = MinMaxScaling(x_test)

    # Add another dimension for channel
    x_te = np.expand_dims(x_te, axis=3)
    x_tr = np.expand_dims(x_tr, axis=3)

    # Label encoding (named label_encoder so it does not clash with the
    # Keras encoder model defined below)
    label_encoder = LabelEncoder()
    label_encoder.fit(y_train)
    y_tr = label_encoder.transform(y_train)
    y_te = label_encoder.transform(y_test)

    # reparameterization trick
    # instead of sampling from Q(z|X), sample epsilon = N(0,I)
    # z = z_mean + sqrt(var) * epsilon
    from tensorflow.keras import backend as K

    def sampling(args):
        """Reparameterization trick by sampling from an isotropic unit Gaussian.

        # Arguments
            args (tensor): mean and log of variance of Q(z|X)
        # Returns
            z (tensor): sampled latent vector
        """
        # Extract mean and log of variance
        z_mean, z_log_var = args
        # Get batch size and length of vector (size of latent space)
        batch = K.shape(z_mean)[0]
        dim = K.int_shape(z_mean)[1]

        # By default, random_normal has mean = 0 and std = 1.0
        epsilon = K.random_normal(shape=(batch, dim))
        # std = sqrt(var) = exp(0.5 * log(var)), hence the 0.5 factor
        return z_mean + K.exp(0.5 * z_log_var) * epsilon

    # Task 5: Variational Auto Encoder (VAE)
    # encoder
    latent_dim = 5
    inputs = Input(shape=(32, 32, 1), name='encoder_input')
    encoder_hl1 = layers.Conv2D(32,
                                kernel_size=7,
                                strides=1,
                                activation='relu',
                                name='encoder_hl1',
                                padding='same')(inputs)
    encoder_hl2 = layers.Conv2D(64,
                                kernel_size=3,
                                strides=1,
                                activation='relu',
                                name='encoder_hl2',
                                padding='same')(encoder_hl1)
    encoder_flatten = layers.Flatten()(encoder_hl2)
    z_mean = layers.Dense(latent_dim, name='z_mean')(encoder_flatten)
    z_log_var = layers.Dense(latent_dim, name='z_log_var')(encoder_flatten)

    z = layers.Lambda(sampling, name='z')([z_mean, z_log_var])
    encoder = keras.Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()

    # decoder
    latent_inputs = Input(shape=(latent_dim, ), name="z_sampling")
    decoder_dense = layers.Dense(32 * 32 * 64,
                                 activation='relu')(latent_inputs)
    decoder_reshape = layers.Reshape((32, 32, 64))(decoder_dense)
    decoder_hl1 = layers.Conv2DTranspose(64,
                                         kernel_size=3,
                                         strides=1,
                                         activation='relu',
                                         name='decoder_hl1',
                                         padding='same')(decoder_reshape)
    decoder_hl2 = layers.Conv2DTranspose(32,
                                         kernel_size=7,
                                         strides=1,
                                         activation='relu',
                                         name='decoder_hl2',
                                         padding='same')(decoder_hl1)
    decoder_outputs = layers.Conv2DTranspose(1,
                                             kernel_size=1,
                                             activation="sigmoid",
                                             padding='same')(decoder_hl2)

    decoder = keras.Model(latent_inputs, decoder_outputs, name="decoder")
    decoder.summary()

    outputs = decoder(encoder(inputs)[2])
    vae = keras.Model(inputs, outputs, name='vae_cnn')

    # Set the loss: reconstruction error plus KL divergence to the unit prior
    reconstruction_loss = keras.losses.mse(K.flatten(inputs), K.flatten(outputs))
    # mse averages over pixels; scale back up to a per-image sum
    reconstruction_loss *= 32 * 32
    # KL(Q(z|X) || N(0, I)); dropping this term reduces the model to a
    # plain autoencoder, so it is kept active here
    kl_loss = K.exp(z_log_var) + K.square(z_mean) - z_log_var - 1
    kl_loss = 0.5 * K.sum(kl_loss, axis=-1)
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    vae.add_loss(vae_loss)
    vae.compile(optimizer=optimizers.SGD(learning_rate=learning_rate,
                                         momentum=momentum))
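    # For reference, the closed-form KL term used above, for a diagonal
    # Gaussian posterior N(mu, sigma^2) against the unit Gaussian prior:
    #   KL = 0.5 * sum_j (sigma_j^2 + mu_j^2 - log sigma_j^2 - 1)
    # With z_log_var = log(sigma^2), this is exactly the expression in kl_loss.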

    # Fit the model (the loss is attached via add_loss, so no targets are passed)
    history = vae.fit(x=x_tr,
                      epochs=epochs,
                      batch_size=batch_size,
                      shuffle=True,
                      verbose=1)

    vae.summary()

    # Predict using the model
    pred = vae.predict(x_te)

    # Plot reconstructions of 10 random validation images
    plt.rcParams["axes.grid"] = False

    # randrange excludes the upper bound, so indices stay within x_te
    randomlist = [random.randrange(len(x_te)) for _ in range(10)]

    for i in randomlist:
        plt.imshow(pred[i][:, :, 0])
        plt.show()
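
    # Sketch: since the KL term pulls the posterior towards N(0, I), new
    # images can be generated by decoding draws from that prior. The
    # 10-sample count is arbitrary.
    z_samples = np.random.normal(size=(10, latent_dim))
    generated = decoder.predict(z_samples)
    for img in generated:
        plt.imshow(img[:, :, 0])
        plt.show()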
Example #3
def task3g(learning_rate=0.001, momentum=0.9, epochs=1, batch_size=None):
    label_tr = pd.read_csv("fairface_label_train.csv")
    label_te = pd.read_csv("fairface_label_val.csv")

    x_train, y_train = load_img_dataset_with_label(label_tr, "gender")
    x_test, y_test = load_img_dataset_with_label(label_te, "gender")

    # Normalize using min-max scaling
    x_tr = MinMaxScaling(x_train)
    x_te = MinMaxScaling(x_test)

    # Add another dimension for channel
    x_te = np.expand_dims(x_te, axis=3)
    x_tr = np.expand_dims(x_tr, axis=3)

    # Label encoding
    encoder = LabelEncoder()
    encoder.fit(y_train)
    y_tr = encoder.transform(y_train)
    y_te = encoder.transform(y_test)

    # Task 3: Your Own ConvNet (Gender)
    model = Sequential()
    # Declare the input shape so the model can be built and summarized early
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=(32, 32, 1)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizers.SGD(learning_rate=learning_rate,
                                           momentum=momentum),
                  metrics=['accuracy'])

    # Fit the model
    history = model.fit(x=x_tr,
                        y=y_tr,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_data=(x_te, y_te),
                        verbose=1)

    print(model.summary())

    # Predict using the model
    pred = model.predict(x_te)
    predicted_class_indices = np.argmax(pred, axis=1)
    # LabelEncoder sorts classes alphabetically, so Female -> 0, Male -> 1
    labels = {0: "Female", 1: "Male"}
    predictions = [labels[k] for k in predicted_class_indices]

    # Confusion matrix
    data = {'y_Actual': label_te['gender'], 'y_Predicted': predictions}

    df_cm = pd.DataFrame(data, columns=['y_Actual', 'y_Predicted'])
    confusion_matrix = pd.crosstab(df_cm['y_Actual'],
                                   df_cm['y_Predicted'],
                                   rownames=['Actual'],
                                   colnames=['Predicted'])
    print(confusion_matrix)

    sns.heatmap(confusion_matrix, annot=True)
    plt.show()

    # Accuracy-vs-epoch and loss-vs-epoch
    plt.figure(figsize=[12.5, 4])
    plt.subplot(1, 2, 1)
    plt.plot(history.history['loss'], '-.*', label='train')
    plt.plot(history.history['val_loss'], '-.*', label='val')
    plt.xlabel("Epoch")
    plt.ylabel("Binary Cross Entropy loss")
    plt.title("Task 3: Loss vs epoch (gender)")
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(history.history['accuracy'], '-.*', label='train')
    plt.plot(history.history['val_accuracy'], '-.*', label='val')
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy (%)")
    plt.title("Task 3: Accuracy vs epoch (gender)")
    plt.legend()
    plt.show()
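
    # Sketch: cross-check the plotted accuracy with Keras' own evaluation on
    # the encoded validation labels (uses the compiled 'accuracy' metric)
    test_loss, test_acc = model.evaluate(x_te, y_te, verbose=0)
    print(f"Validation accuracy: {test_acc:.4f}")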