# Imports assumed by this snippet (TF2-style Keras):
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Activation, Conv2D, Dense, Dropout,
                                     Flatten, MaxPooling2D)


def build(config, classes, softmax=True, scale_adjust_wb=None):
        model = Sequential()
        input_shape = config.input_shape

        # 1. CONV => RELU => POOL (BN disabled)
        model.add(
            Conv2D(48, (7, 7), activation='relu', input_shape=input_shape))
        # model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # 2. CONV => RELU => POOL (BN disabled)
        model.add(Conv2D(96, (5, 5), activation='relu'))
        # model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # 3. CONV => RELU => POOL
        model.add(Conv2D(128, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # 4. CONV => RELU => POOL
        model.add(Conv2D(96, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        # 5. CONV => RELU => POOL
        model.add(Conv2D(64, (1, 1), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.3))
        model.add(Flatten())

        model.add(Dense(256, activation='relu'))
        # model.add(BatchNormalization())
        model.add(Dropout(0.5))

        model.add(Dense(16, activation='relu'))

        # final layer
        model.add(Dense(classes))
        if softmax:
            model.add(Activation("softmax"))
        if scale_adjust_wb is not None:
            # A Lambda layer such as
            #   Lambda(lambda x: scale_adjust_wb[0] * x + scale_adjust_wb[1])
            # doesn't save/load well because it is a custom object, so an
            # equivalent frozen Dense layer is used instead.
            scale_layer = Dense(classes, trainable=False)
            scale_layer.build(input_shape=(None, classes))
            scale_layer.set_weights(
                [np.diag(scale_adjust_wb[0]), scale_adjust_wb[1]])
            model.add(scale_layer)

        # return the constructed network architecture
        return model
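
A quick standalone check (hypothetical values) of why the frozen Dense layer reproduces the Lambda: a diagonal kernel turns the matrix product into an element-wise affine map.

import numpy as np

w = np.array([2.0, 0.5, 1.5])   # stands in for scale_adjust_wb[0]
b = np.array([0.1, -0.2, 0.3])  # stands in for scale_adjust_wb[1]
x = np.array([[1.0, 2.0, 3.0]])
# x @ diag(w) + b == w * x + b, so the Dense layer matches the Lambda
assert np.allclose(x @ np.diag(w) + b, w * x + b)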
Example 2
class VAE_Encoder(Model):
    def __init__(self, latent_num):
        super(VAE_Encoder, self).__init__()
        self.latent_num = latent_num
        self.conv_layers = [  # (32, 32, 3)
            Conv2D(filters=32,
                   kernel_size=(4, 4),
                   strides=(2, 2),
                   activation='relu'),  # (15, 15, 32)
            Conv2D(filters=64,
                   kernel_size=(4, 4),
                   strides=(2, 2),
                   activation='relu'),  # (6, 6, 64)
            Conv2D(filters=128, kernel_size=(3, 3),
                   activation='relu'),  # (4, 4, 128)
            Conv2D(filters=256, kernel_size=(3, 3),
                   activation='relu'),  # (2, 2, 256)
        ]
        self.flatten = Flatten()
        self.mean_out = Dense(latent_num, name='mean')
        # self.std_out = Dense(latent_num, activation='sigmoid', name='std')

    def __call__(self, x):
        # returns z, mean, std (std is None while std_out is disabled)
        m, s = self.encode(x)
        # if sampling:
        #     noise = np.random.normal(size=self.latent_num)
        #     z = m + s * noise
        # else:
        z = m
        return z, m, s

    def forward(self, x):
        m, _ = self.encode(x)
        return m

    def encode(self, x):
        for layer in self.conv_layers:
            x = layer(x)
        x = self.flatten(x)
        m = self.mean_out(x)
        # s = self.std_out(x)
        return m, None

    def set_weights_by_list(self, weights_list):
        for i, layer in enumerate(self.conv_layers):
            layer.set_weights(weights_list[i])
        self.mean_out.set_weights(weights_list[-2])
        # std_out is currently disabled (see __init__), so guard instead of
        # failing with an AttributeError.
        if hasattr(self, 'std_out'):
            self.std_out.set_weights(weights_list[-1])

    def save(self, dir, name):
        self.save_weights(dir + name + '.h5')
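
If std_out is re-enabled, the commented-out sampling branch in __call__ is the standard reparameterisation trick; a minimal sketch (TensorFlow assumed):

import tensorflow as tf

def reparameterize(mean, std):
    # z = mean + std * eps with eps ~ N(0, I); keeps sampling differentiable
    eps = tf.random.normal(tf.shape(mean))
    return mean + std * eps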
Example 3
    def _create_dense_layer(self, _, normalized_weights, num_classes):
        input_shape = tf.TensorShape([None, 512])
        dense_layer = Dense(
            input_shape=(512, ),
            units=num_classes,
            use_bias=False,
            name='fully_connected_to_softmax_crossentropy',
            dtype='float32',
            trainable=False,
        )
        dense_layer.build(input_shape)
        dense_layer.set_weights([normalized_weights.read_value()])
        return dense_layer
Example 4
def sparse_fc_mapping(x, input_idxs):
    """Frozen Dense mapping whose one-hot kernel copies input unit
    input_idxs[i] to output unit i."""
    num_units = len(input_idxs)
    d = Dense(num_units, use_bias=False)
    d.trainable = False
    x = d(x)

    # Overwrite the random kernel with a one-hot selection matrix.
    w = d.get_weights()
    w[0].fill(0)
    for i in range(num_units):
        w[0][input_idxs[i], i] = 1.
    d.set_weights(w)

    return x
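
A minimal check (hypothetical shapes). The function mutates the layer's weights after calling it, so it is meant for symbolic Keras tensors; with eager tensors the returned value would still reflect the initial random kernel.

import numpy as np
from tensorflow.keras import Input, Model

inp = Input(shape=(4,))
out = sparse_fc_mapping(inp, input_idxs=[2, 0])
model = Model(inp, out)
print(model.predict(np.array([[1., 2., 3., 4.]])))  # [[3. 1.]]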
Example 5
    def wider(self, added_size=1, pos_layer=None):
        layers_size = len(self.layers)
        if layers_size < 2:
            raise ValueError("Number of layer must be greater than 2.")
        if pos_layer is None:
            pos_layer = max(layers_size - 2, 0)
        elif pos_layer >= layers_size - 1 or pos_layer < 0:
            raise ValueError(
                f"pos_layer must be in [0, {layers_size - 2}], got {pos_layer}."
            )

        # TODO: get biggest value to divide for new weights
        weights, bias = self.layers[pos_layer].get_weights()
        weights_next_layer, bias_next_layer = self.layers[pos_layer +
                                                          1].get_weights()

        new_weights, new_bias, new_weights_next_layer = net2wider(
            weights, bias, weights_next_layer, added_size)

        src_units, des_units = weights.shape[0], weights.shape[1] + added_size
        next_des_units = weights_next_layer.shape[1]

        wider_layer = Dense(units=des_units,
                            activation=tf.nn.relu,
                            kernel_regularizer=regularizers.l1_l2(l1=self.l1,
                                                                  l2=self.l2))

        # input_shape = (batch_size, input_features).
        # input_features = number of units in layer = length(layer) = output of previous layer
        wider_layer.build(input_shape=(None, src_units))
        wider_layer.set_weights([new_weights, new_bias])

        next_layer = Dense(units=next_des_units,
                           activation=tf.nn.relu,
                           kernel_regularizer=regularizers.l1_l2(l1=self.l1,
                                                                 l2=self.l2))
        next_layer.build(input_shape=(None, des_units))
        next_layer.set_weights([new_weights_next_layer, bias_next_layer])

        self.layers[pos_layer] = wider_layer
        self.layers[pos_layer + 1] = next_layer
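
net2wider is defined elsewhere in this project; below is a minimal sketch consistent with the call above, in the spirit of Net2WiderNet (duplicate randomly chosen units and split their outgoing weights so the network function is preserved). The random-duplication choice is an assumption.

import numpy as np

def net2wider(weights, bias, weights_next_layer, added_size):
    rng = np.random.default_rng()
    num_units = weights.shape[1]
    idxs = rng.integers(0, num_units, size=added_size)
    # replication count per original unit (1 = not duplicated)
    counts = np.bincount(idxs, minlength=num_units) + 1
    # incoming weights/biases: copy the duplicated units verbatim
    new_weights = np.concatenate([weights, weights[:, idxs]], axis=1)
    new_bias = np.concatenate([bias, bias[idxs]])
    # outgoing weights: divide each replicated row by its replica count so
    # the next layer's pre-activations are unchanged
    scaled = weights_next_layer / counts[:, None]
    new_weights_next_layer = np.concatenate([scaled, scaled[idxs, :]], axis=0)
    return new_weights, new_bias, new_weights_next_layer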
Example 6
    def create_keras_model(self, nclasses, warming_up=False):
        input = Input(shape=self.nfeatures)
        x = input
        classifier = Dense(
            nclasses - 1, use_bias=True, kernel_initializer='he_normal',
            bias_initializer='zeros', input_shape=K.int_shape(x)[-1:],
            kernel_regularizer=regularizers.l2(self.mu)
        )
        output = classifier(x)
        if warming_up:
            # Warm-start the layer from the fitted sklearn model's
            # coefficients and intercept.
            kernel_values = self.sklearn_model.coef_.T
            kernel_values = np.concatenate(
                (np.mean(-1. * kernel_values, axis=-1)[:, None], kernel_values),
                axis=-1)
            beta = self.sklearn_model.intercept_
            classifier.set_weights([kernel_values, beta])
        self.model = Model(input, output)
        self.output_shape = self.model.output_shape
        self.output = self.model.output
        self.layers = self.model.layers
        self.input = self.model.input
        optimizer = optimizers.SGD(0.1)
        self.model.compile(loss=self.loss_function('square_hinge'),
                           optimizer=optimizer, metrics=['acc'])
Example 7
    def deeper(self, pos_layer=None):
        layers_size = len(self.layers)
        if pos_layer is None:
            pos_layer = max(layers_size - 2, 0)
        elif pos_layer >= layers_size - 1 or pos_layer < 0:
            raise ValueError(
                f"pos_layer must be in [0, {layers_size - 2}], got {pos_layer}."
            )

        weights, bias = self.layers[pos_layer].get_weights()
        new_weights, new_bias = net2deeper(weights)
        des_units = weights.shape[1]
        # TODO: add initial kernel
        layer = Dense(
            units=des_units,
            activation=tf.nn.relu,
            kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),
        )
        layer.build(input_shape=(None, des_units))
        layer.set_weights([new_weights, new_bias])

        self.layers.insert(pos_layer + 1, layer)
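
net2deeper is also defined elsewhere; a minimal identity-initialisation sketch consistent with its usage (Net2DeeperNet): the inserted layer starts as the identity, which is function-preserving after a ReLU layer because ReLU(x) == x for the non-negative activations it receives.

import numpy as np

def net2deeper(weights):
    units = weights.shape[1]
    new_weights = np.eye(units, dtype=weights.dtype)
    new_bias = np.zeros(units, dtype=weights.dtype)
    return new_weights, new_bias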
Example 8
    def last_insert_layer(self, layer_dim):
        prev_weights, prev_bias = self.layers[-1].get_weights()
        prev_units = prev_weights.shape[1]

        replace_prev_layer = Dense(
            units=prev_units,
            activation=tf.nn.relu,
            kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),
        )
        replace_prev_layer.build(input_shape=(None, prev_weights.shape[0]))
        replace_prev_layer.set_weights([prev_weights, prev_bias])

        added_layer = Dense(
            units=layer_dim,
            activation=tf.nn.sigmoid,
            kernel_regularizer=regularizers.l1_l2(l1=self.l1, l2=self.l2),
            kernel_initializer=initializers.GlorotNormal(seed=self.seed),
            bias_initializer=initializers.Zeros())
        added_layer.build(input_shape=(None, prev_units))

        del self.layers[-1]
        self.layers.append(replace_prev_layer)
        self.layers.append(added_layer)
Example 9
class CNNLayerwise(tf.keras.Model):
    def __init__(self, hyperp, run_options, data_input_shape, label_dimensions, num_channels, kernel_regularizer, bias_regularizer):
        super(CNNLayerwise, self).__init__()
###############################################################################
#                  Construct Initial Neural Network Architecture               #
###############################################################################
        #=== Defining Attributes ===#
        self.data_input_shape = data_input_shape
        self.architecture = [] # storage for layer information, each entry is [filter_size, num_filters]
        self.num_filters = hyperp.num_filters
        self.kernel_size = hyperp.filter_size
        self.activation = hyperp.activation
        self.kernel_regularizer = kernel_regularizer
        self.bias_regularizer = bias_regularizer
        self.hidden_layers_list = [] # This will be a list of Keras layers

        #=== Define Initial Architecture and Create Layer Storage ===#
        self.architecture.append([self.data_input_shape[0], num_channels]) # input information
        self.architecture.append([3, self.num_filters]) # 3x3 convolutional layer for upsampling data
        self.architecture.append([hyperp.filter_size, self.num_filters]) # First hidden layer
        self.architecture.append([3, num_channels]) # 3x3 convolutional layer for downsampling features
        self.architecture.append(label_dimensions) # fully-connected output layer
        print(self.architecture)
        
        #=== Weights and Biases Initializer ===#
        kernel_initializer = RandomNormal(mean=0.0, stddev=0.05)
        bias_initializer = 'zeros'
        
        #=== Linear Upsampling Layer to Map to Feature Space ===#
        l = 1
        self.upsampling_layer = Conv2D(self.architecture[l][1], (3, 3), padding = 'same',
                                       activation = 'linear', use_bias = True,
                                       input_shape = self.data_input_shape,
                                       kernel_initializer = kernel_initializer, bias_initializer = bias_initializer,
                                       kernel_regularizer = self.kernel_regularizer, bias_regularizer = self.bias_regularizer,
                                       name='upsampling_layer')
        
        #=== Define Hidden Layers ===#
        l = 2
        conv_layer = Conv2D(self.architecture[l][1], (self.architecture[l][0], self.architecture[l][0]), padding = 'same', 
                            activation = self.activation, use_bias = True, 
                            input_shape = (self.data_input_shape[0], self.data_input_shape[1], self.num_filters),
                            kernel_initializer = kernel_initializer, bias_initializer = bias_initializer,
                            kernel_regularizer = self.kernel_regularizer, bias_regularizer = self.bias_regularizer,
                            name = "W" + str(l))
        self.hidden_layers_list.append(conv_layer)
            
            
        #=== Linear Downsampling Layer to Map to Data Space ===#
        l = 3
        self.downsampling_layer = Conv2D(self.architecture[l][1], (3, 3), padding = 'same',
                                         activation = "linear", use_bias = True,
                                         input_shape = (self.data_input_shape[0], self.data_input_shape[1], self.num_filters),
                                         kernel_initializer = kernel_initializer, bias_initializer = bias_initializer,
                                         kernel_regularizer = self.kernel_regularizer, bias_regularizer = self.bias_regularizer,
                                         name = "downsampling_layer")
        
        #=== Classification Layer ===#
        self.classification_layer = Dense(units = label_dimensions,
                                          activation = 'linear', use_bias = True,
                                          kernel_initializer = kernel_initializer, bias_initializer = bias_initializer,
                                          kernel_regularizer = self.kernel_regularizer, bias_regularizer = self.bias_regularizer,
                                          name = 'classification_layer')
        
###############################################################################
#                            Network Propagation                              #
############################################################################### 
    def call(self, inputs):
        #=== Upsampling ===#
        output = self.upsampling_layer(inputs)  
        for hidden_layer in self.hidden_layers_list:
            #=== Hidden Layers ===#
            prev_output = output
            output = prev_output + hidden_layer(output)
        #=== Downsampling ===#
        output = self.downsampling_layer(output)
        #=== Classification ===#
        output = Flatten()(output)
        output = self.classification_layer(output)
        return output
    
###############################################################################
#                                 Add Layer                                   #
###############################################################################     
    def add_layer(self, trainable_hidden_layer_index, freeze = True, add = True):
        kernel_initializer = 'zeros'
        bias_initializer = 'zeros'
        if add:
            conv_layer = Conv2D(self.num_filters, (self.kernel_size, self.kernel_size), padding = 'same',
                                activation = self.activation, use_bias = True,
                                input_shape = (self.data_input_shape[0], self.data_input_shape[1], self.num_filters),
                                kernel_initializer = kernel_initializer, bias_initializer = bias_initializer,
                                kernel_regularizer = self.kernel_regularizer, bias_regularizer = self.bias_regularizer,
                                name = "W" + str(trainable_hidden_layer_index))
            self.hidden_layers_list.append(conv_layer)
        if freeze:
            self.upsampling_layer.trainable = False
            for index in range(0, trainable_hidden_layer_index - 2):
                self.hidden_layers_list[index].trainable = False
        else:
            self.upsampling_layer.trainable = True
            for index in range(0, trainable_hidden_layer_index - 2):
                self.hidden_layers_list[index].trainable = True
              
###############################################################################
#                              Sparsify Weights                               #
###############################################################################            
    def sparsify_weights_and_get_relative_number_of_zeros(self, threshold = 1e-6):
        #=== Downsampling Layer ===#
        down_weights = self.downsampling_layer.get_weights()        
        sparsified_weights = self.sparsify_weights(down_weights, threshold)
        self.downsampling_layer.set_weights(sparsified_weights)
        
        #=== Classification Layer ===#
        class_weights = self.classification_layer.get_weights()
        sparsified_weights = self.sparsify_weights(class_weights, threshold)
        self.classification_layer.set_weights(sparsified_weights)
        
        #=== Trained Hidden Layer ===#
        trained_weights = self.hidden_layers_list[-1].get_weights()
        sparsified_weights = self.sparsify_weights(trained_weights, threshold)
        self.hidden_layers_list[-1].set_weights(sparsified_weights)
        
        #=== Compute Relative Number of Zeros ===#
        # Note: this is computed over the most recently sparsified weights,
        # i.e. the trained hidden layer above.
        total_number_of_zeros = 0
        total_number_of_elements = 0
        for i in range(0, len(sparsified_weights)):
            total_number_of_zeros += np.count_nonzero(sparsified_weights[i]==0)
            total_number_of_elements += sparsified_weights[i].flatten().shape[0]
        relative_number_zeros = np.float64(total_number_of_zeros/total_number_of_elements)
        
        return relative_number_zeros
    
    def sparsify_weights(self, weights, threshold = 1e-6):
        sparsified_weights = []
        if isinstance(weights, float):
            if abs(weights) > threshold:
                sparsified_weights = weights
            else:
                sparsified_weights = 0
        else:
            for w in weights:
                bool_mask = (abs(w) > threshold).astype(int)
                sparsified_weights.append(w*bool_mask)
            
        return sparsified_weights
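
For context, a hypothetical layerwise-training driver (every name except the CNNLayerwise methods is a stand-in): train the current top hidden layer, sparsify it, then append a fresh zero-initialised layer and freeze everything beneath it.

for index in range(3, max_hidden_layers):          # assumed schedule
    train_current_model(model)                     # stand-in training routine
    zeros = model.sparsify_weights_and_get_relative_number_of_zeros()
    print(f"relative zeros: {zeros:.3f}")
    model.add_layer(index, freeze=True, add=True)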
Example 10
              EarlyStopping(monitor='val_loss',
                            patience=20,
                            restore_best_weights=True)
          ],
          validation_data=(X_train_val, y_labels_train_val))

# Zero every output weight except the subtype columns (32-36), keeping biases.
new_weights = np.empty([100, 37])
biases = outLayer.get_weights()[1]
for i in range(len(outLayer.get_weights()[0])):
    weights = outLayer.get_weights()[0][i]
    for j in range(len(weights)):
        if j not in (32, 33, 34, 35, 36):  #subtypes
            weights[j] = 0
    new_weights[i] = weights

outLayer.set_weights([new_weights, biases])
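
An equivalent vectorised form of the masking above (a sketch with the same effect):

w, b = outLayer.get_weights()
mask = np.zeros_like(w)
mask[:, 32:37] = 1.0
outLayer.set_weights([w * mask, b])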

score = model.evaluate(X_test, y_labels_test)
print(model.predict(X_test).argmax(axis=1))
conf_matrix = pd.DataFrame(
    confusion_matrix(y_labels_test.values.argmax(axis=1),
                     model.predict(X_test).argmax(axis=1)))

conf_matrix.to_csv(
    "../results/fully_con/all_cancer/feed_forward_300_100_conf_matrix_test.csv"
)

classify_df = classify_df.append({"accuracy": score[1]}, ignore_index=True)
print(score)
scores.append(score[1])
Example 11
class DenseApproximator(Approximator):
    def __init__(self, num_layers, num_units, output_dim, internal_dim,
                 activation):
        super().__init__(output_dim, internal_dim)
        self.activation = activation

        self.dense_layers = []
        self.batch_layers = []

        for _ in range(num_layers - 1):
            self.dense_layers.append(Dense(units=num_units, use_bias=False))
            self.batch_layers.append(BatchNormalization())

        self.output_layer = Dense(units=output_dim + internal_dim)

    def _evaluate_layer(self, x, dense, batch, training):
        x = dense(x, training=training)
        x = batch(x, training=training)
        return self.activation(x)

    def _call(self, inputs, training=False):
        """Implementation of call for Strategy.
        Args:
            inputs: (batch_size, num_features)
            training: bool
        Returns:
            output: see Strategy._call
        """
        for dense, batch in zip(self.dense_layers, self.batch_layers):
            inputs = self._evaluate_layer(inputs, dense, batch, training)
        output = self.output_layer(inputs, training=training)

        return output

    def initialise(self, inputs, sample_size):
        batch_size = tf.shape(inputs)[0]

        iterator = zip(self.dense_layers, self.batch_layers)
        for k, (dense, batch) in enumerate(iterator):
            sample_idx = tf.random.shuffle(tf.range(batch_size))[:sample_size]
            sample = tf.gather(inputs, sample_idx, axis=0)

            for i in tf.range(k + 1):
                sample = self._evaluate_layer(sample, self.dense_layers[i],
                                              self.batch_layers[i], False)
            mean, variance = tf.nn.moments(sample, 0)

            # dense.set_weights([dense.get_weights()[0] / tf.sqrt(variance)])
            # BatchNormalization weights are [gamma, beta, moving_mean,
            # moving_variance]; fold the sample statistics into gamma/beta so
            # the layer's outputs start standardised.
            batch.set_weights([
                batch.get_weights()[0] / tf.sqrt(variance),
                (batch.get_weights()[1] - mean) / tf.sqrt(variance),
                batch.get_weights()[2],
                batch.get_weights()[3]
            ])

        sample_idx = tf.random.shuffle(tf.range(batch_size))[:sample_size]
        sample = tf.gather(inputs, sample_idx, axis=0)
        sample = self._call(sample, False)
        mean, variance = tf.nn.moments(sample, 0)
        self.output_layer.set_weights([
            self.output_layer.get_weights()[0] / tf.sqrt(variance),
            self.output_layer.get_weights()[1]
        ])
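
For context, a hypothetical warm-up call (the Approximator base class and the constructor arguments are assumptions): running initialise once on a representative batch makes each layer's outputs start roughly zero-mean and unit-variance, an LSUV-style data-dependent initialisation.

import tensorflow as tf

approx = DenseApproximator(num_layers=3, num_units=64, output_dim=1,
                           internal_dim=0, activation=tf.nn.relu)
x = tf.random.normal([1024, 10])      # representative batch (assumed shape)
_ = approx._call(x, training=False)   # build all layers once
approx.initialise(x, sample_size=256)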
Example 12
class AlexnetModel(Model):
    def __init__(self):
        super(AlexnetModel, self).__init__()

        # OPS
        self.relu = Activation('relu')
        self.maxpool = MaxPooling2D(pool_size=(3, 3),
                                    strides=(2, 2),
                                    padding='valid')
        # self.dropout = Dropout(0.4)
        self.softmax = Activation('softmax')

        # Conv layers
        self.conv1 = Conv2D(name="conv1",
                            filters=96,
                            input_shape=(224, 224, 3),
                            kernel_size=(11, 11),
                            strides=(4, 4),
                            padding='same')
        self.conv2a = Conv2D(name="conv2a",
                             filters=128,
                             kernel_size=(5, 5),
                             strides=(1, 1),
                             padding='same')
        self.conv2b = Conv2D(name="conv2b",
                             filters=128,
                             kernel_size=(5, 5),
                             strides=(1, 1),
                             padding='same')
        self.conv3 = Conv2D(name="conv3",
                            filters=384,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding='same')
        self.conv4a = Conv2D(name="conv4a",
                             filters=192,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             padding='same')
        self.conv4b = Conv2D(name="conv4b",
                             filters=192,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             padding='same')
        self.conv5a = Conv2D(name="conv5a",
                             filters=128,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             padding='same')
        self.conv5b = Conv2D(name="conv5b",
                             filters=128,
                             kernel_size=(3, 3),
                             strides=(1, 1),
                             padding='same')

        # Fully-connected layers

        self.flatten = Flatten()

        self.dense1 = Dense(4096, name="dense1", input_shape=(100, ))
        self.dense2 = Dense(4096, name="dense2")
        self.dense3 = Dense(1000, name="dense3")

    def setAlexnetWeights(self, wdir):
        self.conv1.set_weights(
            (np.load(wdir + 'conv1.npy'), np.load(wdir + 'conv1b.npy')))
        self.conv2a.set_weights(
            (np.load(wdir + 'conv2_a.npy'), np.load(wdir + 'conv2b_a.npy')))
        self.conv2b.set_weights(
            (np.load(wdir + 'conv2_b.npy'), np.load(wdir + 'conv2b_b.npy')))
        self.conv3.set_weights(
            (np.load(wdir + 'conv3.npy'), np.load(wdir + 'conv3b.npy')))
        self.conv4a.set_weights(
            (np.load(wdir + 'conv4_a.npy'), np.load(wdir + 'conv4b_a.npy')))
        self.conv5a.set_weights(
            (np.load(wdir + 'conv5_a.npy'), np.load(wdir + 'conv5b_a.npy')))
        self.conv4b.set_weights(
            (np.load(wdir + 'conv4_b.npy'), np.load(wdir + 'conv4b_b.npy')))
        self.conv5b.set_weights(
            (np.load(wdir + 'conv5_b.npy'), np.load(wdir + 'conv5b_b.npy')))

        self.dense1.set_weights(
            (np.load(wdir + 'dense1.npy'), np.load(wdir + 'dense1b.npy')))
        self.dense2.set_weights(
            (np.load(wdir + 'dense2.npy'), np.load(wdir + 'dense2b.npy')))
        self.dense3.set_weights(
            (np.load(wdir + 'dense3.npy'), np.load(wdir + 'dense3b.npy')))

    def getOutputAtLAyer(self, layer: AlexnetLayers):
        return self.get_layer(layer.name).output

    # Network definition
    def call(self, x, **kwargs):
        layers_by_name = dict()

        x = self.conv1(x)
        x = add_to_dict("conv1", self.relu(x), layers_by_name)

        x = tf.nn.local_response_normalization(x,
                                               depth_radius=2,
                                               alpha=2e-05,
                                               beta=0.75,
                                               bias=1.0)
        x = self.maxpool(x)

        x = tf.concat(
            (self.conv2a(x[:, :, :, :48]), self.conv2b(x[:, :, :, 48:])), 3)
        x = add_to_dict("conv2", self.relu(x), layers_by_name)

        x = tf.nn.local_response_normalization(x,
                                               depth_radius=2,
                                               alpha=2e-05,
                                               beta=0.75,
                                               bias=1.0)
        x = self.maxpool(x)

        x = self.conv3(x)
        x = add_to_dict("conv3", self.relu(x), layers_by_name)
        x = tf.concat(
            (self.conv4a(x[:, :, :, :192]), self.conv4b(x[:, :, :, 192:])), 3)
        x = add_to_dict("conv4", self.relu(x), layers_by_name)

        x = tf.concat(
            (self.conv5a(x[:, :, :, :192]), self.conv5b(x[:, :, :, 192:])), 3)
        x = add_to_dict("conv5", self.relu(x), layers_by_name)
        x = self.maxpool(x)

        x = self.flatten(x)

        x = self.dense1(x)
        x = add_to_dict("dense1", self.relu(x), layers_by_name)
        x = self.dense2(x)
        x = add_to_dict("dense2", self.relu(x), layers_by_name)
        x = self.dense3(x)
        x = add_to_dict("dense3", self.relu(x), layers_by_name)
        x = add_to_dict("softmax", self.softmax(x), layers_by_name)
        return x, layers_by_name
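
add_to_dict is not defined in this snippet; a minimal helper consistent with its call sites:

def add_to_dict(name, tensor, layers_by_name):
    # record the intermediate activation under `name` and pass it through
    layers_by_name[name] = tensor
    return tensor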
Example 13
class AlexNet(tf.keras.Model):
    def __init__(self, dropout_rate):
        super(AlexNet, self).__init__()
        self.dropout_rate = dropout_rate

        self.conv1 = Conv2D(filters=96,
                            kernel_size=(11, 11),
                            strides=(4, 4),
                            padding="valid",
                            name="conv1")
        # self.norm1 = tf.nn.local_response_normalization()
        self.pool1 = MaxPool2D(pool_size=(3, 3),
                               strides=(2, 2),
                               padding="valid",
                               name="pool1")

        self.conv2_1 = Conv2D(filters=int(256 / 2),
                              kernel_size=(5, 5),
                              strides=(1, 1),
                              padding="same",
                              name="conv2_1")
        self.conv2_2 = Conv2D(filters=int(256 / 2),
                              kernel_size=(5, 5),
                              strides=(1, 1),
                              padding="same",
                              name="conv2_2")

        self.pool2 = MaxPool2D(pool_size=(3, 3),
                               strides=(2, 2),
                               padding="valid",
                               name="pool2")

        self.conv3 = Conv2D(filters=384,
                            kernel_size=(3, 3),
                            strides=(1, 1),
                            padding="same",
                            name="conv3")

        self.conv4_1 = Conv2D(filters=int(384 / 2),
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              padding="same",
                              name="conv4_1")
        self.conv4_2 = Conv2D(filters=int(384 / 2),
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              padding="same",
                              name="conv4_2")

        self.conv5_1 = Conv2D(filters=int(256 / 2),
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              padding="same",
                              name="conv5_1")
        self.conv5_2 = Conv2D(filters=int(256 / 2),
                              kernel_size=(3, 3),
                              strides=(1, 1),
                              padding="same",
                              name="conv5_2")
        self.pool5 = MaxPool2D(pool_size=(3, 3),
                               strides=(2, 2),
                               padding="valid",
                               name="pool5")

        self.flat6 = Flatten()
        self.fc6 = Dense(units=4096,
                         activation=tf.nn.relu,
                         use_bias=True,
                         name="fc6")
        self.dropout6 = Dropout(rate=self.dropout_rate, name="dropout6")

        self.fc7 = Dense(units=4096,
                         activation=tf.nn.relu,
                         use_bias=True,
                         name="fc7")
        self.dropout7 = Dropout(rate=self.dropout_rate, name="dropout7")

    def load_weights(self, weights_path):
        # allow_pickle is required to load a pickled dict on NumPy >= 1.16.3
        weights_dict = np.load(weights_path, encoding='bytes',
                               allow_pickle=True).item()

        assert "conv1" in weights_dict and len(weights_dict["conv1"]) == 2
        self.conv1.set_weights(weights_dict["conv1"])

        conv2_weights = np.split(weights_dict["conv2"][0],
                                 indices_or_sections=2,
                                 axis=-1)  # [5, 5, 48, 128] * 2
        conv2_bias = np.split(weights_dict["conv2"][1],
                              indices_or_sections=2,
                              axis=-1)
        self.conv2_1.set_weights([conv2_weights[0], conv2_bias[0]])
        self.conv2_2.set_weights([conv2_weights[1], conv2_bias[1]])

        self.conv3.set_weights(weights_dict["conv3"])

        conv4_weights = np.split(weights_dict["conv4"][0],
                                 indices_or_sections=2,
                                 axis=-1)  # [3, 3, 192, 192] * 2
        conv4_bias = np.split(weights_dict["conv4"][1],
                              indices_or_sections=2,
                              axis=-1)
        self.conv4_1.set_weights([conv4_weights[0], conv4_bias[0]])
        self.conv4_2.set_weights([conv4_weights[1], conv4_bias[1]])

        conv5_weights = np.split(weights_dict["conv5"][0],
                                 indices_or_sections=2,
                                 axis=-1)  # [3, 3, 192, 128] * 2
        conv5_bias = np.split(weights_dict["conv5"][1],
                              indices_or_sections=2,
                              axis=-1)
        self.conv5_1.set_weights([conv5_weights[0], conv5_bias[0]])
        self.conv5_2.set_weights([conv5_weights[1], conv5_bias[1]])

        assert "fc6" in weights_dict and len(weights_dict["fc6"]) == 2
        self.fc6.set_weights(weights_dict["fc6"])

        assert "fc7" in weights_dict and len(weights_dict["fc7"]) == 2
        self.fc7.set_weights(weights_dict["fc7"])

    def call(self, x, training, mask=None):
        # conv1
        x = self.conv1(x)
        x = tf.nn.local_response_normalization(x,
                                               depth_radius=2,
                                               alpha=2e-05,
                                               beta=0.02,
                                               bias=1,
                                               name="norm1")
        x = self.pool1(x)
        # conv2
        x = tf.split(x, num_or_size_splits=2, axis=-1)
        x = tf.concat([self.conv2_1(x[0]), self.conv2_2(x[1])], axis=-1)
        x = tf.nn.local_response_normalization(x,
                                               depth_radius=2,
                                               alpha=2e-05,
                                               beta=0.02,
                                               bias=1,
                                               name="norm2")
        x = self.pool2(x)

        # conv3
        x = self.conv3(x)

        # conv4
        x = tf.split(x, num_or_size_splits=2, axis=-1)
        x = tf.concat([self.conv4_1(x[0]), self.conv4_2(x[1])], axis=-1)

        # conv5
        x = tf.split(x, num_or_size_splits=2, axis=-1)
        x = tf.concat([self.conv5_1(x[0]), self.conv5_2(x[1])], axis=-1)
        x = self.pool5(x)
        # fc6
        x = self.flat6(x)
        x = self.fc6(x)
        x = self.dropout6(x, training=training)

        # fc7
        x = self.fc7(x)
        x = self.dropout7(x, training=training)

        return x
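
A hypothetical way to build and warm-load the model (the weights path and the 227x227 input size of the original BVLC AlexNet are assumptions):

import tensorflow as tf

model = AlexNet(dropout_rate=0.5)
_ = model(tf.zeros([1, 227, 227, 3]), training=False)  # build every layer once
model.load_weights("bvlc_alexnet.npy")                 # assumed weight file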
Example 14
class SRFR(Model):
    def __init__(
            self,
            num_filters: int = 62,
            depth: int = 50,
            categories: int = 512,
            num_gc: int = 32,
            num_blocks: int = 23,
            residual_scailing: float = 0.2,
            training: bool = True,
            input_shape=(28, 28, 3),
            num_classes_syn: int = None,
            both: bool = False,
            num_classes_nat: int = None,
            scale: int = 64,
        ):
        super(SRFR, self).__init__()
        self._training = training
        self.scale = scale
        if both:
            self._natural_input = Conv2D(
                input_shape=input_shape,
                filters=num_filters,
                kernel_size=(3, 3),
                strides=1,
                padding='same',
                name='natural_input',
                activation=mish,
            )
        self._synthetic_input = Conv2D(
            input_shape=input_shape,
            filters=num_filters,
            kernel_size=(3, 3),
            strides=1,
            padding='same',
            name='synthetic_input',
            activation=mish,
        )
        self._super_resolution = GeneratorNetwork(
            num_filters,
            num_gc,
            num_blocks,
            residual_scailing,
        )
        self._face_recognition = ResNet(
            depth,
            categories,
            training
        )
        if self._training:
            if both:
                self._fc_classification_nat = Dense(
                    input_shape=(categories,),
                    units=num_classes_nat,
                    activation=None,
                    use_bias=False,
                    dtype='float32',
                    name='fully_connected_to_softmax_crossentropy_nat',
                )
                self._fc_classification_nat.build(tf.TensorShape([None, 512]))
                self.net_type = 'nat'
            self._fc_classification_syn: Dense = Dense(
                input_shape=(categories,),
                units=num_classes_syn,
                activation=None,
                use_bias=False,
                dtype='float32',
                name='fully_connected_to_softmax_crossentropy_syn',
            )
            self._fc_classification_syn.build(tf.TensorShape([None, 512]))

    @tf.function
    def _call_evaluating(self, input_tensor, input_type: str = 'nat'):
        if input_type == 'syn':
            outputs = self._synthetic_input(input_tensor)
        else:
            outputs = self._natural_input(input_tensor)
        super_resolution_image = self._super_resolution(outputs)
        embeddings = self._face_recognition(super_resolution_image)
        return super_resolution_image, embeddings

    def _calculate_normalized_embeddings(self, embeddings,
                                         net_type: str = 'syn'):
        fc_weights = self.get_weights(net_type)
        normalized_weights = tf.Variable(
            normalize(fc_weights, name='weights_normalization'),
            aggregation=tf.VariableAggregation.NONE,
        )
        normalized_embeddings = normalize(
            embeddings, axis=1, name='embeddings_normalization') * self.scale
        replica = tf.distribute.get_replica_context()
        replica.merge_call(self.set_weights,
                           args=(normalized_weights, net_type))
        return self.call_fc_classification(normalized_embeddings, net_type)

    def _call_training(self, synthetic_images, natural_images=None):
        synthetic_outputs = self._synthetic_input(synthetic_images)
        synthetic_sr_images = self._super_resolution(synthetic_outputs)
        synthetic_embeddings = self._face_recognition(synthetic_sr_images)
        synthetic_embeddings = self._calculate_normalized_embeddings(
            synthetic_embeddings
        )
        # `if natural_images:` is ambiguous for tensors, so test for None
        if natural_images is not None:
            natural_outputs = self._natural_input(natural_images)
            natural_sr_images = self._super_resolution(natural_outputs)
            natural_embeddings = self._face_recognition(natural_sr_images)
            natural_embeddings = self._calculate_normalized_embeddings(
                natural_embeddings
            )
            return (
                synthetic_sr_images,
                synthetic_embeddings,
                natural_sr_images,
                natural_embeddings,
            )

        return synthetic_sr_images, synthetic_embeddings

    def call(self, input_tensor_01, input_tensor_02=None,
             training: bool = True, input_type: str = 'nat'):
        if training:
            return self._call_training(input_tensor_01, input_tensor_02)

        return self._call_evaluating(input_tensor_01, input_type)

    def get_weights(self, net_type: str = 'syn'):
        if net_type == 'nat':
            return self._fc_classification_nat.get_weights()
        return self._fc_classification_syn.get_weights()

    def set_weights(self, _, weights, net_type: str = 'syn') -> None:
        # `_` receives the tf.distribute strategy object passed by merge_call
        if net_type == 'nat':
            self._fc_classification_nat.set_weights([weights.read_value()])
        else:
            self._fc_classification_syn.set_weights([weights.read_value()])

    def call_fc_classification(self, input, net_type: str = 'syn'):
        if net_type == 'nat':
            return self._fc_classification_nat(input)
        return self._fc_classification_syn(input)
Example 15
class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.conv1 = Conv2D(32,
                            3,
                            activation='relu',
                            autocast=False,
                            dtype=tf.float32)
        self.flatten = Flatten(autocast=False)
        self.d1 = Dense(128,
                        activation='relu',
                        autocast=False,
                        dtype=tf.float32)
        self.d2 = Dense(10,
                        activation='softmax',
                        autocast=False,
                        dtype=tf.float32)
        self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
        # custom optimizer
        self.optimizer = tf.keras.optimizers.SGD()
        self.train_loss = tf.keras.metrics.Mean(name='train_loss')
        self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            name='train_accuracy')
        self.test_loss = tf.keras.metrics.Mean(name='test_loss')
        self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            name='test_accuracy')

    # training step
    @tf.function
    def train_step_(self, images, labels):
        with tf.GradientTape() as tape:
            predictions = self(images)
            loss = self.loss_object(labels, predictions)
        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients,
                                           self.trainable_variables))
        self.train_loss(loss)
        self.train_accuracy(labels, predictions)

    # evaluation step
    @tf.function
    def test_step_(self, images, labels):
        predictions = self(images)
        t_loss = self.loss_object(labels, predictions)
        self.test_loss(t_loss)
        self.test_accuracy(labels, predictions)

    def test(self):
        self.test_loss.reset_states()
        self.test_accuracy.reset_states()
        for test_images, test_labels in self.test_ds:
            self.test_step_(test_images, test_labels)
        template = 'Test Loss: {}, Test Accuracy: {}'
        print(
            template.format(self.test_loss.result(),
                            self.test_accuracy.result() * 100))
        return self.test_accuracy.result() * 100

    # forward pass
    def call(self, x):
        x = self.conv1(x)
        x = self.flatten(x)
        x = self.d1(x)
        return self.d2(x)

    # training loop
    def train_test(self, epoch_data_weight, index):
        for epoch in range(epoch_data_weight):
            # reset the metrics at the start of each epoch
            self.train_loss.reset_states()
            self.train_accuracy.reset_states()
            # self.test_loss.reset_states()
            # self.test_accuracy.reset_states()

            for images, labels in self.train_ds:
                self.train_step_(images, labels)
            # skip the test set for now
            # for test_images, test_labels in self.test_ds:
            #     self.test_step_(test_images, test_labels)

            template = 'index {} Epoch {}, Loss: {}, Accuracy: {}'
            print(
                template.format(index, epoch + 1, self.train_loss.result(),
                                self.train_accuracy.result() * 100))

    def get_weights(self):
        '''
        :return: list of per-layer weights (conv1, d1, d2)
        '''
        weight = []
        weight.append(self.conv1.get_weights())
        weight.append(self.d1.get_weights())
        weight.append(self.d2.get_weights())
        return weight

    # set the weights of each corresponding layer
    def set_weights(self, weight):
        self.conv1.set_weights(weights=weight[0])
        self.d1.set_weights(weights=weight[1])
        self.d2.set_weights(weights=weight[2])

    # assign the datasets for this model
    def set_data(self, train_ds, test_ds=None):
        self.train_ds = train_ds
        self.test_ds = test_ds
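
A hypothetical end-to-end driver for MyModel on MNIST (the dataset pipeline and batch sizes are assumptions):

import tensorflow as tf

(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
train_x = train_x[..., None].astype('float32') / 255.0
test_x = test_x[..., None].astype('float32') / 255.0
train_ds = (tf.data.Dataset.from_tensor_slices((train_x, train_y))
            .shuffle(10000).batch(32))
test_ds = tf.data.Dataset.from_tensor_slices((test_x, test_y)).batch(32)

model = MyModel()
model.set_data(train_ds, test_ds)
model.train_test(epoch_data_weight=1, index=0)  # one epoch
model.test()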