def wider_weighted_add(layer, n_add, weighted=True):
    if not weighted:
        return StubAdd()
    # Widening does not change an element-wise add: it has no width-dependent
    # weights, so n_add is effectively unused and the weights are copied as-is.
    n_add += 0
    new_layer = StubAdd()
    new_layer.set_weights(layer.get_weights())
    return new_layer
def get_add_skip_model():
    graph = Graph((5, 5, 3), False)
    output_node_id = 0

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    # Remember the output of the second conv block as the start of the first skip.
    temp_node_id = output_node_id

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    # First skip-connection: 1x1 conv on the stored node, then element-wise add.
    temp_node_id = graph.add_layer(StubConv(3, 3, 1), temp_node_id)
    output_node_id = graph.add_layer(StubAdd(), [output_node_id, temp_node_id])
    temp_node_id = output_node_id

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    # Second skip-connection.
    temp_node_id = graph.add_layer(StubConv(3, 3, 1), temp_node_id)
    output_node_id = graph.add_layer(StubAdd(), [output_node_id, temp_node_id])

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.CONV_DROPOUT_RATE), output_node_id)

    # Classifier head.
    output_node_id = graph.add_layer(StubFlatten(), output_node_id)
    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(graph.node_list[output_node_id].shape[0], 5), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.DENSE_DROPOUT_RATE), output_node_id)
    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(5, 5), output_node_id)
    output_node_id = graph.add_layer(StubDropout(constant.DENSE_DROPOUT_RATE), output_node_id)
    graph.add_layer(StubSoftmax(), output_node_id)

    graph.produce_model().set_weight_to_graph()
    return graph
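# An illustrative check of the fixture above (an assumed usage, not taken from
# the original test suite): the returned graph should contain exactly two
# StubAdd layers, one per skip-connection.
def _count_add_layers(graph):
    return sum(1 for layer in graph.layer_list if isinstance(layer, StubAdd))

# Example: _count_add_layers(get_add_skip_model()) should equal 2.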
def to_add_skip_model(self, start_id, end_id):
    """Add a weighted add skip-connection from after start node to end node.

    Args:
        start_id: The convolutional layer ID, after which to start the skip-connection.
        end_id: The convolutional layer ID, after which to end the skip-connection.
    """
    self.operation_history.append(('to_add_skip_model', start_id, end_id))
    conv_block_input_id = self._conv_block_end_node(start_id)
    conv_block_input_id = self.adj_list[conv_block_input_id][0][0]
    block_last_layer_input_id = self._conv_block_end_node(end_id)

    # Add the pooling layer chain.
    layer_list = self._get_pooling_layers(conv_block_input_id, block_last_layer_input_id)
    skip_output_id = conv_block_input_id
    for index, layer_id in enumerate(layer_list):
        skip_output_id = self.add_layer(deepcopy(self.layer_list[layer_id]), skip_output_id)

    # Add the conv layer.
    new_relu_layer = StubReLU()
    skip_output_id = self.add_layer(new_relu_layer, skip_output_id)
    new_conv_layer = StubConv(self.layer_list[start_id].filters, self.layer_list[end_id].filters, 1)
    skip_output_id = self.add_layer(new_conv_layer, skip_output_id)
    new_bn_layer = StubBatchNormalization(self.layer_list[end_id].filters)
    skip_output_id = self.add_layer(new_bn_layer, skip_output_id)

    # Add the add layer.
    block_last_layer_output_id = self.adj_list[block_last_layer_input_id][0][0]
    add_input_node_id = self._add_node(deepcopy(self.node_list[block_last_layer_output_id]))
    add_layer = StubAdd()

    self._redirect_edge(block_last_layer_input_id, block_last_layer_output_id, add_input_node_id)
    self._add_edge(add_layer, add_input_node_id, block_last_layer_output_id)
    self._add_edge(add_layer, skip_output_id, block_last_layer_output_id)
    add_layer.input = [self.node_list[add_input_node_id], self.node_list[skip_output_id]]
    add_layer.output = self.node_list[block_last_layer_output_id]
    self.node_list[block_last_layer_output_id].shape = add_layer.output_shape

    # Set weights to the additional conv layer.
    if self.weighted:
        filters_end = self.layer_list[end_id].filters
        filters_start = self.layer_list[start_id].filters
        filter_shape = (1,) * (len(self.layer_list[end_id].get_weights()[0].shape) - 2)
        weights = np.zeros((filters_end, filters_start) + filter_shape)
        bias = np.zeros(filters_end)
        new_conv_layer.set_weights(
            (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))))

        n_filters = filters_end
        new_weights = [
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
        ]
        new_bn_layer.set_weights(new_weights)
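# A minimal usage sketch (an illustration, not part of the original code). It
# assumes the Graph API shown above -- get_add_skip_model(), Graph.layer_list,
# Graph.to_add_skip_model(), Graph.produce_model() -- and picks two StubConv
# layer IDs purely for demonstration; real callers would choose them based on
# the architecture being morphed.
def _add_skip_example():
    graph = get_add_skip_model()
    # Collect the IDs of the convolutional layers; the skip runs from after
    # the first one to after the second one.
    conv_ids = [layer_id for layer_id, layer in enumerate(graph.layer_list)
                if isinstance(layer, StubConv)]
    start_id, end_id = conv_ids[0], conv_ids[1]
    graph.to_add_skip_model(start_id, end_id)
    return graph.produce_model()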
def to_add_skip_model(self, start_id, end_id):
    """Add a weighted add skip connection from after start node to end node.

    Args:
        start_id: The convolutional layer ID, after which to start the skip-connection.
        end_id: The convolutional layer ID, after which to end the skip-connection.
    """
    self.operation_history.append(('to_add_skip_model', start_id, end_id))
    conv_block_input_id = self._conv_block_end_node(start_id)
    conv_block_input_id = self.adj_list[conv_block_input_id][0][0]
    dropout_input_id = self._conv_block_end_node(end_id)

    # Add the pooling layer chain.
    layer_list = self._get_pooling_layers(conv_block_input_id, dropout_input_id)
    skip_output_id = conv_block_input_id
    for index, layer_id in enumerate(layer_list):
        skip_output_id = self.add_layer(deepcopy(self.layer_list[layer_id]), skip_output_id)

    # Add the conv layer.
    new_conv_layer = StubConv(self.layer_list[start_id].filters, self.layer_list[end_id].filters, 1)
    skip_output_id = self.add_layer(new_conv_layer, skip_output_id)

    # Add the add layer.
    dropout_output_id = self.adj_list[dropout_input_id][0][0]
    add_input_node_id = self._add_node(deepcopy(self.node_list[dropout_output_id]))
    add_layer = StubAdd()

    self._redirect_edge(dropout_input_id, dropout_output_id, add_input_node_id)
    self._add_edge(add_layer, add_input_node_id, dropout_output_id)
    self._add_edge(add_layer, skip_output_id, dropout_output_id)
    add_layer.input = [self.node_list[add_input_node_id], self.node_list[skip_output_id]]
    add_layer.output = self.node_list[dropout_output_id]
    self.node_list[dropout_output_id].shape = add_layer.output_shape

    # Set weights to the additional conv layer.
    if self.weighted:
        filters_end = self.layer_list[end_id].filters
        filters_start = self.layer_list[start_id].filters
        filter_shape = (1,) * (len(self.layer_list[end_id].get_weights()[0].shape) - 2)
        weights = np.zeros((filters_end, filters_start) + filter_shape)
        bias = np.zeros(filters_end)
        new_conv_layer.set_weights(
            (add_noise(weights, np.array([0, 1])), add_noise(bias, np.array([0, 1]))))