import numpy as np

# StubConv, StubBatchNormalization, StubReLU, StubDense, StubFlatten, StubDropout,
# StubPooling, StubGlobalPooling, StubSoftmax, Graph, Constant and add_noise are
# provided by the surrounding code base (in Auto-Keras 0.2.x these live in
# autokeras.layers, autokeras.graph and autokeras.constant; paths may vary).


def deeper_conv_block(conv_layer, kernel_size, weighted=True):
    """Insert a new ReLU-Conv-BN block that starts out (up to noise) as an identity."""
    filter_shape = (kernel_size,) * 2
    n_filters = conv_layer.filters
    weight = np.zeros((n_filters, n_filters) + filter_shape)
    # Spatial center of the kernel, e.g. (1, 1) for a 3x3 filter.
    center = tuple(map(lambda x: int((x - 1) / 2), filter_shape))
    for i in range(n_filters):
        # Each output channel copies exactly one input channel through the
        # kernel center, so the new convolution initially maps input to output unchanged.
        filter_weight = np.zeros((n_filters,) + filter_shape)
        index = (i,) + center
        filter_weight[index] = 1
        weight[i, ...] = filter_weight
    bias = np.zeros(n_filters)
    new_conv_layer = StubConv(conv_layer.filters, n_filters, kernel_size=kernel_size)
    bn = StubBatchNormalization(n_filters)

    if weighted:
        # Perturb the identity initialization slightly to break symmetry.
        new_conv_layer.set_weights(
            (add_noise(weight, np.array([0, 1])), add_noise(bias, np.array([0, 1]))))
        # BN weights in (scale, shift, running_mean, running_var) order;
        # gamma=1, beta=0, mean=0, var=1 keeps the block function-preserving.
        new_weights = [
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.zeros(n_filters, dtype=np.float32), np.array([0, 1])),
            add_noise(np.ones(n_filters, dtype=np.float32), np.array([0, 1])),
        ]
        bn.set_weights(new_weights)

    return [StubReLU(), new_conv_layer, bn]
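
# `add_noise` is called above but not defined in this section. A minimal sketch
# consistent with how it is used (the 1e-4 noise ratio and the use of the other
# weights' value range are assumptions, not necessarily the original constants):
def add_noise(weights, other_weights):
    w_range = np.ptp(other_weights.flatten())  # spread of the reference weights
    noise_range = 1e-4 * w_range               # assumed small noise ratio
    noise = np.random.uniform(-noise_range / 2.0, noise_range / 2.0, weights.shape)
    return np.add(noise, weights)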
def get_conv_dense_model():
    """Build a small fixed conv + dense graph for a 32x32x3 input (e.g. for tests)."""
    graph = Graph((32, 32, 3), False)
    output_node_id = 0
    # Two ReLU-Conv-BN blocks that keep the channel count at 3.
    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)

    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubConv(3, 3, 3), output_node_id)
    output_node_id = graph.add_layer(StubBatchNormalization(3), output_node_id)

    # Classifier head: flatten, dropout, two dense layers, softmax.
    output_node_id = graph.add_layer(StubFlatten(), output_node_id)
    output_node_id = graph.add_layer(StubDropout(Constant.DENSE_DROPOUT_RATE), output_node_id)
    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(
        StubDense(graph.node_list[output_node_id].shape[0], 5), output_node_id)
    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    output_node_id = graph.add_layer(StubDense(5, 5), output_node_id)
    graph.add_layer(StubSoftmax(), output_node_id)

    # Materialize the model once so real weights are attached to the graph.
    graph.produce_model().set_weight_to_graph()
    return graph
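
# Hypothetical smoke test tying the two functions together (assumes the graph
# exposes its stub layers via `layer_list`, as in Auto-Keras 0.2.x; attribute
# names may differ in other versions):
graph = get_conv_dense_model()
conv = next(layer for layer in graph.layer_list if isinstance(layer, StubConv))
relu, new_conv, bn = deeper_conv_block(conv, kernel_size=3)  # identity-init block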
# Method of a CNN network generator class (note the `self` parameter),
# shown here as extracted from its class.
def generate(self, model_len=Constant.MODEL_LEN, model_width=Constant.MODEL_WIDTH):
    """Generate a plain CNN graph: `model_len` ReLU-Conv-BN blocks with
    periodic pooling, followed by global pooling and a dense head."""
    pooling_len = int(model_len / 4)
    graph = Graph(self.input_shape, False)
    temp_input_channel = self.input_shape[-1]
    output_node_id = 0
    for i in range(model_len):
        output_node_id = graph.add_layer(StubReLU(), output_node_id)
        output_node_id = graph.add_layer(
            StubConv(temp_input_channel, model_width, kernel_size=3), output_node_id)
        output_node_id = graph.add_layer(
            StubBatchNormalization(model_width), output_node_id)
        temp_input_channel = model_width
        # Pool after every `pooling_len` blocks, but never right after the last one.
        if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1):
            output_node_id = graph.add_layer(StubPooling(), output_node_id)

    output_node_id = graph.add_layer(StubGlobalPooling(), output_node_id)
    output_node_id = graph.add_layer(
        StubDropout(Constant.CONV_DROPOUT_RATE), output_node_id)
    output_node_id = graph.add_layer(
        StubDense(graph.node_list[output_node_id].shape[0], model_width),
        output_node_id)
    output_node_id = graph.add_layer(StubReLU(), output_node_id)
    graph.add_layer(StubDense(model_width, self.n_output_node), output_node_id)
    return graph
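
# The pooling condition above is easy to misread; a quick standalone check of
# where pooling layers land for a 16-block network (pure Python, no framework):
model_len = 16
pooling_len = int(model_len / 4)  # 4
pool_after = [i + 1 for i in range(model_len)
              if pooling_len == 0 or ((i + 1) % pooling_len == 0 and i != model_len - 1)]
print(pool_after)  # [4, 8, 12] -- every 4th block, skipping the final one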
def wider_bn(layer, start_dim, total_dim, n_add, weighted=True):
    """Widen a batch-normalization layer by `n_add` features, splicing
    identity-initialized statistics in at `start_dim`."""
    if not weighted:
        return StubBatchNormalization(layer.num_features + n_add)
    weights = layer.get_weights()
    # New features get gamma=1, beta=0, running_mean=0, running_var=1 plus noise,
    # matching the (scale, shift, mean, variance) order of `weights`.
    new_weights = [
        add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])),
        add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])),
        add_noise(np.zeros(n_add, dtype=np.float32), np.array([0, 1])),
        add_noise(np.ones(n_add, dtype=np.float32), np.array([0, 1])),
    ]
    student_w = tuple()
    for weight, new_weight in zip(weights, new_weights):
        # Keep the teacher's parameters and insert the new block at start_dim.
        temp_w = weight.copy()
        temp_w = np.concatenate(
            (temp_w[:start_dim], new_weight, temp_w[start_dim:total_dim]))
        student_w += (temp_w,)
    new_layer = StubBatchNormalization(layer.num_features + n_add)
    new_layer.set_weights(student_w)
    return new_layer
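
# What the splice in `wider_bn` does to one parameter vector (numpy only; the
# values below are made up for illustration):
gamma = np.array([0.9, 1.1, 0.8, 1.2], dtype=np.float32)  # teacher BN scales
start_dim, total_dim, n_add = 2, 4, 2
new_gamma = np.ones(n_add, dtype=np.float32)              # identity scale for new units
student = np.concatenate((gamma[:start_dim], new_gamma, gamma[start_dim:total_dim]))
print(student)  # [0.9 1.1 1.  1.  0.8 1.2] -- old statistics preserved around the insert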