def call(self, inputs):
    outputs = []
    if self.data_format == 'channels_first':
        # Depthwise step: convolve each input channel with its own kernel(s).
        count = 0
        for c in range(self.input_spec.axes[1]):
            channel = inputs[:, c:c + 1, ...]
            for d in range(self.depth_multiplier):
                output = K.conv3d(channel,
                                  self.depthwise_kernels[count],
                                  padding=self.padding,
                                  data_format=self.data_format,
                                  dilation_rate=self.dilation_rate)
                if self.use_bias:
                    output = K.bias_add(output,
                                        self.biases[count],
                                        data_format=self.data_format)
                outputs.append(output)
                count += 1
        outputs = K.concatenate(outputs, axis=1)
    else:
        count = 0
        for c in range(self.input_spec.axes[4]):
            # For 'channels_last', channels sit on the last axis.
            channel = inputs[..., c:c + 1]
            for d in range(self.depth_multiplier):
                output = K.conv3d(channel,
                                  self.depthwise_kernels[count],
                                  padding=self.padding,
                                  data_format=self.data_format,
                                  dilation_rate=self.dilation_rate)
                if self.use_bias:
                    output = K.bias_add(output,
                                        self.biases[count],
                                        data_format=self.data_format)
                outputs.append(output)
                count += 1
        outputs = K.concatenate(outputs, axis=4)
    # Pointwise step: mix channels with a 1x1x1 convolution.
    outputs = K.conv3d(outputs,
                       self.pointwise_kernel,
                       padding=self.padding,
                       data_format=self.data_format,
                       dilation_rate=self.dilation_rate)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
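# Hedged aside (not part of the layer above): a back-of-the-envelope check of
# why a depthwise-separable 3-D convolution like the one in call() is cheaper
# than a full 3-D convolution. All numbers are illustrative assumptions, not
# values taken from the layer.
k, c_in, c_out, depth_multiplier = 3, 16, 32, 1
full_conv_params = k * k * k * c_in * c_out              # 13,824 weights
depthwise_params = k * k * k * c_in * depth_multiplier   # 432 weights
pointwise_params = (c_in * depth_multiplier) * c_out     # 512 weights (1x1x1 kernel)
print(full_conv_params, depthwise_params + pointwise_params)  # 13824 vs. 944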
def recurrent_conv(self, x, w):
    conv_out = K.conv3d(x, w, strides=(1, 1, 1),
                        padding='same',
                        data_format=self.data_format)
    return conv_out
def call(self, inputs, training=None):
    def _l2normalize(v, eps=1e-12):
        return v / (K.sum(v ** 2) ** 0.5 + eps)

    def power_iteration(W, u):
        _u = u
        _v = _l2normalize(K.dot(_u, K.transpose(W)))
        _u = _l2normalize(K.dot(_v, W))
        return _u, _v

    if self.spectral_normalization:
        W_shape = self.kernel.shape.as_list()
        # Flatten the tensor so each output filter is a column.
        W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
        _u, _v = power_iteration(W_reshaped, self.u)
        # Estimate sigma, the largest singular value.
        sigma = K.dot(_v, W_reshaped)
        sigma = K.dot(sigma, K.transpose(_u))
        # Normalize the kernel by sigma.
        W_bar = W_reshaped / sigma
        # Reshape the weight tensor back to its original shape.
        if training in {0, False}:
            W_bar = K.reshape(W_bar, W_shape)
        else:
            # At training time, also update the persistent `u` vector.
            with tf.control_dependencies([self.u.assign(_u)]):
                W_bar = K.reshape(W_bar, W_shape)
        # Use the normalized weight.
        self.kernel = W_bar

    if self.rank == 1:
        outputs = K.conv1d(inputs,
                           self.kernel,
                           strides=self.strides[0],
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate[0])
    if self.rank == 2:
        outputs = K.conv2d(inputs,
                           self.kernel,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)
    if self.rank == 3:
        outputs = K.conv3d(inputs,
                           self.kernel,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)
    if self.use_bias:
        outputs = K.bias_add(outputs,
                             self.bias,
                             data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
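# Hedged aside: a plain-NumPy sketch of the power iteration used above, to show
# that `sigma` approximates the largest singular value of the flattened kernel.
# All names here (W, u, v) are illustrative and independent of the layer.
import numpy as np

def _l2normalize_np(v, eps=1e-12):
    return v / (np.sum(v ** 2) ** 0.5 + eps)

W = np.random.randn(64, 32)                  # flattened kernel: (fan_in, filters)
u = _l2normalize_np(np.random.randn(1, 32))  # persistent left vector, like self.u
for _ in range(5):                           # the layer runs one step per call
    v = _l2normalize_np(u @ W.T)
    u = _l2normalize_np(v @ W)
sigma = (v @ W @ u.T)[0, 0]
print(sigma, np.linalg.svd(W, compute_uv=False)[0])  # the two should be close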
def input_conv(self, x, w, b=None, padding='valid'):
    conv_out = K.conv3d(x, w, strides=self.strides,
                        padding=padding,
                        data_format=self.data_format,
                        dilation_rate=self.dilation_rate)
    if b is not None:
        conv_out = K.bias_add(conv_out, b,
                              data_format=self.data_format)
    return conv_out
def call(self, inputs):
    scaled_kernel = self.kernel * self.runtime_coeff
    # Pad the scaled kernel and average neighbouring taps along each spatial
    # axis, producing the fused (smoothed) kernel actually used for convolution.
    if self.rank == 1:
        kernel = Ke.pad(scaled_kernel, [[1, 1], [0, 0], [0, 0]])
        fused_kernel = Ke.add_n([kernel[1:],
                                 kernel[:-1]]) / 2.0
        outputs = K.conv1d(inputs,
                           fused_kernel,
                           strides=self.strides[0],
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate[0])
    if self.rank == 2:
        kernel = Ke.pad(scaled_kernel, [[1, 1], [1, 1], [0, 0], [0, 0]])
        fused_kernel = Ke.add_n([kernel[1:, 1:],
                                 kernel[:-1, 1:],
                                 kernel[1:, :-1],
                                 kernel[:-1, :-1]]) / 4.0
        outputs = K.conv2d(inputs,
                           fused_kernel,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)
    if self.rank == 3:
        kernel = Ke.pad(scaled_kernel, [[1, 1], [1, 1], [1, 1], [0, 0], [0, 0]])
        fused_kernel = Ke.add_n([kernel[1:, 1:, 1:],
                                 kernel[1:, 1:, :-1],
                                 kernel[1:, :-1, 1:],
                                 kernel[1:, :-1, :-1],
                                 kernel[:-1, 1:, 1:],
                                 kernel[:-1, 1:, :-1],
                                 kernel[:-1, :-1, 1:],
                                 kernel[:-1, :-1, :-1]]) / 8.0
        outputs = K.conv3d(inputs,
                           fused_kernel,
                           strides=self.strides,
                           padding=self.padding,
                           data_format=self.data_format,
                           dilation_rate=self.dilation_rate)
    if self.use_bias:
        outputs = K.bias_add(outputs,
                             self.bias,
                             data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
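# Hedged aside: along each spatial axis, the pad-and-average scheme above is
# equivalent to convolving the kernel taps with a [0.5, 0.5] box filter. A 1-D
# NumPy check with illustrative values:
import numpy as np

w = np.array([1.0, 2.0, 3.0])             # 1-D kernel taps
padded = np.pad(w, (1, 1))                # [0, 1, 2, 3, 0]
fused = (padded[1:] + padded[:-1]) / 2.0  # [0.5, 1.5, 2.5, 1.5]
print(fused)
print(np.convolve(w, [0.5, 0.5]))         # same result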
def do_3d_convolution(feature_matrix, kernel_matrix, pad_edges=False,
                      stride_length_px=1):
    """Convolves 3-D feature maps with 3-D kernel.

    m = number of rows in kernel
    n = number of columns in kernel
    h = number of heights in kernel
    c = number of output feature maps (channels)

    :param feature_matrix: Input feature maps (numpy array). Dimensions must be
        M x N x H x C or 1 x M x N x H x C.
    :param kernel_matrix: Kernel as numpy array. Dimensions must be
        m x n x h x C x c.
    :param pad_edges: See doc for `do_2d_convolution`.
    :param stride_length_px: See doc for `do_2d_convolution`.
    :return: feature_matrix: Output feature maps (numpy array). Dimensions will
        be 1 x M x N x H x c or 1 x (M - m + 1) x (N - n + 1) x (H - h + 1) x c,
        depending on whether or not edges are padded.
    """

    error_checking.assert_is_numpy_array_without_nan(feature_matrix)
    error_checking.assert_is_numpy_array_without_nan(kernel_matrix)
    error_checking.assert_is_numpy_array(kernel_matrix, num_dimensions=5)
    error_checking.assert_is_boolean(pad_edges)
    error_checking.assert_is_integer(stride_length_px)
    error_checking.assert_is_geq(stride_length_px, 1)

    if len(feature_matrix.shape) == 4:
        feature_matrix = numpy.expand_dims(feature_matrix, axis=0)

    error_checking.assert_is_numpy_array(feature_matrix, num_dimensions=5)

    if pad_edges:
        padding_string = 'same'
    else:
        padding_string = 'valid'

    feature_tensor = K.conv3d(
        x=K.variable(feature_matrix), kernel=K.variable(kernel_matrix),
        strides=(stride_length_px, stride_length_px, stride_length_px),
        padding=padding_string, data_format='channels_last')

    return feature_tensor.numpy()
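# Hedged usage sketch for do_3d_convolution, assuming the module-level imports
# (numpy, K, error_checking) used by the function above. Shapes are illustrative.
feature_matrix = numpy.random.rand(10, 10, 10, 1).astype('float32')  # M x N x H x C
kernel_matrix = numpy.random.rand(3, 3, 3, 1, 8).astype('float32')   # m x n x h x C x c
output_matrix = do_3d_convolution(
    feature_matrix, kernel_matrix, pad_edges=False, stride_length_px=1)
print(output_matrix.shape)  # (1, 8, 8, 8, 8) with 'valid' padding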
def call(self, inputs):
    # Mask kernel with connection matrix
    masked_kernel = self.kernel * self.connections

    # Apply convolution
    if self.rank == 1:
        outputs = K.conv1d(
            inputs,
            masked_kernel,
            strides=self.strides[0],
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate[0])
    if self.rank == 2:
        outputs = K.conv2d(
            inputs,
            masked_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
    if self.rank == 3:
        outputs = K.conv3d(
            inputs,
            masked_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

    if self.use_bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)

    if self.activation is not None:
        return self.activation(outputs)
    return outputs
def call(self, inputs, training=None):
    scaled_kernel = self.kernel * self.runtime_coeff

    if self.rank == 1:
        outputs = K.conv1d(
            inputs,
            scaled_kernel,
            strides=self.strides[0],
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate[0])
    if self.rank == 2:
        outputs = K.conv2d(
            inputs,
            scaled_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
    if self.rank == 3:
        outputs = K.conv3d(
            inputs,
            scaled_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

    if self.use_bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)
    if self.activation is not None:
        outputs = self.activation(outputs)
    return outputs