def max_pool2d(inputs, kernel_size, stride=2, padding='VALID', outputs_collections=None, scope=None):
  """Applies 2-D max pooling over the spatial dimensions of a 4-D tensor.

  Pooling is performed per image and per channel only; neither the batch
  nor the depth dimension is pooled.

  Args:
    inputs: a tensor of size [batch_size, height, width, depth].
    kernel_size: a list of length 2: [kernel_height, kernel_width] of the
      pooling kernel, or a single int used for both dimensions.
    stride: a list of length 2: [stride_height, stride_width], or a single
      int used for both strides. Note that presently both strides must
      have the same value.
    padding: the padding method, either 'VALID' or 'SAME'.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for op_scope.

  Returns:
    a tensor representing the results of the pooling operation.

  Raises:
    ValueError: if 'kernel_size' is not a 2-D list
  """
  with ops.op_scope([inputs], scope, 'MaxPool2D') as sc:
    k_h, k_w = utils.two_element_tuple(kernel_size)
    s_h, s_w = utils.two_element_tuple(stride)
    # NHWC layout: pool only over the two spatial axes.
    pooled = nn.max_pool(inputs,
                         ksize=[1, k_h, k_w, 1],
                         strides=[1, s_h, s_w, 1],
                         padding=padding)
    return utils.collect_named_outputs(outputs_collections, sc, pooled)
def loop_fn(i):
  """Pools the i-th image of `x` under tape `g`.

  Returns the pooled output, the gradient of the l2 loss w.r.t. the image
  (with an explicit ones cotangent), and the gradient of that gradient
  w.r.t. the cotangent.
  """
  with g:
    image = array_ops.gather(x, i)
    pooled = nn.max_pool(
        image, ksize, strides=strides, padding="VALID", data_format="NHWC")
    loss = nn.l2_loss(pooled)
    cotangent = array_ops.ones_like(pooled)
    # Watch the cotangent so we can differentiate the first gradient
    # with respect to it below.
    g.watch(cotangent)
    first_grad = g.gradient(loss, image, output_gradients=cotangent)
  second_grad = g.gradient(first_grad, cotangent)
  return pooled, first_grad, second_grad
def max_pool_2x2(x):
  """Halves the spatial resolution of a feature map via 2x2 max pooling."""
  # Window and stride are identical: non-overlapping 2x2 pooling, NHWC.
  window = [1, 2, 2, 1]
  return nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def _max_pool_2x2(x):
  """Reduces each spatial dimension of a feature map by a factor of two."""
  return nn.max_pool(x,
                     ksize=[1, 2, 2, 1],
                     strides=[1, 2, 2, 1],
                     padding='SAME')
def loop_fn(i):
  """Pools the i-th image of `x` and returns (output, d(l2_loss)/d(image))."""
  image = array_ops.gather(x, i)
  pooled = nn.max_pool(
      image, ksize, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
  loss = nn.l2_loss(pooled)
  return pooled, gradient_ops.gradients(loss, image)
def loop_fn(i):
  """Max-pools one gathered image and pairs the result with its loss gradient."""
  sample = array_ops.gather(x, i)
  out = nn.max_pool(
      sample, ksize, strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
  # l2_loss gives a scalar objective to differentiate through the pooling.
  objective = nn.l2_loss(out)
  return out, gradient_ops.gradients(objective, sample)