def __eq__(self, other):
  if type(self) is not type(other):
    return False
  if self._type_spec != other._type_spec:
    return False
  self_tensors = nest.flatten(self, expand_composites=True)
  other_tensors = nest.flatten(other, expand_composites=True)
  if len(self_tensors) != len(other_tensors):
    return False
  conditions = []
  for t1, t2 in zip(self_tensors, other_tensors):
    # Explicitly check shape (values that have different shapes but broadcast
    # to the same value are considered non-equal).
    conditions.append(
        math_ops.reduce_all(
            gen_math_ops.equal(
                array_ops.shape(t1), array_ops.shape(t2),
                incompatible_shape_error=False)))
    conditions.append(
        math_ops.reduce_all(
            gen_math_ops.equal(t1, t2, incompatible_shape_error=False)))
  return math_ops.reduce_all(array_ops.stack(conditions))
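This equality appears to come from TensorFlow's composite/extension-type machinery: it returns a scalar boolean tensor that is True only when type, structure, component shapes, and component values all match. A minimal usage sketch, assuming TF 2.x's tf.experimental.ExtensionType (which defines equality in this form):

import tensorflow as tf

class Pair(tf.experimental.ExtensionType):
  x: tf.Tensor
  y: tf.Tensor

a = Pair(x=tf.constant([1, 2]), y=tf.constant(3.0))
b = Pair(x=tf.constant([1, 2]), y=tf.constant(3.0))
c = Pair(x=tf.constant([1, 9]), y=tf.constant(3.0))
print(a == b)  # scalar True tensor, not an elementwise comparison
print(a == c)  # scalar False tensor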
def create_test_network_7():
  """Aligned network for test, with a control dependency.

  The graph is similar to create_test_network_1(), except that it includes an
  assert operation on the left branch.

  Returns:
    g: Tensorflow graph object (Graph proto).
  """
  g = ops.Graph()
  with g.as_default():
    # An 8x8 test image.
    x = array_ops.placeholder(dtypes.float32, (1, 8, 8, 1), name='input_image')
    # Left branch.
    l1 = slim.conv2d(x, 1, [1, 1], stride=4, scope='L1', padding='VALID')
    l1_shape = array_ops.shape(l1)
    assert_op = control_flow_ops.Assert(
        gen_math_ops.equal(l1_shape[1], 2), [l1_shape], summarize=4)
    # Right branch.
    l2_pad = array_ops.pad(x, [[0, 0], [1, 0], [1, 0], [0, 0]])
    l2 = slim.conv2d(l2_pad, 1, [3, 3], stride=2, scope='L2', padding='VALID')
    l3 = slim.conv2d(l2, 1, [1, 1], stride=2, scope='L3', padding='VALID')
    # Addition.
    with ops.control_dependencies([assert_op]):
      nn.relu(l1 + l3, name='output')
  return g
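The Assert/control_dependencies pairing above is what forces the runtime shape check to execute alongside the output op. A minimal standalone sketch of the pattern (TF 1.x graph mode, same internal modules as the snippet, illustrative values):

x = array_ops.placeholder(dtypes.float32, [None])
check = control_flow_ops.Assert(
    gen_math_ops.equal(array_ops.shape(x)[0], 8), [array_ops.shape(x)])
with ops.control_dependencies([check]):
  y = x * 2.  # evaluating y also runs the shape assertion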
def layer_fft(state, i):
  diag_vec = diag_vec_list.read(i)
  off_vec = off_vec_list.read(i)
  diag = math_ops.multiply(state, diag_vec)
  off = math_ops.multiply(state, off_vec)

  hidden_size = int(off.get_shape()[1])
  # size = 2**i
  dist = capacity - i
  normal_size = (hidden_size // (2**dist)) * (2**(dist - 1))
  normal_size *= 2
  extra_size = tf.maximum(0, (hidden_size % (2**dist)) - (2**(dist - 1)))
  hidden_size -= normal_size

  def modify(off_normal, dist, normal_size):
    off_normal = array_ops.reshape(
        array_ops.reverse(
            array_ops.reshape(
                off_normal, [-1, normal_size // (2**dist), 2, 2**(dist - 1)]),
            [2]),
        [-1, normal_size])
    return off_normal

  def do_nothing(off_normal):
    return off_normal

  off_normal, off_extra = array_ops.split(off, [normal_size, hidden_size], 1)
  off_normal = control_flow_ops.cond(
      gen_math_ops.equal(normal_size, 0),
      lambda: do_nothing(off_normal),
      lambda: modify(off_normal, dist, normal_size))
  helper1, helper2 = array_ops.split(
      off_extra, [hidden_size - extra_size, extra_size], 1)
  off_extra = array_ops.concat([helper2, helper1], 1)
  off = array_ops.concat([off_normal, off_extra], 1)

  layer_output = diag + off
  i += 1
  return layer_output, i
def cumnormalize(op, length, max_length, scale=True, center=True):
  if center:
    mean = cummean(op, length, max_length)
    op -= mean
  if scale:
    # Note: despite the name, this is the running standard deviation (the
    # root of the running mean of the squared, already-centered values).
    variance = math_ops.sqrt(cummean(math_ops.square(op), length, max_length))
    op /= array_ops.where(
        gen_math_ops.equal(variance, 0),
        array_ops.ones_like(variance), variance)
  return op
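cummean is not defined in this snippet. A plausible sketch consistent with how it is called here, computing a running mean along the time axis (hypothetical helper; length-based masking of padded steps is omitted):

def cummean(x, length, max_length):
  # Running mean at step t = cumulative sum up to t divided by (t + 1).
  # `length` would be used for masking padded steps; ignored in this sketch.
  steps = math_ops.cast(math_ops.range(1, max_length + 1), x.dtype)
  return math_ops.cumsum(x, axis=1) / steps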
def even_input(off, size):

  def even_s(off, size):
    off = array_ops.reshape(off, [-1, size // 2, 2])
    off = array_ops.reshape(array_ops.reverse(off, [2]), [-1, size])
    return off

  def odd_s(off, size):
    off, helper = array_ops.split(off, [size - 1, 1], 1)
    size -= 1
    off = even_s(off, size)
    off = array_ops.concat([off, helper], 1)
    return off

  off = control_flow_ops.cond(
      gen_math_ops.equal(gen_math_ops.mod(size, 2), 0),
      lambda: even_s(off, size), lambda: odd_s(off, size))
  return off
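Concretely, even_s swaps each adjacent pair of columns (odd_s holds the last column out, then does the same). A quick check of the reshape/reverse/reshape trick in eager TF, with illustrative values:

import tensorflow as tf

x = tf.constant([[1., 2., 3., 4.]])
swapped = tf.reshape(tf.reverse(tf.reshape(x, [-1, 2, 2]), [2]), [-1, 4])
# swapped == [[2., 1., 4., 3.]]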
def _sample_n(self, n, seed=None):
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  w = control_flow_ops.cond(
      gen_math_ops.equal(self.__m, 3),
      lambda: self.__sample_w3(n, seed),
      lambda: self.__sample_w_rej(n, seed))
  # The transpose/slice/transpose drops the first component along the last
  # axis of the Gaussian sample before normalizing.
  v = nn_impl.l2_normalize(
      array_ops.transpose(
          array_ops.transpose(
              random_ops.random_normal(shape, dtype=self.dtype,
                                       seed=seed))[1:]),
      axis=-1)
  x = array_ops.concat((w, math_ops.sqrt(1 - w**2) * v), axis=-1)
  z = self.__householder_rotation(x)
  return z
def _sample_n(self, n, seed=0):
  shape = array_ops.concat([[n], self.batch_shape_tensor()], 0)
  w = control_flow_ops.cond(
      gen_math_ops.equal(self.__m, 3),
      lambda: self.__sample_w3(n, seed),
      lambda: self.__sample_w_rej(n, seed))
  # Keep w strictly inside (-1, 1): the gradient of sqrt(1 - w**2) is
  # unbounded at |w| = 1.
  w = tf.clip_by_value(w, -1 + 1e-6, 1 - 1e-6)
  # The transpose/slice/transpose drops the first component along the last
  # axis of the Gaussian sample before normalizing.
  v = nn_impl.l2_normalize(
      array_ops.transpose(
          array_ops.transpose(
              random_ops.random_normal(shape, dtype=self.dtype,
                                       seed=seed))[1:]),
      axis=-1)
  # sqrt(1 + w) * sqrt(1 - w) equals sqrt(1 - w**2) but is numerically more
  # stable when w is close to +/-1.
  tmp = math_ops.sqrt(1.0 + w) * math_ops.sqrt(1.0 - w)
  x = array_ops.concat((w, tmp * v), axis=-1)
  z = self.__householder_rotation(x)
  return z
def layer_tunable(x, i):
  diag_vec = diag_vec_list.read(i)
  off_vec = off_vec_list.read(i)
  diag = math_ops.multiply(x, diag_vec)
  off = math_ops.multiply(x, off_vec)

  def even_input(off, size):

    def even_s(off, size):
      off = array_ops.reshape(off, [-1, size // 2, 2])
      off = array_ops.reshape(array_ops.reverse(off, [2]), [-1, size])
      return off

    def odd_s(off, size):
      off, helper = array_ops.split(off, [size - 1, 1], 1)
      size -= 1
      off = even_s(off, size)
      off = array_ops.concat([off, helper], 1)
      return off

    off = control_flow_ops.cond(
        gen_math_ops.equal(gen_math_ops.mod(size, 2), 0),
        lambda: even_s(off, size), lambda: odd_s(off, size))
    return off

  def odd_input(off, size):
    helper, off = array_ops.split(off, [1, size - 1], 1)
    size -= 1
    off = even_input(off, size)
    off = array_ops.concat([helper, off], 1)
    return off

  size = int(off.get_shape()[1])
  off = control_flow_ops.cond(
      gen_math_ops.equal(gen_math_ops.mod(i, 2), 0),
      lambda: even_input(off, size), lambda: odd_input(off, size))

  layer_output = diag + off
  i += 1
  return layer_output, i
def on_train_begin(self, logs=None):
  # First collect all trainable weights
  self.model._check_trainable_weights_consistency()
  get_w_list = self.model.trainable_weights
  get_w_dec_list = []
  # Filter all weights and select those named 'kernel'
  for w in get_w_list:
    getname = w.name
    pos = getname.rfind('/')
    if pos != -1:
      checked = 'kernel' in getname[pos + 1:]
    else:
      checked = 'kernel' in getname
    if checked:
      get_w_dec_list.append(w)
  if not get_w_dec_list:
    raise ValueError(
        'The trainable weights of the model do not include any kernel.')
  # Define the update ops
  getlr = self.model.optimizer.lr
  with K.name_scope(self.__class__.__name__):
    self.w_updates = []
    self.w_updates_aft = []
    for w in get_w_dec_list:
      w_l = w
      if self.bool_l2:
        w_l = (1 - getlr * self.get_mu) * w_l
      if self.bool_l1:
        w_abs = math_ops.abs(w_l) + self.get_lambda
        w_l = (gen_math_ops.sign(w_l) + gen_math_ops.sign(
            random_ops.random_uniform(
                w_l.get_shape(), minval=-1.0, maxval=1.0)) * math_ops.cast(
                    gen_math_ops.equal(w_l, 0), dtype=w_l.dtype)) * w_abs
        w_abs_x = math_ops.abs(w) - self.get_lambda
        w_x = gen_math_ops.sign(w) * math_ops.cast(
            gen_math_ops.greater(w_abs_x, 0), dtype=w.dtype) * w_abs_x
        self.w_updates_aft.append(state_ops.assign(w, w_x))
      self.w_updates.append(state_ops.assign(w, w_l))
  # Get and store the session
  self.session = K.get_session()
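The enclosing Keras Callback class is not shown. Hypothetically (the class name, constructor arguments, and data names below are illustrative, not from the source), it would be attached like any other callback:

decay_cb = ProximalWeightDecay(get_mu=1e-4, get_lambda=1e-5,
                               bool_l2=True, bool_l1=True)
model.compile(optimizer='sgd', loss='categorical_crossentropy')
model.fit(x_train, y_train, epochs=10, callbacks=[decay_cb])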
def gen_crossentropy(y_true, y_pred, q=0.7, k=-1.0):
  # Select the predicted probabilities at the true-class positions.
  y_ok = array_ops.boolean_mask(y_pred, gen_math_ops.equal(y_true, 1))
  # Convert to float64 for valid operations in TensorFlow.
  um = np.float64(1.)
  q = np.float64(q)
  if k == -1:
    # Cross entropy loss: mean[ (1 - y_ok^q) / q ]
    return K.mean(
        math_ops.divide(math_ops.subtract(um, math_ops.pow(y_ok, q)), q),
        axis=-1)
  else:
    # Truncated cross entropy loss
    k = np.float64(k)
    # where y_ok <= k: (1 - k^q) / q     (where() does not broadcast,
    # otherwise:       (1 - y_ok^q) / q   hence the fill())
    vfunct = array_ops.where(
        gen_math_ops.less_equal(y_ok, k),
        gen_array_ops.fill(array_ops.shape(y_ok), (um - k**q) / q),
        math_ops.divide(math_ops.subtract(um, math_ops.pow(y_ok, q)), q))
    return K.mean(vfunct, axis=-1)
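This appears to implement the generalized (Lq) cross entropy of Zhang & Sabuncu (2018), which interpolates between MAE (q = 1) and ordinary cross entropy (q -> 0), with an optional truncation at k. A usage sketch, assuming one-hot labels and softmax outputs:

model.compile(
    optimizer='adam',
    loss=lambda y_true, y_pred: gen_crossentropy(y_true, y_pred, q=0.7),
    metrics=['accuracy'])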
def atrous_conv2d(value, filters, rate, padding, name=None):
  with ops.op_scope([value, filters], name, "atrous_conv2d") as name:
    value = ops.convert_to_tensor(value, name="value")
    filters = ops.convert_to_tensor(filters, name="filters")
    value_shape = value.get_shape()
    filter_shape = filters.get_shape()
    if not value_shape[3].is_compatible_with(filter_shape[2]):
      raise ValueError(
          "value's input channels does not match filters' input channels, "
          "{} != {}".format(value_shape[3], filter_shape[2]))
    if rate < 1:
      raise ValueError("rate {} cannot be less than one".format(rate))

    if rate == 1:
      value = gen_nn_ops.conv2d(input=value,
                                filter=filters,
                                strides=[1, 1, 1, 1],
                                padding=padding)
      return value

    # We have two padding contributions. The first is used for converting
    # "SAME" to "VALID". The second is required so that the height and width
    # of the zero-padded value tensor are multiples of rate.

    # Spatial dimensions of original input
    value_shape = array_ops.shape(value)
    in_height = value_shape[1]
    in_width = value_shape[2]

    # Spatial dimensions of the filters and the upsampled filters in which we
    # introduce (rate - 1) zeros between consecutive filter values.
    filter_shape = array_ops.shape(filters)
    filter_height = filter_shape[0]
    filter_width = filter_shape[1]
    filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
    filter_width_up = filter_width + (filter_width - 1) * (rate - 1)

    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      pad_height = filter_height_up - 1
      pad_width = filter_width_up - 1
    elif padding == "VALID":
      pad_height = 0
      pad_width = 0
    else:
      raise ValueError("Invalid padding")

    # When padding is "SAME" and the pad_height (pad_width) is odd, we pad more
    # to bottom (right), following the same convention as conv2d().
    pad_top = math_ops.floordiv(pad_height, 2)
    pad_bottom = pad_height - pad_top
    pad_left = math_ops.floordiv(pad_width, 2)
    pad_right = pad_width - pad_left

    # More padding so that rate divides the height and width of the input value
    in_height = in_height + pad_top + pad_bottom
    in_width = in_width + pad_left + pad_right
    mod_height = math_ops.mod(in_height, rate)
    mod_width = math_ops.mod(in_width, rate)
    null = constant_op.constant(0)
    pad_bottom_extra = control_flow_ops.cond(
        gen_math_ops.equal(mod_height, 0),
        lambda: null, lambda: rate - mod_height)
    pad_right_extra = control_flow_ops.cond(
        gen_math_ops.equal(mod_width, 0),
        lambda: null, lambda: rate - mod_width)

    # The paddings argument to space_to_batch includes both padding components
    pad_bottom = pad_bottom + pad_bottom_extra
    pad_right = pad_right + pad_right_extra
    space_to_batch_pad = [[pad_top, pad_bottom], [pad_left, pad_right]]

    value = array_ops.space_to_batch(input=value,
                                     paddings=space_to_batch_pad,
                                     block_size=rate)

    value = gen_nn_ops.conv2d(input=value,
                              filter=filters,
                              strides=[1, 1, 1, 1],
                              padding="VALID",
                              name=name)

    # The crops argument to batch_to_space is just the extra padding component
    batch_to_space_crop = [[0, pad_bottom_extra], [0, pad_right_extra]]

    value = array_ops.batch_to_space(input=value,
                                     crops=batch_to_space_crop,
                                     block_size=rate)

    return value
def atrous_conv2d(value, filters, rate, padding, name=None):
  """Atrous convolution (a.k.a. convolution with holes or dilated convolution).

  Computes a 2-D atrous convolution, also known as convolution with holes or
  dilated convolution, given 4-D `value` and `filters` tensors. If the `rate`
  parameter is equal to one, it performs regular 2-D convolution. If the `rate`
  parameter is greater than one, it performs convolution with holes, sampling
  the input values every `rate` pixels in the `height` and `width` dimensions.
  This is equivalent to convolving the input with a set of upsampled filters,
  produced by inserting `rate - 1` zeros between two consecutive values of the
  filters along the `height` and `width` dimensions, hence the name atrous
  convolution or convolution with holes (the French word trous means holes in
  English).

  More specifically:

      output[b, i, j, k] = sum_{di, dj, q} filters[di, dj, q, k] *
          value[b, i + rate * di, j + rate * dj, q]

  Atrous convolution allows us to explicitly control how densely to compute
  feature responses in fully convolutional networks. Used in conjunction with
  bilinear interpolation, it offers an alternative to `conv2d_transpose` in
  dense prediction tasks such as semantic image segmentation, optical flow
  computation, or depth estimation. It also allows us to effectively enlarge
  the field of view of filters without increasing the number of parameters or
  the amount of computation.

  For a description of atrous convolution and how it can be used for dense
  feature extraction, please see: [Semantic Image Segmentation with Deep
  Convolutional Nets and Fully Connected CRFs](http://arxiv.org/abs/1412.7062).
  The same operation is investigated further in [Multi-Scale Context
  Aggregation by Dilated Convolutions](http://arxiv.org/abs/1511.07122).
  Previous works that effectively use atrous convolution in different ways
  are, among others, [OverFeat: Integrated Recognition, Localization and
  Detection using Convolutional Networks](http://arxiv.org/abs/1312.6229) and
  [Fast Image Scanning with Deep Max-Pooling Convolutional Neural Networks]
  (http://arxiv.org/abs/1302.1700). Atrous convolution is also closely related
  to the so-called noble identities in multi-rate signal processing.

  There are many different ways to implement atrous convolution (see the refs
  above). The implementation here reduces

      atrous_conv2d(value, filters, rate, padding=padding)

  to the following three operations:

      paddings = ...
      net = space_to_batch(value, paddings, block_size=rate)
      net = conv2d(net, filters, strides=[1, 1, 1, 1], padding="VALID")
      crops = ...
      net = batch_to_space(net, crops, block_size=rate)

  Advanced usage. Note the following optimization: A sequence of
  `atrous_conv2d` operations with identical `rate` parameters, 'SAME'
  `padding`, and filters with odd heights/widths:

      net = atrous_conv2d(net, filters1, rate, padding="SAME")
      net = atrous_conv2d(net, filters2, rate, padding="SAME")
      ...
      net = atrous_conv2d(net, filtersK, rate, padding="SAME")

  can be equivalently performed cheaper in terms of computation and memory as:

      pad = ...  # padding so that the input dims are multiples of rate
      net = space_to_batch(net, paddings=pad, block_size=rate)
      net = conv2d(net, filters1, strides=[1, 1, 1, 1], padding="SAME")
      net = conv2d(net, filters2, strides=[1, 1, 1, 1], padding="SAME")
      ...
      net = conv2d(net, filtersK, strides=[1, 1, 1, 1], padding="SAME")
      net = batch_to_space(net, crops=pad, block_size=rate)

  because a pair of consecutive `space_to_batch` and `batch_to_space` ops with
  the same `block_size` cancel out when their respective `paddings` and
  `crops` inputs are identical.

  Args:
    value: A 4-D `Tensor` of type `float`. It needs to be in the default
      "NHWC" format. Its shape is `[batch, in_height, in_width, in_channels]`.
    filters: A 4-D `Tensor` with the same type as `value` and shape
      `[filter_height, filter_width, in_channels, out_channels]`. `filters`'
      `in_channels` dimension must match that of `value`. Atrous convolution
      is equivalent to standard convolution with upsampled filters with
      effective height `filter_height + (filter_height - 1) * (rate - 1)` and
      effective width `filter_width + (filter_width - 1) * (rate - 1)`,
      produced by inserting `rate - 1` zeros along consecutive elements across
      the `filters`' spatial dimensions.
    rate: A positive int32. The stride with which we sample input values
      across the `height` and `width` dimensions. Equivalently, the rate by
      which we upsample the filter values by inserting zeros across the
      `height` and `width` dimensions. In the literature, the same parameter
      is sometimes called `input stride` or `dilation`.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filters`' shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.op_scope([value, filters], name, "atrous_conv2d") as name:
    value = ops.convert_to_tensor(value, name="value")
    filters = ops.convert_to_tensor(filters, name="filters")
    value_shape = value.get_shape()
    filter_shape = filters.get_shape()
    if not value_shape[3].is_compatible_with(filter_shape[2]):
      raise ValueError(
          "value's input channels does not match filters' input channels, "
          "{} != {}".format(value_shape[3], filter_shape[2]))
    if rate < 1:
      raise ValueError("rate {} cannot be less than one".format(rate))

    if rate == 1:
      value = gen_nn_ops.conv2d(input=value,
                                filter=filters,
                                strides=[1, 1, 1, 1],
                                padding=padding)
      return value

    # We have two padding contributions. The first is used for converting
    # "SAME" to "VALID". The second is required so that the height and width
    # of the zero-padded value tensor are multiples of rate.

    # Spatial dimensions of original input
    value_shape = array_ops.shape(value)
    in_height = value_shape[1]
    in_width = value_shape[2]

    # Spatial dimensions of the filters and the upsampled filters in which we
    # introduce (rate - 1) zeros between consecutive filter values.
    filter_height = int(filter_shape[0])
    filter_width = int(filter_shape[1])
    filter_height_up = filter_height + (filter_height - 1) * (rate - 1)
    filter_width_up = filter_width + (filter_width - 1) * (rate - 1)

    # Padding required to reduce to "VALID" convolution
    if padding == "SAME":
      pad_height = filter_height_up - 1
      pad_width = filter_width_up - 1
    elif padding == "VALID":
      pad_height = 0
      pad_width = 0
    else:
      raise ValueError("Invalid padding")

    # When padding is "SAME" and the pad_height (pad_width) is odd, we pad more
    # to bottom (right), following the same convention as conv2d().
    pad_top = math_ops.floordiv(pad_height, 2)
    pad_bottom = pad_height - pad_top
    pad_left = math_ops.floordiv(pad_width, 2)
    pad_right = pad_width - pad_left

    # More padding so that rate divides the height and width of the input value
    in_height = in_height + pad_top + pad_bottom
    in_width = in_width + pad_left + pad_right
    mod_height = math_ops.mod(in_height, rate)
    mod_width = math_ops.mod(in_width, rate)
    null = constant_op.constant(0)
    pad_bottom_extra = control_flow_ops.cond(
        gen_math_ops.equal(mod_height, 0),
        lambda: null, lambda: rate - mod_height)
    pad_right_extra = control_flow_ops.cond(
        gen_math_ops.equal(mod_width, 0),
        lambda: null, lambda: rate - mod_width)

    # The paddings argument to space_to_batch includes both padding components
    pad_bottom = pad_bottom + pad_bottom_extra
    pad_right = pad_right + pad_right_extra
    space_to_batch_pad = [[pad_top, pad_bottom], [pad_left, pad_right]]

    value = array_ops.space_to_batch(input=value,
                                     paddings=space_to_batch_pad,
                                     block_size=rate)

    value = gen_nn_ops.conv2d(input=value,
                              filter=filters,
                              strides=[1, 1, 1, 1],
                              padding="VALID",
                              name=name)

    # The crops argument to batch_to_space is just the extra padding component
    batch_to_space_crop = [[0, pad_bottom_extra], [0, pad_right_extra]]

    value = array_ops.batch_to_space(input=value,
                                     crops=batch_to_space_crop,
                                     block_size=rate)

    return value
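A usage sketch for the function above (TF 1.x-era API, illustrative shapes): with rate=2 a 3x3 filter gets an effective 5x5 field of view, while "SAME" padding preserves the spatial dimensions:

value = array_ops.placeholder(dtypes.float32, [1, 64, 64, 3])
filters = random_ops.random_normal([3, 3, 3, 8])
out = atrous_conv2d(value, filters, rate=2, padding="SAME")
# out: [1, 64, 64, 8]; effective filter size 3 + (3 - 1) * (2 - 1) = 5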
def _tf_equal(a, b):
  """Overload of "equal" for Tensors."""
  return gen_math_ops.equal(a, b)
def test_q_ops_quantile_dqn(self):
  env = gym.make('CartPole-v0')
  ops.reset_default_graph()
  np.random.seed(42)
  random_seed.set_random_seed(42)
  env.seed(42)

  # Setup the policy and model
  global_step = training_util.get_or_create_global_step()
  deterministic_ph = array_ops.placeholder(
      dtypes.bool, [], name='deterministic')
  exploration_op = learning_rate_decay.exponential_decay(
      QTest.hparams.initial_exploration, global_step,
      QTest.hparams.exploration_decay_steps,
      QTest.hparams.exploration_decay_rate)

  state_distribution, state_ph = gym_ops.distribution_from_gym_space(
      env.observation_space, name='state_space')
  action_distribution, _ = gym_ops.distribution_from_gym_space(
      env.action_space, name='action_space')

  # Setup the dataset
  stream = streams.Uniform.from_distributions(
      state_distribution, action_distribution)

  with variable_scope.variable_scope('logits'):
    action_value_op = mlp(state_ph, QTest.hparams.hidden_layers)
    action_value_op = core.dense(
        action_value_op,
        stream.action_value_shape[-1] * QTest.hparams.num_quantiles,
        use_bias=False)
    action_value_op_shape = array_ops.shape(action_value_op)
    action_value_shape = [
        action_value_op_shape[0], action_value_op_shape[1],
        stream.action_value_shape[-1], QTest.hparams.num_quantiles]
    action_value_op = gen_array_ops.reshape(action_value_op,
                                            action_value_shape)
    mean_action_value_op = math_ops.reduce_mean(action_value_op, axis=-1)
    action_op = math_ops.argmax(mean_action_value_op, axis=-1)
    action_op = array_ops.squeeze(action_op)
  policy_variables = variables.trainable_variables(scope='logits')

  next_state_ph = shortcuts.placeholder_like(state_ph,
                                             name='next_state_space')
  with variable_scope.variable_scope('targets'):
    target_next_action_value_op = mlp(next_state_ph,
                                      QTest.hparams.hidden_layers)
    target_next_action_value_op = core.dense(
        target_next_action_value_op,
        stream.action_value_shape[-1] * QTest.hparams.num_quantiles,
        use_bias=False)
    target_next_action_value_op_shape = array_ops.shape(
        target_next_action_value_op)
    target_next_action_value_shape = [
        target_next_action_value_op_shape[0],
        target_next_action_value_op_shape[1],
        stream.action_value_shape[-1], QTest.hparams.num_quantiles]
    target_next_action_value_op = gen_array_ops.reshape(
        target_next_action_value_op, target_next_action_value_shape)
    mean_target_next_action_value_op = math_ops.reduce_mean(
        target_next_action_value_op, axis=-1)
  assign_target_op = shortcuts.assign_scope('logits', 'target_logits')

  replay_dataset = dataset.ReplayDataset(
      stream, max_sequence_length=QTest.hparams.max_sequence_length)
  replay_dataset = replay_dataset.batch(QTest.hparams.batch_size)
  replay_op = replay_dataset.make_one_shot_iterator().get_next()

  action_ph = array_ops.placeholder(
      stream.action_dtype, [None, None] + stream.action_shape, name='action')
  reward_ph = array_ops.placeholder(
      stream.reward_dtype, [None, None] + stream.reward_shape, name='reward')
  terminal_ph = array_ops.placeholder(
      dtypes.bool, [None, None], name='terminal')
  sequence_length_ph = array_ops.placeholder(
      dtypes.int32, [None, 1], name='sequence_length')
  sequence_length = array_ops.squeeze(sequence_length_ph, -1)

  q_value_op, expected_q_value_op = q_ops.expected_q_value(
      array_ops.expand_dims(reward_ph, -1),
      action_ph,
      action_value_op,
      (target_next_action_value_op, mean_target_next_action_value_op),
      weights=array_ops.expand_dims(
          1 - math_ops.cast(terminal_ph, reward_ph.dtype), -1),
      discount=QTest.hparams.discount)

  u = expected_q_value_op - q_value_op
  loss_op = losses_impl.huber_loss(u, delta=QTest.hparams.huber_loss_delta)
  tau_op = (2. * math_ops.range(
      0, QTest.hparams.num_quantiles, dtype=u.dtype) + 1) / (
          2. * QTest.hparams.num_quantiles)
  loss_op *= math_ops.abs(tau_op - math_ops.cast(u < 0, tau_op.dtype))
  loss_op = math_ops.reduce_mean(loss_op, axis=-1)
  loss_op = math_ops.reduce_mean(
      math_ops.reduce_sum(loss_op, axis=-1) / math_ops.cast(
          sequence_length, loss_op.dtype))

  optimizer = adam.AdamOptimizer(learning_rate=QTest.hparams.learning_rate)
  train_op = optimizer.minimize(loss_op, var_list=policy_variables)
  train_op = control_flow_ops.cond(
      gen_math_ops.equal(
          gen_math_ops.mod(
              ops.convert_to_tensor(
                  QTest.hparams.assign_target_steps, dtype=dtypes.int64),
              (global_step + 1)), 0),
      lambda: control_flow_ops.group(*[train_op, assign_target_op]),
      lambda: train_op)

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(assign_target_op)

    for iteration in range(QTest.hparams.num_iterations):
      rewards = gym_test_utils.rollout_on_gym_env(
          sess, env, state_ph, deterministic_ph,
          mean_action_value_op, action_op,
          num_episodes=QTest.hparams.num_episodes,
          stream=stream)

      while True:
        try:
          replay = sess.run(replay_op)
        except (errors_impl.InvalidArgumentError,
                errors_impl.OutOfRangeError):
          break
        loss, _ = sess.run(
            (loss_op, train_op),
            feed_dict={
                state_ph: replay.state,
                next_state_ph: replay.next_state,
                action_ph: replay.action,
                reward_ph: replay.reward,
                terminal_ph: replay.terminal,
                sequence_length_ph: replay.sequence_length,
            })

      rewards = gym_test_utils.rollout_on_gym_env(
          sess, env, state_ph, deterministic_ph,
          mean_action_value_op, action_op,
          num_episodes=QTest.hparams.num_episodes,
          deterministic=True, save_replay=False)
      print('average_rewards = {}'.format(
          rewards / QTest.hparams.num_episodes))
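The tau/Huber combination above appears to be the quantile regression loss of QR-DQN (Dabney et al., 2017): rho_tau(u) = |tau - 1[u < 0]| * L_kappa(u), averaged over quantiles. A standalone sketch mirroring the calls in the test (losses_impl.huber_loss is assumed to take the residual directly, as it does above):

def quantile_huber_loss(u, num_quantiles, delta=1.0):
  # u: TD residuals with the quantile dimension along the last axis.
  tau = (2. * math_ops.range(0, num_quantiles, dtype=u.dtype) + 1.) / (
      2. * num_quantiles)
  weight = math_ops.abs(tau - math_ops.cast(u < 0, tau.dtype))
  return math_ops.reduce_mean(
      weight * losses_impl.huber_loss(u, delta=delta), axis=-1)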
def test_ppo_ops_gae(self):
  ops.reset_default_graph()
  np.random.seed(42)
  random_seed.set_random_seed(42)
  env = gym.make('CartPole-v0')
  env.seed(42)

  # Setup the policy and model
  global_step = training_util.get_or_create_global_step()
  deterministic_ph = array_ops.placeholder(
      dtypes.bool, [], name='deterministic')
  exploration_op = learning_rate_decay.exponential_decay(
      PPOTest.hparams.initial_exploration, global_step,
      PPOTest.hparams.exploration_decay_steps,
      PPOTest.hparams.exploration_decay_rate)

  state_distribution, state_ph = gym_ops.distribution_from_gym_space(
      env.observation_space, name='state_space')

  # values
  with variable_scope.variable_scope('logits'):
    body_op = mlp(state_ph, PPOTest.hparams.hidden_layers)
    action_distribution, action_value_op = (
        gym_ops.distribution_from_gym_space(
            env.action_space, logits=[body_op], name='action_space'))
    action_op = array_ops.squeeze(
        sampling_ops.epsilon_greedy(
            action_distribution, exploration_op, deterministic_ph))
    body_op = core.dense(
        body_op, units=PPOTest.hparams.value_units,
        activation=nn_ops.relu, use_bias=False)
    value_op = array_ops.squeeze(
        core.dense(body_op, units=1, use_bias=False), -1)
  policy_variables = variables.trainable_variables(scope='logits')

  # target
  with variable_scope.variable_scope('old_logits'):
    old_body_op = mlp(state_ph, PPOTest.hparams.hidden_layers)
    old_action_distribution, old_action_value_op = (
        gym_ops.distribution_from_gym_space(
            env.action_space, logits=[old_body_op], name='action_space'))
  assign_policy_op = shortcuts.assign_scope('logits', 'old_logits')

  # Setup the dataset
  stream = streams.Uniform.from_distributions(
      state_distribution, action_distribution, with_values=True)
  replay_dataset = dataset.ReplayDataset(
      stream, max_sequence_length=PPOTest.hparams.max_sequence_length)
  replay_dataset = replay_dataset.batch(PPOTest.hparams.batch_size)
  replay_op = replay_dataset.make_one_shot_iterator().get_next()

  action_ph = array_ops.placeholder(
      stream.action_dtype, [None, None] + stream.action_shape, name='action')
  value_ph = array_ops.placeholder(
      stream.reward_dtype, [None, None] + stream.reward_shape, name='value')
  reward_ph = array_ops.placeholder(
      stream.reward_dtype, [None, None] + stream.reward_shape, name='reward')
  terminal_ph = array_ops.placeholder(
      dtypes.bool, [None, None], name='terminal')
  sequence_length_ph = array_ops.placeholder(
      dtypes.int32, [None, 1], name='sequence_length')
  sequence_length = array_ops.squeeze(sequence_length_ph, -1)

  # Setup the loss/optimization procedure
  advantage_op, return_op = ppo_ops.generalized_advantage_estimate(
      reward_ph, value_ph, sequence_length,
      max_sequence_length=PPOTest.hparams.max_sequence_length,
      weights=(1 - math_ops.cast(terminal_ph, reward_ph.dtype)),
      discount=PPOTest.hparams.discount,
      lambda_td=PPOTest.hparams.lambda_td)

  # actor loss
  logits_prob = action_distribution.log_prob(action_ph)
  old_logits_prob = old_action_distribution.log_prob(action_ph)
  ratio = math_ops.exp(logits_prob - old_logits_prob)
  clipped_ratio = clip_ops.clip_by_value(
      ratio, 1. - PPOTest.hparams.epsilon, 1. + PPOTest.hparams.epsilon)
  actor_loss_op = -math_ops.minimum(
      ratio * advantage_op, clipped_ratio * advantage_op)
  critic_loss_op = math_ops.square(
      value_op - return_op) * PPOTest.hparams.value_coeff
  entropy_loss_op = -action_distribution.entropy(
      name='entropy') * PPOTest.hparams.entropy_coeff
  loss_op = actor_loss_op + critic_loss_op + entropy_loss_op

  # total loss
  loss_op = math_ops.reduce_mean(
      math_ops.reduce_sum(loss_op, axis=-1) /
      math_ops.cast(sequence_length, loss_op.dtype))
  optimizer = adam.AdamOptimizer(
      learning_rate=PPOTest.hparams.learning_rate)
  train_op = optimizer.minimize(loss_op, var_list=policy_variables)
  train_op = control_flow_ops.cond(
      gen_math_ops.equal(
          gen_math_ops.mod(
              ops.convert_to_tensor(
                  PPOTest.hparams.assign_policy_steps, dtype=dtypes.int64),
              (global_step + 1)), 0),
      lambda: control_flow_ops.group(*[train_op, assign_policy_op]),
      lambda: train_op)

  with self.test_session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(assign_policy_op)

    for iteration in range(PPOTest.hparams.num_iterations):
      rewards = gym_test_utils.rollout_with_values_on_gym_env(
          sess, env, state_ph, deterministic_ph,
          action_value_op, action_op, value_op,
          num_episodes=PPOTest.hparams.num_episodes,
          stream=stream)

      while True:
        try:
          replay = sess.run(replay_op)
        except (errors_impl.InvalidArgumentError,
                errors_impl.OutOfRangeError):
          break
        _, loss = sess.run(
            (train_op, loss_op),
            feed_dict={
                state_ph: replay.state,
                action_ph: replay.action,
                value_ph: replay.value,
                reward_ph: replay.reward,
                terminal_ph: replay.terminal,
                sequence_length_ph: replay.sequence_length,
            })
        print(loss)

      rewards = gym_test_utils.rollout_on_gym_env(
          sess, env, state_ph, deterministic_ph,
          action_value_op, action_op,
          num_episodes=PPOTest.hparams.num_episodes,
          deterministic=True, save_replay=False)
      print('average_rewards = {}'.format(
          rewards / PPOTest.hparams.num_episodes))
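For reference, generalized_advantage_estimate presumably follows GAE (Schulman et al., 2015): with TD residuals delta_t = r_t + gamma * V(s_{t+1}) - V(s_t), the advantage is A_t = sum_k (gamma * lambda)^k * delta_{t+k} and the return target is A_t + V(s_t). A plain-Python sketch of that recurrence (illustrative, not the repo's implementation):

def gae_reference(rewards, values, discount, lambda_td):
  # `values` has one more entry than `rewards` (bootstrap value at the end).
  advantages = [0.0] * len(rewards)
  acc = 0.0
  for t in reversed(range(len(rewards))):
    delta = rewards[t] + discount * values[t + 1] - values[t]
    acc = delta + discount * lambda_td * acc
    advantages[t] = acc
  returns = [a + v for a, v in zip(advantages, values)]
  return advantages, returns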