def _create_graph(self):
    """Build the symbolic A3C network graph: shared conv trunk, value head,
    policy head, the combined loss, and the RMSProp updater.

    Reads (set elsewhere on self): img_channels/img_height/img_width,
    num_actions, log_epsilon, beta, network_params.
    Writes: x, y_r, action_index placeholders; n1/n2/d1 hidden layers;
    logits_v/logits_p heads; cost_* losses; opt updater.
    """
    # Input placeholders: image batch (NCHW) and discounted-return targets.
    self.x = Tensor(shape=[None, self.img_channels, self.img_height, self.img_width]).Variable()
    self.y_r = Tensor(shape=[None], name='Yr').Variable()

    # As implemented in A3C paper
    # Conv trunk: 16 filters 8x8/4, then 32 filters 4x4/2, ReLU-activated.
    self.n1 = ops.Relu(ops.Conv2D([self.x] + self.weight_bias(), kernel_size=8, stride=4, num_output=16))
    self.n2 = ops.Relu(ops.Conv2D([self.n1] + self.weight_bias(), kernel_size=4, stride=2, num_output=32))

    # One-hot selected-action placeholder, shape (batch, num_actions).
    self.action_index = Tensor(shape=[None, self.num_actions]).Variable()

    # Shared fully-connected layer feeding both heads.
    self.d1 = ops.Relu(ops.InnerProduct([self.n2] + self.weight_bias(), num_output=256))

    # Value head: scalar V(s) per sample; L2 loss against the return target.
    self.logits_v = ops.InnerProduct([self.d1] + self.weight_bias(), num_output=1)
    self.cost_v = ops.L2Loss([self.y_r, self.logits_v])

    # Policy head: one logit per action.
    self.logits_p = ops.InnerProduct([self.d1] + self.weight_bias(), num_output=self.num_actions)

    if Config.USE_LOG_SOFTMAX:
        # Log-softmax variant of the policy loss is not supported here.
        raise NotImplementedError()
    else:
        self.softmax_p = ops.Softmax(self.logits_p)
        # Probability assigned to the action actually taken (row-wise sum
        # of the masked softmax).
        self.selected_action_prob = ops.Sum(self.softmax_p * self.action_index, axis=1)
        # Policy-gradient term: log pi(a|s) * advantage.  StopGradient keeps
        # the advantage from back-propagating into the value head; Clip
        # guards the log against zero probabilities.
        self.cost_p_1 = ops.Log(ops.Clip(self.selected_action_prob, self.log_epsilon, None)) * \
                        (self.y_r - ops.StopGradient(self.logits_v))
        # Entropy bonus term, scaled by -beta (encourages exploration).
        self.cost_p_2 = ops.Sum(ops.Log(ops.Clip(self.softmax_p, self.log_epsilon, None)) *
                                self.softmax_p, axis=1) * (-self.beta)
    # Aggregate both policy terms over the batch; total policy cost is the
    # negation (gradient ascent on the objective via descent on the cost).
    self.cost_p_1_agg = ops.Sum(self.cost_p_1)
    self.cost_p_2_agg = ops.Sum(self.cost_p_2)
    self.cost_p = -(self.cost_p_1_agg + self.cost_p_2_agg)
    self.cost_all = self.cost_p + self.cost_v

    if Config.DUAL_RMSPROP:
        # Separate optimizers for policy and value are not supported here.
        raise NotImplementedError()
    else:
        if Config.USE_GRAD_CLIP:
            self.opt = updaters.RMSPropUpdater(decay=Config.RMSPROP_DECAY,
                                               eps=Config.RMSPROP_EPSILON,
                                               clip_gradient=Config.GRAD_CLIP_NORM)
        else:
            self.opt = updaters.RMSPropUpdater(decay=Config.RMSPROP_DECAY,
                                               eps=Config.RMSPROP_EPSILON)

    # Wire every parameter/gradient pair into the shared updater.
    grads = T.grad(self.cost_all, self.network_params)
    for p, g in zip(self.network_params, grads):
        self.opt.append((p, g), lr_mult=1.0)
def relu(features, name=None):
    """Apply the rectified linear activation, ``max(features, 0)``.

    Args:
        features: A `Tensor`.
        name: Optional name for the operation.

    Returns:
        A `Tensor` with the same type as ``features``.
    """
    activated = ops.Relu(features, name=name)
    return activated
def relu(x, alpha=0):
    """Rectified Linear Unit function, introduced by `[Nair & Hinton, 2010]
    <http://www.csri.utoronto.ca/~hinton/absps/reluICML.pdf>`_.

    Parameters
    ----------
    x : Tensor
        The input tensor.
    alpha : float
        The slope of the negative side.

    Returns
    -------
    Tensor
        The output tensor.

    """
    # A non-zero slope selects the leaky variant; otherwise plain ReLU.
    if alpha != 0:
        return ops.LRelu(x, slope=alpha)
    return ops.Relu(x)
def Setup(self, bottom):
    """Set up the ReLU layer and return its output op.

    Parameters
    ----------
    bottom : Tensor or list of Tensor
        The layer input; when a list is given, only the first entry is used.

    Returns
    -------
    Tensor
        The rectified output, built with this layer's stored ``_param``.
    """
    super(ReLULayer, self).Setup(bottom)
    # Renamed from `input`, which shadowed the Python builtin.
    inp = bottom[0] if isinstance(bottom, list) else bottom
    return ops.Relu(inp, **self._param)
def relu(features, name=None):
    """Element-wise rectified linear activation: ``max(features, 0)``.

    Parameters
    ----------
    features : Tensor
        The input tensor.
    name : str, optional
        A name for the operation.

    Returns
    -------
    Tensor
        The activated tensor, same type as ``features``.
    """
    out = ops.Relu(features, name=name)
    return out
def LayerSetup(self, bottom):
    """Build and return the ReLU op for this layer.

    ``self.arguments`` supplies the op's keyword parameters.
    """
    kwargs = dict(self.arguments)
    return _ops.Relu(bottom, **kwargs)