Code example #1
File: masked.py  Project: mindis/easykeras
def call(self, inputs, mask=None, **kwargs):
    # Optionally return the computed mask (cast to floats) alongside the first input.
    if self.return_masked:
        return [
            inputs[0],
            K.cast(self.compute_mask(inputs, mask)[0], K.floatx())
        ]
    return inputs[0]
Code example #2
def call(self, inputs, **kwargs):
    # Inputs arrive as (token embeddings, task ids); look up one embedding
    # per task and add it to every timestep of the token embeddings.
    inputs, tasks = inputs
    if K.dtype(tasks) != 'int32':
        tasks = K.cast(tasks, 'int32')
    task_embed = K.gather(self.embeddings, tasks)
    if self.mask_zero:
        # Zero the task embedding wherever the task id is 0 (padding).
        task_embed = task_embed * K.expand_dims(
            K.cast(K.not_equal(tasks, 0), K.floatx()), axis=-1)
    if K.backend() == 'theano':
        # Theano needs an explicit tile over the time dimension; TensorFlow broadcasts.
        task_embed = K.tile(task_embed, (1, K.shape(inputs)[1], 1))
    return inputs + task_embed
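
The pattern above, gather a per-task embedding and zero it where the task id is 0, can be illustrated with plain NumPy. A minimal sketch; the table, ids, and shapes below are made up for illustration:

import numpy as np

# Hypothetical embedding table: 4 task ids, embedding size 3.
embeddings = np.arange(12, dtype=np.float32).reshape(4, 3)
tasks = np.array([[2], [0]])                        # batch of task ids; id 0 acts as padding

task_embed = embeddings[tasks]                      # gather -> shape (batch, 1, 3)
mask = (tasks != 0).astype(np.float32)[..., None]   # 1.0 for real tasks, 0.0 for id 0
task_embed = task_embed * mask                      # zero the embedding for padded entries
print(task_embed[1])                                # all zeros for the padded entry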
Code example #3
def call(self, inputs, mask=None):
    if mask is not None:
        # Push masked timesteps far below any real activation so they never win the max.
        mask = K.cast(mask, K.floatx())
        inputs -= K.expand_dims((1.0 - mask) * 1e6, axis=-1)
    return K.max(inputs, axis=-2)
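
Subtracting a large constant from masked positions before the reduction guarantees that padded timesteps never win the max. A minimal NumPy sketch of the same trick; shapes and values are illustrative:

import numpy as np

inputs = np.array([[[1.0, 5.0], [9.0, 2.0], [0.0, 0.0]]])  # (batch=1, time=3, features=2)
mask = np.array([[1.0, 1.0, 0.0]])                         # last timestep is padding

inputs = inputs - ((1.0 - mask) * 1e6)[..., None]          # push masked steps far below real values
print(inputs.max(axis=-2))                                 # [[9. 5.]] -- padding is ignored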
Code example #4
    def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = K.cast(self.iterations, K.floatx()) + 1

        # Linear warmup to self.lr, then linear decay toward self.min_lr.
        lr = K.switch(
            t <= self.warmup_steps,
            self.lr * (t / self.warmup_steps),
            self.min_lr + (self.lr - self.min_lr) *
            (1.0 - K.minimum(t, self.decay_steps) / self.decay_steps),
        )

        # Bias-corrected step size, as in Adam.
        lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
                     (1. - K.pow(self.beta_1, t)))

        # First-moment (m) and second-moment (v) accumulators, one per parameter.
        ms = [
            K.zeros(K.int_shape(p), dtype=K.dtype(p), name='m_{}'.format(i))
            for i, p in enumerate(params)
        ]
        vs = [
            K.zeros(K.int_shape(p), dtype=K.dtype(p), name='v_{}'.format(i))
            for i, p in enumerate(params)
        ]
        if self.amsgrad:
            vhats = [
                K.zeros(K.int_shape(p),
                        dtype=K.dtype(p),
                        name='vh_{}'.format(i)) for i, p in enumerate(params)
            ]
        else:
            vhats = [
                K.zeros(1, dtype=K.dtype(p), name='vh_{}'.format(i))
                for i, p in enumerate(params)
            ]
        self.weights = [self.iterations] + ms + vs + vhats

        for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            if self.amsgrad:
                vhat_t = K.maximum(vhat, v_t)  # AMSGrad: running max of the second moment
                p_t = m_t / (K.sqrt(vhat_t) + self.epsilon)
                self.updates.append(K.update(vhat, vhat_t))
            else:
                p_t = m_t / (K.sqrt(v_t) + self.epsilon)

            # Decoupled weight decay, optionally limited to parameters whose
            # names match one of the configured patterns.
            if self.initial_weight_decay > 0.0:
                if self.weight_decay_pattern is None:
                    p_t += self.weight_decay * p
                else:
                    for pattern in self.weight_decay_pattern:
                        if pattern in p.name:
                            p_t += self.weight_decay * p
                            break
            p_t = p - lr_t * p_t

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            new_p = p_t

            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)

            self.updates.append(K.update(p, new_p))
        return self.updates
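
The K.switch in this example implements linear warmup to self.lr followed by linear decay toward self.min_lr. A plain-Python sketch of that schedule, with illustrative parameter values:

def warmup_decay_lr(t, lr=1e-3, min_lr=1e-5, warmup_steps=1000, decay_steps=10000):
    # Linear warmup for the first warmup_steps, then linear decay toward min_lr.
    if t <= warmup_steps:
        return lr * (t / warmup_steps)
    return min_lr + (lr - min_lr) * (1.0 - min(t, decay_steps) / decay_steps)

print(warmup_decay_lr(500))     # mid-warmup: 0.0005
print(warmup_decay_lr(10000))   # fully decayed: 1e-05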
Code example #5
File: conv.py  Project: mindis/easykeras
def call(self, inputs, mask=None):
    if mask is not None:
        # Zero out padded timesteps so they contribute nothing to the convolution.
        mask = K.cast(mask, K.floatx())
        inputs *= K.expand_dims(mask, axis=-1)
    return super(MaskedConv1D, self).call(inputs)
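
A hypothetical usage sketch, assuming the MaskedConv1D class above is importable and subclasses Conv1D, so it accepts the usual Conv1D arguments; tensorflow.keras is used here for illustration, the original project may import keras directly:

from tensorflow import keras
# MaskedConv1D is assumed to be defined as in the example above.

inp = keras.layers.Input(shape=(None,), dtype='int32')
emb = keras.layers.Embedding(input_dim=1000, output_dim=32, mask_zero=True)(inp)
out = MaskedConv1D(filters=16, kernel_size=3, padding='same')(emb)
model = keras.Model(inp, out)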