Code Example #1
File: A3CAgent.py Project: simcity429/YAI_PRJ
    def actor_optimizer(self):
        action = K.placeholder(shape=[None, self.action_size])
        advantages = K.placeholder(shape=(None,))
        # advantages -> *multi-step*

        policy = self.actor.output

        action_prob = K.sum(action * policy, axis=1)
        cross_entropy = K.log(action_prob + 1e-10) * advantages
        cross_entropy = -K.mean(cross_entropy)

        # add (-entropy) to the loss function to encourage exploration
        minus_entropy = K.sum(policy * K.log(policy + 1e-10), axis=1)
        minus_entropy = K.mean(minus_entropy)

        # minimizing the loss minimizes cross_entropy; restoring the commented
        # term would also maximize policy entropy
        loss = cross_entropy  # + 0.01 * minus_entropy

        optimizer = Adam(lr=self.actor_lr)
        updates = optimizer.get_updates(loss, self.actor.trainable_weights)
        train = K.function([self.actor.input, action, advantages], [loss],
                           updates=updates)
        return train
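A hedged usage sketch for the returned trainer: K.function takes a list of input arrays and runs the Adam updates as a side effect. Every name below (agent, its state_size attribute, the batch arrays) is a placeholder, not something defined in the original project.

import numpy as np

# Hypothetical usage -- `agent` and the arrays are placeholders.
train_actor = agent.actor_optimizer()  # build the update function once
states = np.random.rand(32, *agent.state_size).astype('float32')
actions = np.eye(agent.action_size)[np.random.randint(agent.action_size, size=32)]
advantages = np.random.rand(32).astype('float32')
loss, = train_actor([states, actions, advantages])  # one gradient step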
Code Example #2
File: losses.py Project: SveinnEirikur/HySpUnCoKit
    def __call__(self, y_true, y_pred):
        # There are additional parameters for this function
        # Note: some of the 'modes' for edge behavior do not yet have a gradient definition in the Theano tree
        #   and cannot be used for learning

        kernel = [self.kernel_size, self.kernel_size]
        y_true = KC.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
        y_pred = KC.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))

        patches_pred = KC.extract_image_patches(y_pred, kernel, kernel, 'valid', self.dim_ordering)
        patches_true = KC.extract_image_patches(y_true, kernel, kernel, 'valid', self.dim_ordering)

        # Reshape to get the var in the cells
        bs, w, h, c1, c2, c3 = self.__int_shape(patches_pred)
        patches_pred = KC.reshape(patches_pred, [-1, w, h, c1 * c2 * c3])
        patches_true = KC.reshape(patches_true, [-1, w, h, c1 * c2 * c3])
        # Get mean
        u_true = KC.mean(patches_true, axis=-1)
        u_pred = KC.mean(patches_pred, axis=-1)
        # Get variance
        var_true = K.var(patches_true, axis=-1)
        var_pred = K.var(patches_pred, axis=-1)
        # Get covariance: E[xy] - E[x]E[y]
        covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred

        ssim = (2 * u_true * u_pred + self.c1) * (2 * covar_true_pred + self.c2)
        denom = (K.square(u_true) + K.square(u_pred) + self.c1) * (var_pred + var_true + self.c2)
        ssim /= denom  # no need for clipping, c1 and c2 make the denom non-zero
        return K.mean((1.0 - ssim) / 2.0)
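Here K is keras.backend, while KC appears to be keras_contrib.backend, which is where extract_image_patches lives. A hedged sketch of wiring such a loss object into a model; the class name and constructor argument are assumptions inferred from the attributes used in __call__:

# Hypothetical wiring -- DSSIMObjective and its arguments are assumed.
loss_fn = DSSIMObjective(kernel_size=3)
model.compile(optimizer='adam', loss=loss_fn)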
Code Example #3
File: utils.py Project: SveinnEirikur/HySpUnCoKit
def SCD(y_true, y_pred):
    # Center each signal by its own mean (the original centered s_p by the
    # mean of y_true, which looks like a copy-paste slip).
    s_t = y_true - K.mean(y_true, axis=1, keepdims=True)
    s_p = y_pred - K.mean(y_pred, axis=1, keepdims=True)

    return 1 - K.mean(
        K.l2_normalize(s_t + K.epsilon(), axis=-1) *
        K.l2_normalize(s_p + K.epsilon(), axis=-1))
Code Example #4
File: utils.py Project: SveinnEirikur/HySpUnCoKit
def normSAD2(y_true, y_pred):
    y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    mse = K.mean(K.square(y_true - y_pred), axis=-1)
    # sad = -K.log(1.0-K.mean(y_true2 * y_pred2/np.pi, axis=-1))
    sad = K.mean(y_true2 * y_pred2, axis=-1)
    # sid = SID(y_true,y_pred)

    return 0.005 * mse - 0.75 * sad
Code Example #5
File: atari_nets.py Project: spring01/hcdrl
def qnet(observation_space, action_space, net_name, net_size):
    num_actions = action_space.n
    net_size = int(net_size)
    net_name = net_name.lower()
    state, feature, net = _atari_state_feature_net(observation_space, net_name)

    # dueling or regular dqn/drqn
    if 'dueling' in net_name:
        value1 = net(net_size, activation='relu')(feature)
        adv1 = net(net_size, activation='relu')(feature)
        value2 = Dense(1)(value1)
        adv2 = Dense(num_actions)(adv1)
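        # dueling head: Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); the K.dot
        # calls below broadcast the scalar V and the mean advantage across
        # the action dimension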
        mean_adv2 = Lambda(lambda x: K.mean(x, axis=1))(adv2)
        ones = K.ones([1, num_actions])
        lambda_exp = lambda x: K.dot(K.expand_dims(x, axis=1), -ones)
        exp_mean_adv2 = Lambda(lambda_exp)(mean_adv2)
        sum_adv = layers.add([exp_mean_adv2, adv2])
        exp_value2 = Lambda(lambda x: K.dot(x, ones))(value2)
        q_value = layers.add([exp_value2, sum_adv])
    else:
        hid = net(net_size, activation='relu')(feature)
        q_value = Dense(num_actions)(hid)

    # build model
    return models.Model(inputs=state, outputs=q_value)
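For comparison, the same aggregation can be written as a single Lambda that relies on broadcasting instead of K.dot against a ones matrix. A sketch only, reusing the value2 and adv2 tensors from the dueling branch above; it is not the project's code:

from keras import backend as K
from keras.layers import Lambda

# Equivalent dueling aggregation via broadcasting (sketch).
q_value = Lambda(
    lambda t: t[0] + t[1] - K.mean(t[1], axis=1, keepdims=True)
)([value2, adv2])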
Code Example #6
File: utils.py Project: SveinnEirikur/HySpUnCoKit
    def __call__(self, x):
        regularization = 0.
        if self.l1:
            # X=K.eval(x)
            diff = x[1:] - x[:-1]
            regularization += K.mean(K.sqrt(diff**2 + 0.000001))

        return regularization * self.l1
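The diff-of-consecutive-rows penalty is a smoothed total-variation regularizer. A hedged attachment sketch; only __call__ is shown above, so the class name SmoothnessRegularizer and its constructor are assumptions:

from keras.layers import Dense

# Hypothetical: assumes the enclosing class is Keras-regularizer
# compatible and is constructed with an `l1` weight.
layer = Dense(64, kernel_regularizer=SmoothnessRegularizer(l1=0.01))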
Code Example #7
File: seq_transform.py Project: tobytoy/MotionGAN
def _get_avg_bone_len(arg):
    bone_list = tf.unstack(arg[:, :, 0, :], axis=1)
    bones = [
        bone_list[j] - bone_list[i]
        for i, j in zip(members_from, members_to)
    ]
    bones = K.expand_dims(K.stack(bones, axis=1), axis=2)
    bone_len = K.sqrt(
        K.sum(K.square(bones), axis=-1, keepdims=True) + K.epsilon())
    return K.mean(bone_len, axis=1, keepdims=True)
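tf.unstack implies import tensorflow as tf alongside the Keras backend. Helpers like this are typically wrapped in a Lambda layer; in the sketch below, seq is a hypothetical pose tensor of shape (batch, njoints, time, 3), and members_from/members_to are the skeleton edge index lists the function closes over:

from keras.layers import Lambda

# Hypothetical call site -- `seq` is a placeholder tensor.
avg_bone_len = Lambda(_get_avg_bone_len)(seq)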
Code Example #8
File: losses.py Project: SveinnEirikur/HySpUnCoKit
def normSAD2(y_true, y_pred):
    # y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    # y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    mse = K.mean(K.square(y_true - y_pred))
    sad = SAD(y_true, y_pred)
    # sad = -K.log(1.0-SAD(y_true, y_pred)/np.pi)
    # sid = SID(y_true,y_pred)

    # return 0.005 * mse + 0.75 * sad
    return 0.005 * mse + 10.0 * sad
Code Example #9
File: losses.py Project: SveinnEirikur/HySpUnCoKit
def normSAD(y_true, y_pred):
    # y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    # y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    mse = K.mean(K.square(y_true - y_pred))
    # sad = -K.log(1.0-K.mean(y_true2 * y_pred2/np.pi, axis=-1))
    sad = SAD(y_true, y_pred)
    # sid = SID(y_true,y_pred)

    # return 0.008*mse-1.0*sad
    return 0.008 * mse + 1.0 * sad
Code Example #10
File: A3CAgent.py Project: simcity429/YAI_PRJ
    def critic_optimizer(self):
        discounted_prediction = K.placeholder(shape=(None, ))

        value = self.critic.output

        # loss = MSE(discounted_prediction, value)
        loss = K.mean(K.square(discounted_prediction - value))

        optimizer = Adam(lr=self.critic_lr)
        updates = optimizer.get_updates(loss, self.critic.trainable_weights)
        train = K.function([self.critic.input, discounted_prediction], [loss],
                           updates=updates)
        return train
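The critic trainer is invoked the same way as the actor trainer in Code Example #1 (all names below are placeholders):

# Hypothetical usage.
train_critic = agent.critic_optimizer()
loss, = train_critic([states, discounted_returns])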
Code Example #11
File: utils.py Project: SveinnEirikur/HySpUnCoKit
def MSE_KL(y_true, y_pred):
    # y_true=y_true[:,-162:]
    y_true = K.switch(
        K.min(y_true) < 0, y_true - K.min(y_true) + K.epsilon(),
        y_true + K.epsilon())
    y_pred = K.switch(
        K.min(y_pred) < 0, y_pred - K.min(y_pred) + K.epsilon(),
        y_pred + K.epsilon())
    p_n = y_true / K.max(y_true, axis=1, keepdims=True)
    q_n = y_pred / K.max(y_pred, axis=1, keepdims=True)

    # MSE plus a symmetric binary-KL style penalty; the 1.001 offsets keep
    # the logs finite (the original mixed 1.01 and 1.001 in one term)
    return K.mean(K.square(y_true - y_pred),
                  axis=-1) + 0.5 * (K.sum(p_n * K.log(p_n / q_n)) + K.sum(
                      (1.001 - p_n) * K.log((1.001 - p_n) / (1.001 - q_n))))
Code Example #12
def add_loss(model, W):
    inputs = model.inputs[0]
    abnormal = model.inputs[1]
    # abnormal = K.print_tensor(abnormal, message='abnormal = ')
    outputs = model.outputs[0]
    z_mean = model.get_layer('z_mean').output
    z_log_var = model.get_layer('z_log_var').output

    beta = K.sum(1.0 - abnormal, axis=-1, keepdims=True) / W
    # beta = K.print_tensor(beta, message='beta = ')
    reconstruction_loss = mean_squared_error(inputs, outputs)
    reconstruction_loss *= W
    kl_loss = 1 + z_log_var - beta * K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    model.add_loss(vae_loss)
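Because the loss is attached via model.add_loss, compile takes no loss argument and fit needs no targets (mean_squared_error here presumably comes from keras.losses). A hedged training sketch with placeholder names:

# Hypothetical usage -- `vae`, `x_train`, `abnormal_mask`, and W=120
# are placeholders.
add_loss(vae, W=120)
vae.compile(optimizer='adam')
vae.fit([x_train, abnormal_mask], epochs=10, batch_size=64)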
Code Example #13
File: dmnn.py Project: tobytoy/MotionGAN
    def classifier(self, x):
        scope = Scoping.get_global_scope()
        with scope.name_scope('classifier'):
            if self.data_set == 'NTURGBD':
                blocks = [{'size': 128, 'bneck': 32,  'groups': 16, 'strides': 1},
                          {'size': 256, 'bneck': 64,  'groups': 16, 'strides': 2},
                          {'size': 512, 'bneck': 128, 'groups': 16, 'strides': 2}]
                n_reps = 3
            else:
                blocks = [{'size': 64,  'bneck': 32, 'groups': 8, 'strides': 3},
                          {'size': 128, 'bneck': 64, 'groups': 8, 'strides': 3}]
                n_reps = 3

            def _data_augmentation(x):
                return K.in_train_phase(_sim_occlusions(_jitter_height(x)), x)

            x = Lambda(_data_augmentation, name=scope+"data_augmentation")(x)

            x = CombMatrix(self.njoints, name=scope+'comb_matrix')(x)

            x = EDM(name=scope+'edms')(x)
            x = Reshape((self.njoints * self.njoints, self.seq_len, 1), name=scope+'resh_in')(x)

            x = BatchNormalization(axis=-1, name=scope+'bn_in')(x)
            x = Conv2D(blocks[0]['bneck'], 1, 1, name=scope+'conv_in', **CONV2D_ARGS)(x)
            for i in range(len(blocks)):
                for j in range(n_reps):
                    with scope.name_scope('block_%d_%d' % (i, j)):
                        x = _conv_block(x, blocks[i]['size'], blocks[i]['bneck'],
                                        blocks[i]['groups'], 3, blocks[i]['strides'] if j == 0 else 1)

            x = Lambda(lambda args: K.mean(args, axis=(1, 2)), name=scope+'mean_pool')(x)
            x = BatchNormalization(axis=-1, name=scope + 'bn_out')(x)
            x = Activation('relu', name=scope + 'relu_out')(x)

            x = Dropout(self.dropout, name=scope+'dropout')(x)
            x = Dense(self.num_actions, activation='softmax', name=scope+'label')(x)

        return x
Code Example #14
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        reduction_axes = list(range(0, len(input_shape)))

        if self.axis is not None:
            del reduction_axes[self.axis]

        del reduction_axes[0]

        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev

        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]

        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed
Code Example #15
    def call(self, x, mask=None):
        if self.mode == 0 or self.mode == 2:
            assert self.built, 'Layer must be built before being called'
            input_shape = K.int_shape(x)

            reduction_axes = list(range(len(input_shape)))
            del reduction_axes[self.axis]
            broadcast_shape = [1] * len(input_shape)
            broadcast_shape[self.axis] = input_shape[self.axis]

            mean_batch, var_batch = _moments(x,
                                             reduction_axes,
                                             shift=None,
                                             keep_dims=False)
            std_batch = (K.sqrt(var_batch + self.epsilon))

            r_max_value = K.get_value(self.r_max)
            r = std_batch / (K.sqrt(self.running_std + self.epsilon))
            r = K.stop_gradient(K.clip(r, 1 / r_max_value, r_max_value))

            d_max_value = K.get_value(self.d_max)
            d = (mean_batch - self.running_mean) / K.sqrt(self.running_std +
                                                          self.epsilon)
            d = K.stop_gradient(K.clip(d, -d_max_value, d_max_value))

            # list(...) is needed under Python 3, where range() is not a list
            if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
                x_normed_batch = (x - mean_batch) / std_batch
                x_normed = (x_normed_batch * r + d) * self.gamma + self.beta
            else:
                # need broadcasting
                broadcast_mean = K.reshape(mean_batch, broadcast_shape)
                broadcast_std = K.reshape(std_batch, broadcast_shape)
                broadcast_r = K.reshape(r, broadcast_shape)
                broadcast_d = K.reshape(d, broadcast_shape)
                broadcast_beta = K.reshape(self.beta, broadcast_shape)
                broadcast_gamma = K.reshape(self.gamma, broadcast_shape)

                x_normed_batch = (x - broadcast_mean) / broadcast_std
                x_normed = (x_normed_batch * broadcast_r +
                            broadcast_d) * broadcast_gamma + broadcast_beta

            # explicit update to moving mean and standard deviation
            self.add_update([
                K.moving_average_update(self.running_mean, mean_batch,
                                        self.momentum),
                K.moving_average_update(self.running_std, std_batch**2,
                                        self.momentum)
            ], x)

            # update r_max and d_max
            r_val = self.r_max_value / (
                1 + (self.r_max_value - 1) * K.exp(-self.t))
            d_val = self.d_max_value / (1 + (
                (self.d_max_value / 1e-3) - 1) * K.exp(-(2 * self.t)))

            self.add_update([
                K.update(self.r_max, r_val),
                K.update(self.d_max, d_val),
                K.update_add(self.t, K.variable(np.array([self.t_delta])))
            ], x)

            if self.mode == 0:
                if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:
                    x_normed_running = K.batch_normalization(
                        x,
                        self.running_mean,
                        self.running_std,
                        self.beta,
                        self.gamma,
                        epsilon=self.epsilon)
                else:
                    # need broadcasting
                    broadcast_running_mean = K.reshape(self.running_mean,
                                                       broadcast_shape)
                    broadcast_running_std = K.reshape(self.running_std,
                                                      broadcast_shape)
                    broadcast_beta = K.reshape(self.beta, broadcast_shape)
                    broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
                    x_normed_running = K.batch_normalization(
                        x,
                        broadcast_running_mean,
                        broadcast_running_std,
                        broadcast_beta,
                        broadcast_gamma,
                        epsilon=self.epsilon)

                # pick the normalized form of x corresponding to the training phase
                # for batch renormalization, inference time remains same as batchnorm
                x_normed = K.in_train_phase(x_normed, x_normed_running)

        elif self.mode == 1:
            # sample-wise normalization
            m = K.mean(x, axis=self.axis, keepdims=True)
            std = K.sqrt(
                K.var(x, axis=self.axis, keepdims=True) + self.epsilon)
            x_normed_batch = (x - m) / (std + self.epsilon)

            r_max_value = K.get_value(self.r_max)
            r = std / (self.running_std + self.epsilon)
            r = K.stop_gradient(K.clip(r, 1 / r_max_value, r_max_value))

            d_max_value = K.get_value(self.d_max)
            d = (m - self.running_mean) / (self.running_std + self.epsilon)
            d = K.stop_gradient(K.clip(d, -d_max_value, d_max_value))

            x_normed = ((x_normed_batch * r) + d) * self.gamma + self.beta

            # update r_max and d_max
            t_val = K.get_value(self.t)
            r_val = self.r_max_value / (
                1 + (self.r_max_value - 1) * np.exp(-t_val))
            d_val = self.d_max_value / (1 + (
                (self.d_max_value / 1e-3) - 1) * np.exp(-(2 * t_val)))
            t_val += float(self.t_delta)

            self.add_update([
                K.update(self.r_max, r_val),
                K.update(self.d_max, d_val),
                K.update(self.t, t_val)
            ], x)

        return x_normed
Code Example #16
File: edm.py Project: tobytoy/MotionGAN
def edm_loss(y_true, y_pred):
    return K.mean(K.sum(K.square(edm(y_true) - edm(y_pred)), axis=[1, 2]))
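edm is defined elsewhere in the project; as a hedged sketch (not necessarily the project's implementation), a typical Euclidean-distance-matrix helper for poses of shape (batch, njoints, dims) looks like this:

from keras import backend as K

def edm_sketch(x):
    # pairwise distances via ||a - b||^2 = ||a||^2 - 2<a, b> + ||b||^2
    sq = K.sum(K.square(x), axis=-1, keepdims=True)            # (b, n, 1)
    dots = K.batch_dot(x, K.permute_dimensions(x, (0, 2, 1)))  # (b, n, n)
    sq_t = K.permute_dimensions(sq, (0, 2, 1))                 # (b, 1, n)
    return K.sqrt(K.maximum(sq - 2 * dots + sq_t, K.epsilon()))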
Code Example #17
File: MLPtest.py Project: YufengChenK/PredictionPUBG
def rmse(y_true, y_pred):
    return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))
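backend here is keras.backend imported under that name; the function plugs in wherever Keras accepts a metric or loss callable. A one-line usage example (model is a placeholder):

model.compile(optimizer='adam', loss='mse', metrics=[rmse])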
Code Example #18
File: utils.py Project: SveinnEirikur/HySpUnCoKit
def normalized_mse_percentage_error(y_true, y_pred):
    # Use the backend-agnostic K.max; the .max() method only exists on
    # Theano tensors, so the original breaks on the TensorFlow backend.
    y_true = y_true / K.max(y_true)
    y_pred = y_pred / K.max(y_pred)
    diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
    return 100. * K.mean(diff, axis=-1)
Code Example #19
File: utils.py Project: SveinnEirikur/HySpUnCoKit
def SAD(y_true, y_pred):
    y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    sad = -K.mean(y_true2 * y_pred2, axis=-1)

    return sad
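SAD returns the negated mean of the element-wise products of the two L2-normalized spectra, i.e. a negative cosine similarity up to a 1/N factor, so minimizing it shrinks the spectral angle. A one-line usage example (model is a placeholder):

model.compile(optimizer='adam', loss=SAD)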
Code Example #20
File: losses.py Project: SveinnEirikur/HySpUnCoKit
def normMSE(y_true, y_pred):
    y_true2 = K.l2_normalize(y_true + K.epsilon(), axis=-1)
    y_pred2 = K.l2_normalize(y_pred + K.epsilon(), axis=-1)
    # use the normalized tensors; the original took the MSE of the raw
    # inputs, leaving y_true2/y_pred2 unused despite the function's name
    mse = K.mean(K.square(y_true2 - y_pred2))
    return mse