Example #1
0
def qnet(observation_space, action_space, net_name, net_size):
    """Build a Q-value network over an Atari state/feature extractor.

    If `net_name` contains 'dueling', a dueling head is built:
    Q(s, a) = V(s) + A(s, a) - mean_a A(s, a); otherwise a plain
    DQN/DRQN head is used. Returns a Keras Model from state to Q-values.
    """
    num_actions = action_space.n
    net_size = int(net_size)
    net_name = net_name.lower()
    state, feature, net = _atari_state_feature_net(observation_space, net_name)

    if 'dueling' in net_name:
        # Separate hidden streams for the state value and the advantages.
        value_hidden = net(net_size, activation='relu')(feature)
        adv_hidden = net(net_size, activation='relu')(feature)
        value = Dense(1)(value_hidden)
        advantage = Dense(num_actions)(adv_hidden)
        mean_advantage = Lambda(lambda t: K.mean(t, axis=1))(advantage)
        ones = K.ones([1, num_actions])
        # Broadcast the (batch,) mean back to (batch, num_actions), negated.
        neg_mean_tiled = Lambda(
            lambda t: K.dot(K.expand_dims(t, axis=1), -ones))(mean_advantage)
        centered_advantage = layers.add([neg_mean_tiled, advantage])
        # Broadcast V(s) across all actions and add the centered advantages.
        value_tiled = Lambda(lambda t: K.dot(t, ones))(value)
        q_value = layers.add([value_tiled, centered_advantage])
    else:
        # Plain head: one hidden layer followed by a linear output.
        hidden = net(net_size, activation='relu')(feature)
        q_value = Dense(num_actions)(hidden)

    # build model
    return models.Model(inputs=state, outputs=q_value)
Example #2
0
 def call(self, inputs, **kwargs):
     """Decode via the tied weights: shift by the tied bias, then project
     through the transpose of the tied kernel; apply activation if set."""
     shifted = inputs - self.tied_weights[1]
     result = K.dot(shifted, K.transpose(self.tied_weights[0]))
     # A None activation means a purely linear decode.
     if self.activation is not None:
         result = self.activation(result)
     return result
def gram_matrix(x):
    """Return the Gram matrix of a single (unbatched) 3-D feature map.

    The map is reshaped to (channels, height * width) and the Gram matrix
    is the (channels, channels) inner product of that matrix with its
    transpose, as used in neural style transfer.
    """
    assert K.ndim(x) == 3
    if K.image_data_format() == 'channels_first':
        # BUGFIX: was K.flatten(x), which collapses the whole tensor to 1-D
        # and makes the "Gram matrix" a scalar. batch_flatten keeps the
        # leading channels axis, matching the channels_last branch below.
        features = K.batch_flatten(x)
    else:
        # Move channels to the front, then flatten the spatial dimensions.
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    # Dot product of the flattened feature map and the transpose of the
    # flattened feature map
    gram = K.dot(features, K.transpose(features))
    return gram
Example #4
0
 def call(self, x, **kwargs):
     """Apply self.comb_matrix along the joints axis of x.

     The joints axis is swapped to the last position, the tensor is
     flattened to 2-D, multiplied by the combination matrix, reshaped,
     and the axis swap is undone.
     """
     # BUGFIX: range() is immutable in Python 3, so the in-place swap on
     # the next line raised TypeError; materialize it as a list first.
     perm_dims = list(range(len(self.shape)))
     perm_dims[self.joints_dim], perm_dims[-1] = perm_dims[-1], perm_dims[self.joints_dim]
     perm_shape = [int(self.shape[i]) for i in perm_dims]
     x = permute_dimensions(x, perm_dims)
     x = reshape(x, [np.prod(perm_shape[:-1]), perm_shape[-1]])
     x = dot(x, self.comb_matrix)
     x = reshape(x, perm_shape)
     # A single-swap permutation is its own inverse, so reusing perm_dims
     # restores the original axis order.
     x = permute_dimensions(x, perm_dims)
     return x
Example #5
0
File: tsne.py  Project: tobytoy/MotionGAN
 def tsne(P, activations):
     """t-SNE loss: KL divergence between the target joint probabilities P
     and the Student-t affinities Q computed from `activations`.

     NOTE(review): `d` and `n` are free variables here — presumably the
     embedding dimensionality and the batch size bound in an enclosing
     scope (the commented-out line suggests `d` was once derived from the
     activations' shape). Confirm they are defined before this is called.
     """
     #     d = K.shape(activations)[1]
     v = d - 1.  # degrees of freedom of the Student-t kernel
     eps = K.variable(
         10e-15
     )  # needs to be at least 10e-8 to get anything after Q /= K.sum(Q)
     # Pairwise squared distances via ||a||^2 + ||b||^2 - 2 a.b (the
     # row-vector sum_act is added after the transpose-dot term below).
     sum_act = K.sum(K.square(activations), axis=1)
     Q = K.reshape(sum_act, [-1, 1]) + -2 * K.dot(activations,
                                                  K.transpose(activations))
     Q = (sum_act + Q) / v
     # Student-t kernel with v degrees of freedom.
     Q = K.pow(1 + Q, -(v + 1) / 2)
     # Zero the diagonal (self-affinities), then normalize to a distribution.
     Q *= K.variable(1 - np.eye(n))
     Q /= K.sum(Q)
     Q = K.maximum(Q, eps)
     # KL(P || Q), summed over all pairs.
     C = K.log((P + eps) / (Q + eps))
     C = K.sum(P * C)
     return C
Example #6
0
 def call(self, x):
     """NoisyNet dense forward pass: affine transform, then activation."""
     pre_activation = K.bias_add(
         K.dot(x, self.noisynet_kernel), self.noisynet_bias)
     # The activation is applied unconditionally, so it must be callable.
     return self.activation(pre_activation)