Code example #1
    def build_critic(self):
        critic_embedding = embed_seq(input_seq=self.input_,
                                     from_=self.dimension,
                                     to_=self.input_embed,
                                     is_training=self.is_training,
                                     BN=True,
                                     initializer=self.initializer)
        critic_encoding = encode_seq(input_seq=critic_embedding,
                                     input_dim=self.input_embed,
                                     num_stacks=self.num_stacks,
                                     num_heads=self.num_heads,
                                     num_neurons=self.num_neurons,
                                     is_training=self.is_training)
        frame = full_glimpse(
            ref=critic_encoding,
            from_=self.input_embed,
            to_=self.num_units,
            initializer=tf.contrib.layers.xavier_initializer(),
        )  # Glimpse on critic_encoding -> [batch_size, input_embed]

        with tf.variable_scope("ffn"):  #  2 dense layers for predictions
            h0 = tf.layers.dense(frame,
                                 self.num_neurons_critic,
                                 activation=tf.nn.relu,
                                 kernel_initializer=self.initializer)
            w1 = tf.get_variable("w1", [self.num_neurons_critic, 1],
                                 initializer=self.initializer)
            b1 = tf.Variable(self.init_B, name="b1")
            self.predictions = tf.squeeze(tf.matmul(h0, w1) + b1)
            tf.summary.scalar('predictions_mean',
                              tf.reduce_mean(self.predictions))
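The `full_glimpse` helper is not shown in this example; the trailing comment suggests it collapses the encoder output into one vector per batch element via content-based attention. Below is a minimal sketch of such a glimpse against the same TF1-style API; the variable names (`W_ref_g`, `v_g`) and the exact scoring are assumptions, not the original helper.

import tensorflow as tf  # TF1-style API, matching the example above

def full_glimpse_sketch(ref, from_, to_, initializer):
    """Content-based attention over ref [batch, seq_len, from_] -> [batch, from_]."""
    with tf.variable_scope("glimpse_sketch"):
        W_ref_g = tf.get_variable("W_ref_g", [1, from_, to_], initializer=initializer)
        v_g = tf.get_variable("v_g", [to_], initializer=initializer)
        encoded_ref = tf.nn.conv1d(ref, W_ref_g, 1, "VALID")         # [batch, seq_len, to_]
        scores = tf.reduce_sum(v_g * tf.tanh(encoded_ref), axis=-1)  # [batch, seq_len]
        attention = tf.nn.softmax(scores)
        # Glimpse = attention-weighted sum of the original reference vectors
        return tf.reduce_sum(ref * tf.expand_dims(attention, -1), axis=1)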
Code example #2
# numpy and the project's own helpers (DATA_ROOT, MAXLEN, shuffle, encode_seq,
# encode_seq_one_hot and the AAindex matrices such as OGAK980101) are assumed
# to be defined or imported in the surrounding module.
import numpy


def load_data(parent_id, go_id):
    data1 = list()
    data2 = list()
    labels = list()
    positive1 = list()
    negative1 = list()
    positive2 = list()
    negative2 = list()

    with open(DATA_ROOT + parent_id + '/' + go_id + '.txt') as f:
        for line in f:
            line = line.strip().split(' ')
            label = int(line[0])
            seq = line[2][:MAXLEN]
            sq1 = encode_seq_one_hot(seq, maxlen=MAXLEN)
            # Per-residue property encodings (AAindex matrices); computed here
            # but unused below: only the one-hot encoding sq1 is kept.
            sq2 = encode_seq(OGAK980101, seq, maxlen=MAXLEN)
            sq3 = encode_seq(MEHP950102, seq, maxlen=MAXLEN)
            sq4 = encode_seq(CROG050101, seq, maxlen=MAXLEN)
            sq5 = encode_seq(TOBD000101, seq, maxlen=MAXLEN)
            sq6 = encode_seq(ALTS910101, seq, maxlen=MAXLEN)
            if label == 1:
                positive1.append([sq1])
                positive2.append(sq1)
            else:
                negative1.append([sq1])
                negative2.append(sq1)
    shuffle(negative1, negative2, seed=0)  # project helper: shuffles both lists in unison (sketched below)
    n = min(len(positive1), len(negative1))
    data1 = negative1[:n] + positive1[:n]
    data2 = negative2[:n] + positive2[:n]
    labels = [0.0] * n + [1.0] * n
    # Previous was 30
    shuffle(data1, data2, labels, seed=0)
    data = (
        numpy.array(data1, dtype='float32'),
        numpy.array(data2, dtype='float32'))
    return (numpy.array(labels), data)
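The `shuffle(...)` used above is not `random.shuffle`: judging from the call sites it takes several parallel lists plus a `seed` and reorders them in unison so sequences and labels stay aligned. A minimal sketch of such a helper, under that assumption (the real project helper may differ):

import random

def shuffle(*lists, seed=None):
    """Hypothetical helper: shuffle several parallel lists in place with one permutation."""
    rng = random.Random(seed)
    order = list(range(len(lists[0])))
    rng.shuffle(order)
    for lst in lists:
        lst[:] = [lst[i] for i in order]

With a fixed seed (seed=0 above) the ordering is reproducible, and applying the same permutation to data1, data2 and labels keeps them aligned.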
Code example #3
    def encode(self):

        encoded_output, encoded_state = utils.encode_seq(
            input_seq=self.q1,
            seq_len=self.len1,
            word_embeddings=self.word_embeddings,
            num_neurons=self.num_neurons)  # [batch_size, 2*num_neurons]

        with tf.variable_scope("variational_inference"):  # Variational inference
            mean = utils.linear(encoded_state, self.hidden_size,
                                scope='mean')  # [batch_size, n_hidden]
            logsigm = utils.linear(encoded_state,
                                   self.hidden_size,
                                   scope='logsigm')  # [batch_size, n_hidden]
            self.mean, self.logsigm = mean, logsigm

            # Gaussian KL: KL( N(mu, diag(sigma^2)) || N(0, I) ) = -0.5 * sum_d(1 + log sigma_d^2 - sigma_d^2 - mu_d^2)
            klds = -0.5 * (tf.reduce_sum(logsigm, 1) +
                           tf.cast(tf.shape(mean)[1], tf.float32) -
                           tf.reduce_sum(tf.exp(logsigm), 1) -
                           tf.reduce_sum(tf.square(mean), 1)
                           )  # KLD(q(z|x), N(0,1))     tensor [batch_size]
            utils.variable_summaries(
                'klds', klds)  # posterior distribution close to prior N(0,1)
            self.kld = tf.reduce_mean(klds, 0)  # mean over batches: scalar

            h_ = tf.get_variable("GO", [1, self.hidden_size],
                                 initializer=self.initializer)
            h_ = tf.tile(h_, [self.batch_size, 1])  # trainable tensor: decoder init_state[1]

            eps = tf.random_normal((self.batch_size, self.hidden_size), 0, 1)
            # Reparameterization: sample from the latent intent space -> decoder init_state[0]
            self.doc_vec = tf.multiply(tf.exp(logsigm), eps) + mean
            self.doc_vec = self.doc_vec, h_  # tuple state (z, h)
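For reference, a small NumPy sketch of the closed-form Gaussian KL term the snippet computes, KL(N(mu, diag(sigma^2)) || N(0, I)), plus a Monte-Carlo check. It assumes logsigm stores log(sigma^2), the convention implied by the analytic expression above.

import numpy as np

def gaussian_kld(mean, logsigm):
    """Closed-form KL( N(mean, diag(exp(logsigm))) || N(0, I) ), per example."""
    return -0.5 * np.sum(1.0 + logsigm - np.exp(logsigm) - mean ** 2, axis=-1)

# Monte-Carlo sanity check of the closed form (assumes logsigm = log(sigma^2)).
rng = np.random.default_rng(0)
mean = rng.normal(size=4)
logsigm = rng.normal(size=4)
z = mean + np.exp(0.5 * logsigm) * rng.normal(size=(200000, 4))   # samples from q(z|x)
log_q = -0.5 * np.sum(np.log(2 * np.pi) + logsigm + (z - mean) ** 2 / np.exp(logsigm), axis=-1)
log_p = -0.5 * np.sum(np.log(2 * np.pi) + z ** 2, axis=-1)        # standard normal prior
print(gaussian_kld(mean, logsigm), (log_q - log_p).mean())        # the two values should roughly agree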
Code example #4
    def encode_decode(self):
        actor_embedding = embed_seq(input_seq=self.input_,
                                    from_=self.dimension,
                                    to_=self.input_embed,
                                    is_training=self.is_training,
                                    BN=True,
                                    initializer=self.initializer)
        actor_encoding = encode_seq(input_seq=actor_embedding,
                                    input_dim=self.input_embed,
                                    num_stacks=self.num_stacks,
                                    num_heads=self.num_heads,
                                    num_neurons=self.num_neurons,
                                    is_training=self.is_training)
        if not self.is_training:  # at inference, tile the single encoding across the batch
            actor_encoding = tf.tile(actor_encoding, [self.batch_size, 1, 1])

        idx_list, log_probs, entropies = [], [], []  # tour indices, log-probs, entropies
        mask = tf.zeros((self.batch_size, self.max_length))  # mask for actions

        n_hidden = actor_encoding.get_shape().as_list()[2]  # input_embed
        W_ref = tf.get_variable("W_ref", [1, n_hidden, self.num_units],
                                initializer=self.initializer)
        W_q = tf.get_variable("W_q", [self.query_dim, self.num_units],
                              initializer=self.initializer)
        v = tf.get_variable("v", [self.num_units],
                            initializer=self.initializer)

        # actor_encoding is the reference for actions: [batch_size, seq_length, n_hidden]
        encoded_ref = tf.nn.conv1d(actor_encoding, W_ref, 1, "VALID")
        query1 = tf.zeros((self.batch_size, n_hidden))  # initial state
        query2 = tf.zeros((self.batch_size, n_hidden))  # previous state
        query3 = tf.zeros((self.batch_size, n_hidden))  # previous previous state

        W_1 = tf.get_variable(
            "W_1", [n_hidden, self.query_dim],
            initializer=self.initializer)  # update trajectory (state)
        W_2 = tf.get_variable("W_2", [n_hidden, self.query_dim],
                              initializer=self.initializer)
        W_3 = tf.get_variable("W_3", [n_hidden, self.query_dim],
                              initializer=self.initializer)

        for step in range(self.max_length):  # sample from POINTER
            query = tf.nn.relu(
                tf.matmul(query1, W_1) + tf.matmul(query2, W_2) +
                tf.matmul(query3, W_3))
            logits = pointer(encoded_ref=encoded_ref,
                             query=query,
                             mask=mask,
                             W_ref=W_ref,
                             W_q=W_q,
                             v=v,
                             C=config.C,
                             temperature=config.temperature)
            prob = distr.Categorical(logits)  # logits = masked_scores
            idx = prob.sample()

            idx_list.append(idx)  # tour index
            log_probs.append(prob.log_prob(idx))  # log prob
            entropies.append(prob.entropy())  # entropies
            mask = mask + tf.one_hot(idx, self.max_length)  # mask

            idx_ = tf.stack([tf.range(self.batch_size, dtype=tf.int32), idx],
                            1)  # idx with batch
            query3 = query2
            query2 = query1
            query1 = tf.gather_nd(actor_encoding, idx_)  # update trajectory (state)

        idx_list.append(idx_list[0])  # return to start
        self.tour = tf.stack(idx_list, axis=1)  # permutations
        self.log_prob = tf.add_n(log_probs)  # corresponding log-probability for backprop
        self.entropies = tf.add_n(entropies)
        tf.summary.scalar('log_prob_mean', tf.reduce_mean(self.log_prob))
        tf.summary.scalar('entropies_mean', tf.reduce_mean(self.entropies))
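The `pointer` helper called inside the loop is not shown. In pointer-network decoders of this kind, the query attends over the projected reference, the scores are squashed with C * tanh(./temperature) to control entropy, and already-visited positions are masked out. A minimal sketch under those assumptions (not necessarily the original helper):

import tensorflow as tf  # TF1-style API, matching the example above

def pointer_sketch(encoded_ref, query, mask, W_ref, W_q, v, C=10., temperature=1.0):
    """Masked, clipped attention scores over reference positions -> [batch, seq_len]."""
    # W_ref is accepted only to mirror the call above; encoded_ref is assumed to be
    # the reference already projected by W_ref (as done once before the loop).
    encoded_query = tf.expand_dims(tf.matmul(query, W_q), 1)                   # [batch, 1, num_units]
    scores = tf.reduce_sum(v * tf.tanh(encoded_ref + encoded_query), axis=-1)  # [batch, seq_len]
    scores = C * tf.tanh(scores / temperature)                                 # clip logits to [-C, C]
    return scores - 1e9 * mask                                                 # forbid visited positions

Feeding these masked scores as the logits of the Categorical distribution above makes already-selected positions effectively unsampleable.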