Code example #1
File: old_ops.py Project: jonbruner/PixelRNN
def skew(inputs, scope="skew"):
    with tf.name_scope(scope):
        batch, height, width, channel = get_shape(inputs)
        new_width = width + height - 1
        skewed_rows = []
        # inputs = tf.zeros([batch, width * 2 - 1, height, channel])
        #rows = tf.unpack(tf.transpose(inputs, [1, 0, 3, 2])) # [height, batch, channel, width]
        rows = tf.split(1, height, inputs)  # [batch, 1, width, channel]

        for i, row in enumerate(rows):
            transposed_row = tf.transpose(tf.squeeze(row, [1]), [0, 2, 1])
            reshaped_row = tf.reshape(transposed_row, [-1, width]) # [batch * channel, width]
            padded_row = tf.pad(reshaped_row, ((0, 0), (i, height - 1 - i)))

            unsqueezed_row = tf.reshape(padded_row, [-1, channel, new_width])  # [batch, channel, new_width]
            new_row = tf.transpose(unsqueezed_row, [0, 2, 1])  # [batch, new_width, channel]

            assert get_shape(new_row) == [batch, new_width, channel], "wrong shape of skewed row"
            skewed_rows.append(new_row)

        skewed_inputs = tf.pack(skewed_rows, axis=1, name="skewed_inputs")
        desired_shape = [None, height, new_width, channel]
        skewed_shape = get_shape(skewed_inputs)
        assert skewed_shape == desired_shape, "wrong shape of skewed input. Actual {}; Expected {}".format(skewed_shape, desired_shape)

    return skewed_inputs
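
A minimal NumPy sketch of the same operation (not from the project) may make the padding arithmetic clearer: row i is shifted i pixels to the right, so input diagonals line up as output columns.

    import numpy as np

    def skew_np(x):  # x: [batch, height, width, channel]
        b, h, w, c = x.shape
        out = np.zeros((b, h, w + h - 1, c), dtype=x.dtype)
        for i in range(h):
            out[:, i, i:i + w, :] = x[:, i, :, :]  # shift row i right by i pixels
        return out

    assert skew_np(np.ones((2, 4, 4, 3))).shape == (2, 4, 7, 3)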
Code example #2
File: policy_graph.py Project: hybug/test_ppo
        def feature_net(self, image, rnn, a, r, state_in, scope="feature"):
            shape = get_shape(image)
            image = tf.cast(image, tf.float32) / 255.0
            with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
                image = tf.reshape(image, [-1] + shape[-3:])
                filter = [16, 32, 32]
                kernel = [(3, 3), (3, 3), (5, 3)]
                stride = [(1, 2), (1, 2), (2, 1)]
                for i in range(len(filter)):
                    image = tf.layers.conv2d(
                        image,
                        filters=filter[i],
                        kernel_size=kernel[i][0],
                        strides=stride[i][0],
                        padding="valid",
                        activation=None,
                        name="conv_%d" % i)
                    image = tf.layers.max_pooling2d(
                        image,
                        pool_size=kernel[i][1],
                        strides=stride[i][1],
                        padding="valid",
                        name="maxpool_%d" % i)
                    image = self.resblock(
                        image, "res0_%d" % i)
                    # image = self.resblock(
                    #     image, "res1_%d" % i)
                image = tf.nn.relu(image)

                new_shape = get_shape(image)
                feature = tf.reshape(
                    image, [shape[0], shape[1], new_shape[1] * new_shape[2] * new_shape[3]])

                a_onehot = tf.one_hot(
                    a, depth=self.act_space, dtype=tf.float32)

                feature = tf.layers.dense(feature, 256, tf.nn.relu, name="feature")
                feature = tf.concat([feature, a_onehot, r[:, :, None]], axis=-1)

                if self.use_hrnn:
                    initial_state = tf.split(state_in, [1, -1], axis=-1)
                    feature, count_out, state_out = rnn(
                        feature, initial_state=initial_state)
                    state_out = tf.concat([count_out, state_out], axis=-1)
                elif self.use_rmc:
                    initial_state = [state_in]
                    feature, state_out = rnn(
                        feature, initial_state=initial_state)
                elif self.use_amc:
                    initial_state = tf.split(state_in, [1, 64, 64, -1], axis=-1)
                    feature, count_out, ns_out, pos_out, state_out = rnn(
                        feature, initial_state=initial_state)
                    state_out = tf.concat([count_out, ns_out, pos_out, state_out], axis=-1)
                else:
                    c_in, h_in = tf.split(state_in, 2, axis=-1)
                    feature, c_out, h_out = rnn(
                        feature, initial_state=[c_in, h_in])
                    state_out = tf.concat([c_out, h_out], axis=-1)

            return feature, state_out
Code example #3
 def _last_relevant(seq, length):
     batch_size = get_shape(seq)[0]
     max_length = get_shape(seq)[1]
     input_size = get_shape(seq)[2]
     index = tf.range(0, batch_size) * max_length + (length - 1)
     flat = tf.reshape(seq, [-1, input_size])
     return tf.gather(flat, index)
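
The flatten-and-gather trick selects each sequence's last valid RNN output in a single gather: flatten batch and time into one axis, then index row b * max_length + (length[b] - 1). An illustrative NumPy analogue (my own, not from the source):

    import numpy as np

    seq = np.arange(2 * 3 * 4).reshape(2, 3, 4)  # [batch=2, max_length=3, input_size=4]
    length = np.array([2, 3])                    # valid timesteps per sequence
    index = np.arange(2) * 3 + (length - 1)      # flat row index of each last valid step
    last = seq.reshape(-1, 4)[index]             # == seq[b, length[b] - 1]
    assert (last == np.stack([seq[0, 1], seq[1, 2]])).all()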
Code example #4
File: policy_graph.py Project: hybug/test_ppo
        def control_net(self, feature, scope="pixel_control"):
            shape = get_shape(feature)
            with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
                feature = tf.reshape(feature, [-1, shape[-1]])
                feature = tf.layers.dense(feature,
                                          7 * 7 * 32,
                                          tf.nn.relu,
                                          name="feature")
                image = tf.reshape(feature, [-1, 7, 7, 32])
                image = tf.nn.conv2d_transpose(
                    image,
                    filter=tf.get_variable(name="deconv", shape=[9, 9, 32,
                                                                 32]),
                    output_shape=[get_shape(feature)[0], 21, 21, 32],
                    strides=2,
                    padding="VALID")
                image = tf.nn.relu(image)
                image = tf.nn.conv2d_transpose(
                    image,
                    filter=tf.get_variable(name="control",
                                           shape=[4, 4, self.act_space, 32]),
                    output_shape=[
                        get_shape(feature)[0], 21, 21, self.act_space
                    ],
                    strides=1,
                    padding="SAME")

                image = tf.reshape(image,
                                   shape=[shape[0], shape[1]] +
                                   get_shape(image)[-3:])

            return image
Code example #5
    def sent_level_attention(self):
        with tf.variable_scope('sent-level') as scope:
            sent_inputs = tf.reshape(self.word_outputs, [-1, self.max_sent_length, 2 * self.cell_dim])

            # sentence encoder
            cell_fw = rnn.GRUCell(self.cell_dim, name='cell_fw')
            cell_bw = rnn.GRUCell(self.cell_dim, name='cell_bw')

            init_state_fw = tf.tile(tf.get_variable('init_state_fw',
                                                  shape=[1, self.cell_dim],
                                                  initializer=tf.constant_initializer(0)),
                                  multiples=[get_shape(sent_inputs)[0], 1])
            init_state_bw = tf.tile(tf.get_variable('init_state_bw',
                                                  shape=[1, self.cell_dim],
                                                  initializer=tf.constant_initializer(0)),
                                  multiples=[get_shape(sent_inputs)[0], 1])

            rnn_outputs, _ = bidirectional_rnn(cell_fw=cell_fw,
                                             cell_bw=cell_bw,
                                             inputs=sent_inputs,
                                             input_lengths=self.sent_lengths,
                                             initial_state_fw=init_state_fw,
                                             initial_state_bw=init_state_bw,
                                             scope=scope)

            sent_outputs, sent_att_weights = attention(inputs=rnn_outputs,
                                                     att_dim=self.att_dim,
                                                     sequence_lengths=self.sent_lengths)
            self.sent_outputs = tf.layers.dropout(sent_outputs, self.dropout_rate, training=self.is_training)
Code example #6
def make_demo(sess, dphs, enqueue_op):
    burn_in = FLAGS.burn_in
    seqlen = FLAGS.seqlen + burn_in
    n_step = FLAGS.n_step
    gamma = FLAGS.gamma

    if FLAGS.use_all_demos:
        names = glob.glob("/opt/tiger/test_ppo/Demos/*.demo")
    else:
        names = glob.glob("/opt/tiger/test_ppo/Demos/*_0.demo")
    fd = OrderedDict()
    for name in names:
        dicseg = QueueReader.read(name)[0]
        dicseg["n_step_r"] = get_n_step_rewards(dicseg["r"], n_step, gamma)
        while len(dicseg["s"]) > burn_in:
            next_seg = dict()

            next_seg["s"] = padding(dicseg["s"][:seqlen], seqlen,
                                    dicseg["s"][0], np.float32)
            next_seg["prev_a"] = padding(dicseg["a"][:seqlen], seqlen,
                                         dicseg["a"][0], np.int32)
            next_seg["a"] = padding(dicseg["a"][1:seqlen + 1], seqlen,
                                    dicseg["a"][0], np.int32)
            next_seg["r"] = padding(dicseg["a"][:seqlen], seqlen,
                                    dicseg["r"][0], np.float32)
            next_seg["n_step_r"] = padding(dicseg["n_step_r"][:seqlen], seqlen,
                                           dicseg["n_step_r"][0], np.float32)
            next_seg["state_in"] = np.zeros(get_shape(dphs["state_in"])[1:])
            next_seg["slots"] = padding([1] * len(dicseg["s"][:-1][:seqlen]),
                                        seqlen, 0, np.int32)

            next_seg["bootstrap_s"] = padding(
                dicseg["s"][seqlen:seqlen + n_step], n_step, dicseg["s"][0],
                np.float32)
            next_seg["bootstrap_prev_a"] = padding(
                dicseg["a"][seqlen:seqlen + n_step], n_step, dicseg["a"][0],
                np.int32)
            next_seg["bootstrap_slots"] = padding(
                [1] * len(dicseg["s"][:-1][seqlen:seqlen + n_step]), n_step, 0,
                np.int32)

            next_seg["a_logits"] = np.zeros(get_shape(dphs["a_logits"])[1:])
            next_seg["v_cur"] = np.zeros(get_shape(dphs["v_cur"])[1:])
            next_seg["v_tar"] = np.zeros(get_shape(dphs["v_tar"])[1:])
            next_seg["adv"] = np.zeros(get_shape(dphs["adv"])[1:])

            dicseg = {k: v[burn_in:] for k, v in dicseg.items()}

            for key in dphs:
                if key == "priority":
                    fd[dphs[key]] = [-1000000]
                else:
                    fd[dphs[key]] = [next_seg[key]]

            sess.run(enqueue_op, feed_dict=fd)
Code example #7
File: coex.py Project: hybug/test_ppo
def coex(s, s1, a, act_size, layers=2, activation=tf.nn.leaky_relu, scope="coex"):
    # s1 = tf.stop_gradient(s1)

    s_shape = get_shape(s)
    s1_shape = get_shape(s1)
    assert len(s_shape) > 3
    assert len(s1_shape) > 3
    s_size = s_shape[-1]
    s1_size = s1_shape[-1]
    assert s_size == s1_size
    feature_size = s_size

    s = tf.reshape(
        s, shape=s_shape[:-3] + [s_shape[-3] * s_shape[-2], s_size])
    s1 = tf.reshape(
        s1, shape=s1_shape[:-3] + [s1_shape[-3] * s1_shape[-2], s1_size])

    with tf.variable_scope(scope):
        with tf.variable_scope("attentive_dynamic_model"):
            e_logits = tf.concat([s1 - s, s], axis=-1)
            for i in range(layers - 1):
                e_logits = tf.layers.dense(
                    e_logits,
                    feature_size,
                    activation=activation,
                    name="e_mlp_%d" % i)
            e_logits = tf.layers.dense(
                e_logits,
                act_size,
                activation=None,
                name="e_logits")

            alpha_logits = s1
            for i in range(layers - 1):
                alpha_logits = tf.layers.dense(
                    alpha_logits,
                    feature_size,
                    activation=activation,
                    name="alpha_mlp_%d" % i)
            alpha_logits = tf.layers.dense(
                alpha_logits,
                act_size,
                activation=None,
                name="alpha_logits")

            alpha = tf.nn.softmax(alpha_logits, axis=-2)

            a_logits = tf.reduce_sum(e_logits * alpha, axis=-2)

            i_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=a, logits=a_logits)

        tf.summary.scalar("i_loss", tf.reduce_mean(i_loss))

    return i_loss
Code example #8
    def _init_word_encoder(self):
        '''
        Build Word Encoder part as in the paper
        :return:
        '''
        with tf.variable_scope('word-encoder') as scope:

            # collapse num docs and num sentences into one axis: (num sentences, num words, embedding)
            # treats each sentence independently of its document and position
            word_inputs = tf.reshape(self.embedded_inputs,
                                     [-1, self.max_word_length, self.emb_size])

            # containing the length of each sentence
            word_lengths = tf.reshape(self.word_lengths, [-1])

            # define forward and backward GRU cells
            cell_fw = rnn.GRUCell(self.cell_dim, name='cell_fw')
            cell_bw = rnn.GRUCell(self.cell_dim, name='cell_bw')

            # initialize state of forward GRU cell as 0's, for each sentence in batch
            init_state_fw = tf.tile(tf.get_variable(
                'init_state_fw',
                shape=[1, self.cell_dim],
                initializer=tf.constant_initializer(0)),
                                    multiples=[get_shape(word_inputs)[0], 1])
            # same but for backward GRU cell
            init_state_bw = tf.tile(tf.get_variable(
                'init_state_bw',
                shape=[1, self.cell_dim],
                initializer=tf.constant_initializer(0)),
                                    multiples=[get_shape(word_inputs)[0], 1])

            # bidirectional_rnn returns (outputs, state); only the per-step outputs are kept, since attention runs over them
            rnn_outputs, _ = bidirectional_rnn(cell_fw=cell_fw,
                                               cell_bw=cell_bw,
                                               inputs=word_inputs,
                                               input_lengths=word_lengths,
                                               initial_state_fw=init_state_fw,
                                               initial_state_bw=init_state_bw,
                                               scope=scope)
            # rnn_outputs.shape = [number sentences, number words, 2*self.cell_dim]

            # word_outputs are the sentence vectors, word_att_weights the attention weights (alpha)
            # output dim of word_outputs: (num sentences, 1, 2 * cell_dim); sentence vectors as in the paper
            word_outputs, word_att_weights = attention(
                inputs=rnn_outputs,
                att_dim=self.att_dim,
                sequence_lengths=word_lengths)

            # apply dropout, only activate during training
            self.word_outputs = tf.layers.dropout(word_outputs,
                                                  self.dropout_rate,
                                                  training=self.is_training)
Code example #9
def get_channel_indices(args, nrms=5.):
    # Initial moments
    if args.chanran:
        aux1, aux2 = args.chanran
        ind = range(aux1, aux2 + 1)
    elif args.chans:
        ichans = args.chans
        nrows, ncols = ut.get_shape(args, len(ichans))
        args.logger.info('Number of channels = %i', len(ichans))
        return ichans, nrows, ncols
    else:
        raise ValueError('No channels were selected')

    # Get rms
    rms = quick_rms(args.cube.unmasked_data[ind, :, :].value) * args.cube.unit
    args.logger.info('Preliminary rms: %s', rms)
    mask = args.cube >= nrms * rms
    mask = np.squeeze(mask.include())
    nvalid = np.sum(mask, axis=(-1, -2))

    # Final channel limits
    for i, nval in zip(ind, nvalid[ind]):
        if nval == 0:
            args.logger.info('Channel %i below threshold', i)
            aux1 = i + 1
        else:
            break
    for i, nval in zip(ind[::-1], nvalid[ind][::-1]):
        if nval == 0:
            args.logger.info('Channel %i below threshold', i)
            aux2 = i - 1
        else:
            break
    args.logger.info('Final channel range: %i to %i, step %i', aux1, aux2,
                     args.every[0])

    # Final results
    ichans = range(aux1, aux2 + 1, args.every[0])
    nrows, ncols = ut.get_shape(args, len(ichans), minimize=True)
    args.logger.info('Number of channels = %i', len(ichans))
    args.logger.info('Figure size = %i x %i', nrows, ncols)
    while nrows * ncols < len(ichans):
        i0 = ichans[0]
        i1 = ichans[-1]
        if nvalid[i0] <= nvalid[i1]:
            args.logger.info('Dropping channel %i to match shape', i0)
            ichans = ichans[1:]
        else:
            args.logger.info('Dropping channel %i to match shape', i1)
            ichans = ichans[:-1]

    return ichans, nrows, ncols
Code example #10
File: KLDiv.py Project: hybug/test_ppo
def KL_from_gaussians(p_mus, p_sigmas, q_mus, q_sigmas):
    k = get_shape(p_mus)[-1]
    assert k == get_shape(p_sigmas)[-1]
    assert k == get_shape(q_mus)[-1]
    assert k == get_shape(q_sigmas)[-1]

    trace_term = tf.reduce_sum(p_sigmas / q_sigmas, axis=-1)
    quadratic_term = tf.reduce_sum((q_mus - p_mus)**2 / q_sigmas, axis=-1)
    k_term = tf.cast(k, tf.float32)
    log_det_term = tf.reduce_sum(tf.math.log(q_sigmas) - tf.math.log(p_sigmas),
                                 axis=-1)

    kl = 0.5 * (trace_term + quadratic_term - k_term + log_det_term)
    return kl
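
Reading p_sigmas and q_sigmas as per-dimension variances (an assumption; the code does not say), this is the standard closed form for KL(p || q) between diagonal Gaussians. A NumPy restatement, useful as a sanity check:

    import numpy as np

    def kl_diag_np(p_mu, p_var, q_mu, q_var):
        # identical closed form; the "-1" per dimension sums to the k term
        return 0.5 * np.sum(p_var / q_var + (q_mu - p_mu) ** 2 / q_var - 1.0
                            + np.log(q_var) - np.log(p_var), axis=-1)

    assert np.isclose(kl_diag_np(np.zeros(3), np.ones(3), np.zeros(3), np.ones(3)), 0.0)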
Code example #11
    def _init_sent_encoder(self):
        '''
        Build Sentence Encoder part as in the paper
        :return:
        '''
        with tf.variable_scope('sent-encoder') as scope:

            # input shape: (number docs, max sentence per document, 2*cell_dim)
            sent_inputs = tf.reshape(
                self.word_outputs,
                [-1, self.max_sent_length, 2 * self.cell_dim])

            # sentence encoder
            cell_fw = rnn.GRUCell(self.cell_dim, name='cell_fw')
            cell_bw = rnn.GRUCell(self.cell_dim, name='cell_bw')

            # for each document get the hidden state array
            init_state_fw = tf.tile(tf.get_variable(
                'init_state_fw',
                shape=[1, self.cell_dim],
                initializer=tf.constant_initializer(0)),
                                    multiples=[get_shape(sent_inputs)[0], 1])
            init_state_bw = tf.tile(tf.get_variable(
                'init_state_bw',
                shape=[1, self.cell_dim],
                initializer=tf.constant_initializer(0)),
                                    multiples=[get_shape(sent_inputs)[0], 1])

            rnn_outputs, _ = bidirectional_rnn(cell_fw=cell_fw,
                                               cell_bw=cell_bw,
                                               inputs=sent_inputs,
                                               input_lengths=self.sent_lengths,
                                               initial_state_fw=init_state_fw,
                                               initial_state_bw=init_state_bw,
                                               scope=scope)
            # rnn_outputs.shape = [num docs, number sentences, 2*self.cell_dim]

            # Returns document vectors
            # output dim for word_outputs (num docs,1,2* hidden state cell dim); sentence vectors as in paper
            sent_outputs, sent_att_weights = attention(
                inputs=rnn_outputs,
                att_dim=self.att_dim,
                sequence_lengths=self.sent_lengths)

            #dropout
            self.sent_outputs = tf.layers.dropout(sent_outputs,
                                                  self.dropout_rate,
                                                  training=self.is_training)
Code example #12
File: old_ops.py Project: jonbruner/PixelRNN
def diagonal_bilstm(inputs, hidden_dims, use_residual=False, scope='diagonal_bilstm'):
    with tf.variable_scope(scope):
        def reverse(inputs):
            return tf.reverse(inputs, [False, False, True, False])

        output_state_fw = diagonal_lstm(inputs, hidden_dims, scope='output_state_fw')
        output_state_bw = reverse(diagonal_lstm(reverse(inputs), hidden_dims, scope='output_state_bw'))


        if use_residual:
            #conv2d(input, num_outputs, kernel_height, kernel_width, mask_type='A', scope='conv2d'):
            residual_state_fw = conv2d(output_state_fw, hidden_dims * 2, 1, 1, "B", scope="residual_fw")
            output_state_fw = residual_state_fw + inputs

            residual_state_bw = conv2d(output_state_bw, hidden_dims * 2, 1, 1, "B", scope="residual_bw")
            output_state_bw = residual_state_bw + inputs

        batch, height, width, channel = get_shape(output_state_bw)

        output_state_bw_except_last = tf.slice(output_state_bw, [0, 0, 0, 0], [-1, height-1, -1, -1])
        output_state_bw_only_last = tf.slice(output_state_bw, [0, height-1, 0, 0], [-1, 1, -1, -1])
        dummy_zeros = tf.zeros_like(output_state_bw_only_last)

        output_state_bw_with_last_zeros = tf.concat(1, [output_state_bw_except_last, dummy_zeros])

        return output_state_fw + output_state_bw_with_last_zeros
Code example #13
def categorical(logits):
    shape = get_shape(logits)
    if len(shape) > 2:
        logits = tf.reshape(logits, shape=[-1, shape[-1]])
    samples = tf.random.categorical(logits, 1)
    samples = tf.reshape(samples, shape=shape[:-1] + [1])
    return samples
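
tf.random.categorical only accepts 2-D logits, so higher-rank inputs are flattened to [N, num_classes], sampled, and reshaped back with a trailing singleton axis. A NumPy sketch of the shape bookkeeping (argmax standing in for sampling; illustrative only):

    import numpy as np

    logits = np.random.randn(4, 5, 6)            # e.g. [batch, time, num_classes]
    flat = logits.reshape(-1, logits.shape[-1])  # [batch * time, num_classes]
    samples = flat.argmax(axis=-1)
    samples = samples.reshape(logits.shape[:-1] + (1,))
    assert samples.shape == (4, 5, 1)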
Code example #14
def load_act_model(load_file, model_scope, env, nenvs=1, num_actions=5):
    print('Loading from...', load_file)

    ob_shape = utils.get_shape(env.observation_space)
    ac_space = env.action_space

    sess = tf.get_default_session()

    act = CnnPolicy(sess,
                    ob_shape,
                    ac_space,
                    nenvs,
                    1,
                    model_scope,
                    reuse=False)

    with tf.variable_scope(model_scope):
        params = tf.trainable_variables(model_scope)

    loaded_params = joblib.load(Config.MODEL_DIR + load_file)
    restores = []
    for p, loaded_p in zip(params, loaded_params):
        restores.append(p.assign(loaded_p))
    sess.run(restores)

    return act
Code example #15
def max_over_samples(inputs, context: ModularContext):
    """ Maximise the value of inputs over samples."""
    if context.mode == ModularMode.SAMPLES_EVALUATION:
        shape = get_shape(inputs)
        inputs = tf.reshape(inputs, [context.sample_size, -1] + shape[1:])
        return tf.reduce_max(inputs, axis=0)
    return inputs
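
This assumes the samples are stacked along the batch axis as [sample_size * batch, ...]; the reshape separates them so the max runs across samples of the same example. A NumPy analogue (illustrative):

    import numpy as np

    sample_size = 3
    inputs = np.arange(6.0).reshape(6, 1)  # 3 samples of a batch of 2, stacked on axis 0
    best = inputs.reshape(sample_size, -1, 1).max(axis=0)
    assert (best == np.array([[4.0], [5.0]])).all()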
Code example #16
File: old_ops.py Project: jonbruner/PixelRNN
def conv2d(input, num_outputs, kernel_height, kernel_width, mask_type='A', scope='conv2d'):
    with tf.variable_scope(scope):
        batch_size, image_height, image_width, num_channels = get_shape(input)

        center_height = kernel_height // 2
        center_width = kernel_width // 2

        # initialize kernel weights
        weights_shape = [kernel_height, kernel_width, num_channels, num_outputs]
        weights = tf.get_variable("weights", weights_shape, tf.float32, WEIGHT_INITIALIZER, None)

        # pre-convolution mask
        mask_shape = (kernel_height, kernel_width, num_channels, num_outputs)
        mask = np.ones(mask_shape, dtype=np.float32)

        mask[center_height, center_width+1:, :, :] = 0.0
        mask[center_height+1:, :, :, :] = 0.0

        # in type A, we do not allow a connection to the current focus of the kernel
        # which is its center pixel
        if mask_type.lower() == 'a':
            mask[center_height, center_width, :, :] = 0.0

        # apply the mask
        weights *= tf.constant(mask, dtype=tf.float32)
        # store the weights variable
        tf.add_to_collection('conv2d_weights_mask_%s' % mask_type, weights)

    stride_shape = [1, 1, 1, 1]
    outputs = tf.nn.conv2d(input, weights, stride_shape, padding='SAME', name='conv2d_outputs')
    tf.add_to_collection('conv2d_outputs', outputs)
    return outputs
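
For a 3x3 kernel the mask reduces to the familiar PixelCNN pattern: a type 'B' mask sees the center pixel and everything above/left of it, while type 'A' also hides the center. A small NumPy illustration mirroring the lines above (mine, for clarity):

    import numpy as np

    kh = kw = 3
    mask = np.ones((kh, kw), dtype=np.float32)
    mask[kh // 2, kw // 2 + 1:] = 0.0  # right of the center in the center row
    mask[kh // 2 + 1:, :] = 0.0        # every row below the center
    mask[kh // 2, kw // 2] = 0.0       # type 'A' also blocks the center pixel
    # mask is now:
    # [[1. 1. 1.]
    #  [1. 0. 0.]
    #  [0. 0. 0.]]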
Code example #17
 def __init__(self,
              *,
              env,
              model,
              opponent_model1,
              opponent_model2=None,
              nsteps,
              gamma,
              lam):
     self.env = env
     self.model = MultiModel(model, opponent_model1, opponent_model2)
     nenv = env.num_envs
     input_shape = utils.get_shape(env.observation_space)
     self.primary_obs = np.zeros((nenv, ) + input_shape,
                                 dtype=model.train_model.X.dtype.name)
     self.opponent_obs1 = np.zeros((nenv, ) + input_shape,
                                   dtype=model.train_model.X.dtype.name)
     self.opponent_obs2 = None
     if Config.NUM_SNAKES == 3:
         self.opponent_obs2 = np.zeros((nenv, ) + input_shape,
                                       dtype=model.train_model.X.dtype.name)
     multi_agent_obs = env.reset()
     self.use_multi_agent_obs(multi_agent_obs)
     self.gamma = gamma
     self.lam = lam
     self.nsteps = nsteps
     self.states = model.initial_state
     self.dones = [False for _ in range(nenv)]
Code example #18
File: model.py Project: jamie0725/Review-Generation
    def _decode_lstm(self, x, h, context):
        with tf.compat.v1.variable_scope('decode_lstm'):
            w_h = tf.compat.v1.get_variable(
                'w_h', [self.C, self.W], initializer=self.weight_initializer)
            b_h = tf.compat.v1.get_variable('b_h', [self.W],
                                            initializer=self.const_initializer)
            w_out = tf.compat.v1.get_variable(
                'w_out', [self.W, self.V], initializer=self.weight_initializer)
            b_out = tf.compat.v1.get_variable(
                'b_out', [self.V], initializer=self.const_initializer)

            h = tf.layers.dropout(h,
                                  self.dropout_rate,
                                  training=self.is_training)
            h_logits = tf.matmul(h, w_h) + b_h

            w_ctx2out = tf.compat.v1.get_variable(
                'w_ctx2out', [get_shape(context)[1], self.W],
                initializer=self.weight_initializer)
            h_logits += tf.matmul(context, w_ctx2out)

            h_logits += x
            h_logits = tf.nn.tanh(h_logits)

            h_logits = tf.layers.dropout(h_logits,
                                         self.dropout_rate,
                                         training=self.is_training)
            out_logits = tf.matmul(h_logits, w_out) + b_out
            return out_logits
Code example #19
File: plot_data.py Project: folguinch/plotter
def plot_data(args):
    # Keyword arguments for tile plotter
    opts = {}
    opts['nrows'], opts['ncols'] = utils.get_shape(args,
                                                   len(args.data),
                                                   default_cols=1)
    assert opts['nrows'] * opts['ncols'] >= len(args.data)
    args.logger.info('Rows, columns = %i, %i', opts['nrows'], opts['ncols'])

    # Setup tile plotter
    args.logger.debug('Initializing figure')
    fig = plt.NPlotter(config=args.config[0], section=args.section[0], **opts)

    # Iterate over data
    xunits = []
    yunits = []
    for i, (loc, data) in enumerate(zip(fig.axes, args.data)):
        label = fig.get_value('axlabel', None, loc, sep=',')
        label = utils.get_axis_label(args, i, label)
        overplots = utils.get_overplots(args, i)

        ax, xunit, yunit = utils.splot(loc,
                                       fig,
                                       data,
                                       args.logger,
                                       overplots=overplots,
                                       cols=args.columns,
                                       errorcol=args.errcols[0])

        xunits += [xunit]
        yunits += [yunit]

    fig.auto_config(legend=args.legend, units=(xunits, yunits))

    return fig
Code example #20
 def get_feature_size_per_file(self, f_name):
     """
     Return the dimensionality of the features in a given file.
     Typically, this will be the number of bins in a T-F representation
     """
     shape = utils.get_shape(f_name.replace('.data', '.shape'))
     return shape[1]
Code example #21
File: bench_onnxruntime.py Project: ToriML/DNN-bench
def benchmark_onnxruntime(path_to_model,
                          repeat=1000,
                          number=1,
                          warmup=100,
                          quantize=False):
    """
    Parameters
    ----------
    path_to_model: str or onnx.ModelProto
        Path to an onnx model.
    repeat: int
        Repetition of experiment. Default: 1000
    number: int
        Number of forward passes in each experiment. Default: 1
    warmup: int
        Number of disregarded experiments. Default: 100
    quantize: bool
        Dynamically quantize the model with default parameters.

    Returns
    -------
    info: dict
        Information about the size and min, max, mean, std of the time
        of the experiments.
    """
    assert repeat >= 2 * warmup

    if quantize:
        import onnx
        from onnx import version_converter
        from onnxruntime.quantization import quantize_dynamic

        orig_model = onnx.load(path_to_model)
        if orig_model.opset_import[0].version < 11:
            converted_model = version_converter.convert_version(orig_model, 11)
            path_to_model = '/tmp/model_conv.onnx'
            with open(path_to_model, 'wb') as f:
                f.write(converted_model.SerializeToString())
            del orig_model, converted_model
        path_to_quant_model = "/tmp/model_quant.onnx"
        model = quantize_dynamic(path_to_model, path_to_quant_model)
        size = os.path.getsize(path_to_quant_model)
        sess = ort.InferenceSession(path_to_quant_model)
    else:
        size = os.path.getsize(path_to_model)
        sess = ort.InferenceSession(path_to_model)

    inputs = {
        x.name: np.random.randn(*get_shape(x)).astype(get_type(x))
        for x in sess.get_inputs()
    }

    def _benchmark():
        output = sess.run(None, inputs)

    res = dict(size=size, input_size=[tuple(x.shape) for x in inputs.values()])
    res.update(benchmark_speed(_benchmark, repeat, number, warmup))
    return res
Code example #22
def icm(s, s1, a, act_size, layers=2, activation=tf.nn.relu, scope="icm"):
    """Curiosity-driven Exploration by Self-supervised Prediction"""

    # s1 = tf.stop_gradient(s1)

    s_size = get_shape(s)[-1]
    s1_size = get_shape(s1)[-1]
    assert s_size == s1_size
    feature_size = s_size
    a_onehot = tf.one_hot(a, act_size, dtype=tf.float32)

    with tf.variable_scope(scope):
        with tf.variable_scope("forward_model"):
            s1_hat = tf.concat([s, a_onehot], axis=-1)
            for i in range(layers - 1):
                s1_hat = tf.layers.dense(s1_hat,
                                         feature_size,
                                         activation=activation,
                                         name="layer_%d" % i)
            s1_hat = tf.layers.dense(s1_hat,
                                     feature_size,
                                     activation=None,
                                     name="predict_target")

            f_loss = 0.5 * tf.reduce_sum(tf.square(s + s1_hat - s1), axis=-1)

        with tf.variable_scope("inverse_model"):
            a_logits_hat = tf.concat([s, s1 - s], axis=-1)
            for i in range(layers - 1):
                a_logits_hat = tf.layers.dense(a_logits_hat,
                                               feature_size,
                                               activation=activation,
                                               name="layers_%d" % i)
            a_logits_hat = tf.layers.dense(a_logits_hat,
                                           act_size,
                                           activation=None,
                                           name="predict_act_logits")

            i_loss = tf.nn.softmax_cross_entropy_with_logits(
                labels=a_onehot, logits=a_logits_hat)

        tf.summary.scalar("f_loss", tf.reduce_mean(f_loss))
        tf.summary.scalar("i_loss", tf.reduce_mean(i_loss))

    return icmLoss(f_loss, i_loss)
Code example #23
 def get_num_instances_per_file(self, f_name):
     """
     Return the number of context_windows, patches, or instances generated out of a given file
     """
     shape = utils.get_shape(f_name.replace('.data', '.shape'))
     file_frames = float(shape[0])
     return np.maximum(
         1, int(np.ceil((file_frames - self.patch_len) / self.patch_hop)))
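
With hypothetical numbers: a 100-frame file with patch_len=25 and patch_hop=10 yields max(1, ceil((100 - 25) / 10)) = 8 instances, and files shorter than one patch still count as 1.

    import numpy as np

    assert np.maximum(1, int(np.ceil((100.0 - 25) / 10))) == 8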
Code example #24
File: old_ops.py Project: jonbruner/PixelRNN
def unskew(skewed_outputs, width=0, scope="unskew"):
    with tf.name_scope(scope):
        batch, height, skewed_width, channel = get_shape(skewed_outputs)
        #rows = tf.unpack(tf.transpose(skewed_outputs, [1, 0, 2, 3,]))  # [height, batch, width, channel]
        rows = tf.split(1, height, skewed_outputs)  # [batch, 1, width, channel]
        width = width if width else height

        unskewed_rows = []
        # iterate through the rows
        for i, row in enumerate(rows):
            sliced_row = tf.slice(row, [0, 0, i, 0], [-1, -1, width, -1])
            unskewed_rows.append(sliced_row)
        unskewed_output = tf.concat(1, unskewed_rows, name="unskewed_output")

        desired_shape = [None, height, width, channel]
        output_shape = get_shape(unskewed_output)
        assert output_shape == desired_shape, "wrong shape of unskewed output. Actual {}; Expected {}".format(output_shape, desired_shape)
    return unskewed_output
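
unskew is the inverse of skew in example #1: row i keeps the width columns starting at offset i. A self-contained NumPy round trip (illustrative, not from the project):

    import numpy as np

    def unskew_np(y, width):  # y: [batch, height, width + height - 1, channel]
        h = y.shape[1]
        return np.stack([y[:, i, i:i + width, :] for i in range(h)], axis=1)

    x = np.random.rand(2, 4, 4, 3)
    skewed = np.zeros((2, 4, 7, 3))
    for i in range(4):
        skewed[:, i, i:i + 4, :] = x[:, i]  # skew, as in example #1
    assert (unskew_np(skewed, 4) == x).all()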
Code example #25
File: Trainer_r2d2.py Project: hybug/test_ppo
    def __init__(self, act_space, lstm, scope="agent", **kwargs):
        self.act_space = act_space
        self.scope = scope

        self.s = kwargs.get("s")
        self.prev_a = kwargs.get("prev_a")
        self.state_in = kwargs.get("state_in")
        self.slots = tf.cast(kwargs.get("slots"), tf.float32)

        feature, self.state_out = self.feature_net(self.s, lstm, self.prev_a,
                                                   self.state_in)

        self.qf = self.q_fn(feature, self.slots, self.scope + "_current")

        self.current_act = tf.argmax(self.qf, axis=-1)

        # base_probs = tf.ones(
        #     get_shape(self.prev_a) + [act_space]
        # ) * epsilon / tf.cast(act_space, tf.float32)
        # argmax_a = tf.argmax(self.qf, axis=-1)
        # argmax_probs = tf.one_hot(
        #     argmax_a, depth=act_space, dtype=tf.float32
        # ) * (1.0 - epsilon)
        # self.current_act_probs = base_probs + argmax_probs
        #
        # self.current_act = tf.squeeze(
        #     categorical(tf.math.log(self.current_act_probs)), axis=-1)

        self.bootstrap_s = kwargs.get("bootstrap_s")
        if self.bootstrap_s is not None:
            self.bootstrap_prev_a = kwargs.get("bootstrap_prev_a")
            self.bootstrap_slots = tf.cast(kwargs.get("bootstrap_slots"),
                                           tf.float32)
            self.a = kwargs.get("a")
            self.r = kwargs.get("r")

            self.qa = tf.reduce_sum(
                tf.one_hot(self.a, depth=self.act_space, dtype=tf.float32) *
                self.qf,
                axis=-1)

            bootstrap_feature, _ = self.feature_net(self.bootstrap_s, lstm,
                                                    self.bootstrap_prev_a,
                                                    self.state_out)

            n_step = get_shape(bootstrap_feature)[1]

            feature1 = tf.concat([feature[:, n_step:, :], bootstrap_feature],
                                 axis=1)
            slots1 = tf.concat([self.slots[:, n_step:], self.bootstrap_slots],
                               axis=1)
            self.q1f1 = self.q_fn(feature1, slots1, self.scope + "_target")

            self.q1f = self.q_fn(feature1, slots1, self.scope + "_current")

            self.qa1 = doubleQ(self.q1f1, self.q1f)
Code example #26
File: Trainer_SAC.py Project: hybug/test_ppo
 def categorical(tensor, num):
     shape = get_shape(tensor)
     if len(shape) == 2:
         return tf.random.categorical(tensor, num)
     elif len(shape) == 3:
         new = tf.reshape(tensor, [-1, shape[-1]])
         sample = tf.random.categorical(new, num)
         return tf.reshape(sample, [shape[0], shape[1], num])
     else:
         raise ValueError(tensor.name + " should have dim 2 or 3")
Code example #27
    def pad_left(self, x, size, dim, shift_right=False):
        if self.state_old is None:
            shape = get_shape(x)
            shape[dim] = size
            x_pad = torch.zeros(shape, dtype=x.dtype, device=x.device)
        else:
            x_pad = self.state_old[0]

        #add left part of x
        x_padded = torch.cat([x_pad, x], dim)

        #get right part for padding on next iter
        x_splited = torch.split(x_padded,
                                [get_shape(x_padded)[dim] - size, size],
                                dim=dim)

        self.update(x_splited[-1:])

        return x_splited[0] if shift_right else x_padded
Code example #28
File: old_ops.py Project: jonbruner/PixelRNN
def conv1d(input, num_outputs, kernel_size, scope='conv1d'):
    with tf.variable_scope(scope):
        batch_size, image_height, image_width, num_channels = get_shape(input)
        kernel_height, kernel_width = kernel_size, 1
        # initialize kernel weights
        weights_shape = [kernel_height, kernel_width, num_channels, num_outputs]
        weights = tf.get_variable("weights", weights_shape, tf.float32, WEIGHT_INITIALIZER, None)

    stride_shape = [1, 1, 1, 1]
    outputs = tf.nn.conv2d(input, weights, stride_shape, padding='SAME', name='conv1d_outputs')
    return outputs
Code example #29
    def encode(self, x):
        # [B,T]->[B,1,1,T]
        x = x.unsqueeze(1).unsqueeze(2)
        # [B,1,1,T]->[B,2F,1,T]
        X = torch.nn.functional.conv2d(x,
                                       self._encdec_matrix,
                                       stride=self.hop_length)

        # [B,2F,1,T]->[B,2,F,T]
        B, F2, _, T = get_shape(X)
        return X.reshape(B, 2, F2 // 2, T)
Code example #30
File: policy_graph.py Project: hybug/test_ppo
        def reconstruct_net(self, feature, scope="reconstruct"):
            shape = get_shape(feature)
            with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
                feature = tf.reshape(feature, [-1, shape[-1]])
                feature = tf.layers.dense(feature,
                                          800,
                                          tf.nn.relu,
                                          name="feature")
                image = tf.reshape(feature, [-1, 5, 5, 32])
                filter = [16, 32, 32]
                size = [(84, 82), (40, 38), (18, 7)]
                kernel = [(3, 3), (3, 3), (5, 3)]
                stride = [(1, 2), (1, 2), (2, 1)]
                for i in range(len(filter) - 1, -1, -1):
                    image = self.resblock(image, "res0_%d" % i)

                    image = tf.image.resize_nearest_neighbor(
                        image, [size[i][1], size[i][1]])

                    output_channels = filter[i - 1] if i > 0 else 1
                    input_channels = filter[i]
                    image = tf.nn.conv2d_transpose(
                        image,
                        filter=tf.get_variable(name="deconv_%d" % i,
                                               shape=[
                                                   kernel[i][0], kernel[i][0],
                                                   output_channels,
                                                   input_channels
                                               ]),
                        output_shape=[
                            get_shape(feature)[0], size[i][0], size[i][0],
                            output_channels
                        ],
                        strides=stride[i][0],
                        padding="VALID")

                image = tf.reshape(image,
                                   shape=[shape[0], shape[1]] +
                                   get_shape(image)[-3:])

            return image