Example #1
    def __init__(self,
                 layer_name,
                 target_direction,
                 num_images,
                 normalize=False,
                 target_metric='max'):

        assert layer_name in RF_SIZES, 'Invalid layer name: %s' % layer_name
        #assert input_images.shape[1] == RF_SIZES[layer_name], 'Invalid input size for layer name: %s' % layer_name

        input_size = RF_SIZES[layer_name]

        self.graph = tf.Graph()
        with self.graph.as_default():
            self.images = tf.placeholder(tf.float32,
                                         shape=(num_images, input_size,
                                                input_size, 3),
                                         name='images')
            if normalize:
                norm = tf.sqrt(
                    tf.reduce_sum(tf.square(self.images),
                                  axis=[1, 2, 3],
                                  keep_dims=True))
                self.images = (NORMS[layer_name] / 2) * self.images / norm
            self.vgg = vgg19(self.images,
                             subtract_mean=False,
                             final_endpoint=layer_name)
            vgg_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                         scope='vgg_19')
            saver_vgg = tf.train.Saver(var_list=vgg_vars)

            if type(target_direction) == int:
                unit_num = target_direction
                target_direction = np.zeros(int(
                    self.vgg[layer_name].shape[-1]),
                                            dtype=np.float32)
                target_direction[unit_num] = 1
            else:
                if target_metric in ['max', 'cos']:
                    target_direction /= np.sqrt(np.sum(target_direction**2))
            self.target_direction = tf.constant(target_direction,
                                                shape=[target_direction.size],
                                                name='target_direction')
            vgg_flat = tf.reshape(self.vgg[layer_name], [num_images, -1])
            if target_metric in ['max', 'cos']:
                if target_metric == 'cos':
                    # small epsilon keeps the norm away from zero
                    vgg_flat /= tf.sqrt(
                        tf.reduce_sum(tf.square(vgg_flat), axis=1) +
                        0.01)[:, None]
                self.predictions = tf.tensordot(vgg_flat,
                                                self.target_direction,
                                                axes=[[1], [0]],
                                                name='predictions')
            else:
                self.predictions = -tf.reduce_mean(
                    tf.square(vgg_flat - self.target_direction), axis=1)

            self.session = tf.Session()
            self.session.run(tf.global_variables_initializer())
            saver_vgg.restore(self.session, VGG_CHECKPOINT_FILE)
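Note: tf.float32 is a dtype object, not a callable, which is why the tf.float32(0.01) epsilon in the cosine branch above becomes a plain float. A minimal sketch (illustrative names, TF 1.x API) of passing the dtype to a placeholder and adding a float32 epsilon either as a Python float or as an explicit constant:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 4), name='x')      # tf.float32 used as a dtype
norm = tf.sqrt(tf.reduce_sum(tf.square(x), axis=1) + 1e-2)     # plain float epsilon
eps = tf.constant(1e-2, dtype=tf.float32)                      # or an explicit constant
norm_eps = tf.sqrt(tf.reduce_sum(tf.square(x), axis=1) + eps)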
Example #2
    def _build(self):
        # state_dim, dimensions, fullyConnected and self.learning_rate come
        # from the enclosing class/module
        self.state_ph = tf.placeholder(tf.float32, (None, state_dim), 'states')
        self.target_values = tf.placeholder(tf.float32, (None,), 'values')
        flow = self.state_ph
        for i, dim in enumerate(dimensions):
            flow = fullyConnected('layer_%i' % i, flow, dim, tf.nn.relu)
        # squeeze to shape (None,) so the difference below does not broadcast
        self.value = tf.squeeze(fullyConnected('output', flow, 1, None), axis=1)
        self.loss = tf.reduce_mean(tf.square(self.value - self.target_values))
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_op = optimizer.minimize(self.loss)
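A minimal usage sketch for the value network above, assuming the enclosing class (here called ValueNet, a hypothetical name) calls _build() in its constructor and uses state_dim = 8; the random batch is made up:

import numpy as np
import tensorflow as tf

net = ValueNet()                                      # hypothetical wrapper that runs _build()
sess = tf.Session()
sess.run(tf.global_variables_initializer())

states = np.random.randn(32, 8).astype(np.float32)    # batch of 32 states, state_dim = 8
targets = np.random.randn(32).astype(np.float32)      # one regression target per state

_, loss = sess.run([net.train_op, net.loss],
                   feed_dict={net.state_ph: states, net.target_values: targets})
print(loss)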
Example #3
import h5py as h5
import numpy as np


def adv_tfrecord(file_dir, save_dir):
    db = h5.File(file_dir, mode='r')
    stride = 1
    # X is a 4-D array of image data (float dtype, values in the range 0~1)
    X = db["X"][::stride]
    # Y is a 1-D array of labels: 0 for fake, 1 for real
    Y = db["Y"][::stride]
    X = X.astype(np.float32)
    print("Making TF Record: writing {}".format(save_dir))
    np2tfrecord(X, Y, 0, save_dir)
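The np2tfrecord helper is not shown above; a minimal sketch of what such a NumPy-to-TFRecord writer could look like with standard tf.train.Example protos. The signature mirrors the call above, and treating the third argument as a start index is an assumption:

import tensorflow as tf


def np2tfrecord(X, Y, start, save_path):
    # Hypothetical writer: serialize each (image, label) pair as a tf.train.Example.
    with tf.python_io.TFRecordWriter(save_path) as writer:
        for x, y in zip(X[start:], Y[start:]):
            feature = {
                'image': tf.train.Feature(
                    float_list=tf.train.FloatList(value=x.ravel().tolist())),
                'label': tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[int(y)])),
            }
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            writer.write(example.SerializeToString())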
Example #4
    def initialize_network(self, feat_1_node=None, feat_2_node=None):
        """ Setup input placeholders and build network """
        with self._graph.as_default():
            if feat_1_node is not None:
                # this means that we are training the net
                # setup input placeholders
                self._input_feat_1_node = tf.placeholder_with_default(
                    feat_1_node, (None, self._num_feat))
                self._input_feat_2_node = tf.placeholder_with_default(
                    feat_2_node, (None, self._num_feat))

                # create input node for drop rate
                self._input_drop_rate_node = tf.placeholder_with_default(
                    tf.constant(0.0), ())

                # build networks
                self._out_1 = self._build_network(self._input_feat_1_node,
                                                  self._input_drop_rate_node,
                                                  'network_1')
                self._out_2 = self._build_network(self._input_feat_2_node,
                                                  self._input_drop_rate_node,
                                                  'network_2')

                # create feed tensors for prediction:
                self._input_feat_1_arr = np.zeros((self._bsz, self._num_feat))
                self._input_feat_2_arr = np.zeros((self._bsz, self._num_feat))
            else:
                # this means that we are inferring
                self._input_feat_node = tf.placeholder(
                    tf.float32, (self._bsz, self._num_feat))

                # create input node for drop rate
                self._input_drop_rate_node = tf.placeholder_with_default(
                    tf.constant(0.0), ())

                # build network
                self._out = self._build_network(self._input_feat_node,
                                                self._input_drop_rate_node,
                                                'inference_net')

                # create feed tensor for prediction:
                self._input_feat_arr = np.zeros((self._bsz, self._num_feat))
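A minimal sketch of the tf.placeholder_with_default pattern used in the training branch above: the default tensor flows through when nothing is fed, and a feed_dict entry overrides it (names and values are illustrative):

import numpy as np
import tensorflow as tf

default_feats = tf.constant(np.ones((2, 3), dtype=np.float32))
feats = tf.placeholder_with_default(default_feats, shape=(None, 3))
drop_rate = tf.placeholder_with_default(tf.constant(0.0), ())

out = tf.reduce_sum(feats) * (1.0 - drop_rate)

with tf.Session() as sess:
    print(sess.run(out))                                    # defaults flow through -> 6.0
    print(sess.run(out, feed_dict={feats: np.zeros((4, 3), np.float32),
                                   drop_rate: 0.5}))        # fed values override -> 0.0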
Example #5
def train_copos(env_id, num_timesteps, seed, trial, hist_len, block_high,
                nsteps, method, hid_size, give_state, vf_iters):
    import baselines.common.tf_util as U
    sess = U.single_threaded_session()
    sess.__enter__()

    workerseed = seed * 10000
    def policy_fn(name, ob_space, ac_space, ob_name):
        return CompatibleMlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
            hid_size=hid_size, num_hid_layers=2, ob_name=ob_name)

    set_global_seeds(workerseed)
    env = make_control_env(env_id, seed, hist_len=hist_len,
                           block_high=block_high, version0=False, give_state=give_state)
    env.seed(workerseed)

    timesteps_per_batch = nsteps
    beta = -1
    if beta < 0:
        nr_episodes = num_timesteps // timesteps_per_batch
        # Automatically compute beta based on initial entropy and number of iterations
        tmp_pi = policy_fn("tmp_pi", env.observation_space, env.action_space, ob_name="tmp_ob")
        sess.run(tf.global_variables_initializer())
        tmp_pistate = tmp_vfstate = tmp_pi.initial_state
        new = np.float32(False)  # episode-start flag fed to the policy's mask input

        tmp_ob = np.zeros((1,) + env.observation_space.shape)
        entropy = sess.run(tmp_pi.pd.entropy(), feed_dict={tmp_pi.ob: tmp_ob, tmp_pi.Spi:tmp_pistate,
                                                           tmp_pi.Svf:tmp_vfstate, tmp_pi.M:new})
        beta = 2 * entropy / nr_episodes
        print("Initial entropy: " + str(entropy) + ", episodes: " + str(nr_episodes))
        print("Automatically set beta: " + str(beta))

    copos_mpi.learn(env, policy_fn, timesteps_per_batch=timesteps_per_batch, epsilon=0.01, beta=beta,
                    cg_iters=10, cg_damping=0.1, method=method,
                    max_timesteps=num_timesteps, gamma=0.99, lam=0.98, vf_iters=vf_iters, vf_stepsize=1e-3,
                    trial=trial, kl_target=0.01, sess=sess)
    env.close()
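For intuition, the automatically chosen beta is just 2 * entropy / nr_episodes; a tiny worked example with made-up numbers:

# hypothetical values, only to illustrate the formula used above
num_timesteps = 1000000
timesteps_per_batch = 2048
entropy = 1.4                                        # e.g. measured initial policy entropy

nr_episodes = num_timesteps // timesteps_per_batch   # 488
beta = 2 * entropy / nr_episodes                     # ~0.0057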
Example #6
    def Add(self, tokens, state_vectors, attention_vectors):
        '''
        Adds the given vectors to the cache.
        state_vectors: tensor of size (batch_size x ... x hidden_size)
        attention_vectors: tensor of size (batch_size x ... x hidden_size)
        tokens: tensor of size (batch_size x ...)

        return: a scalar float32 zero (dummy return value)
        '''

        indices, alphas = tf.py_func(self._AddPy, [tokens],
                                     (tf.int64, tf.float32))

        # Blend the new vectors into the cached rows selected by `indices`,
        # weighting new values by `alphas` and old values by (1 - alphas).
        batch_idx = tf.range(self.batch_size_)[:, None]
        blend = alphas[:, :, None]

        self.state_tensor_[batch_idx, indices] = \
            state_vectors * blend + \
            self.state_tensor_[batch_idx, indices] * (1 - blend)

        self.attention_tensor_[batch_idx, indices] = \
            attention_vectors * blend + \
            self.attention_tensor_[batch_idx, indices] * (1 - blend)
        return tf.constant(0.0, dtype=tf.float32)
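A standalone sketch of the tf.py_func call above, showing how a NumPy function returns the (tf.int64, tf.float32) output pair; _add_py is a made-up stand-in for self._AddPy:

import numpy as np
import tensorflow as tf


def _add_py(tokens):
    # Stand-in: pick a cache slot per token and a blend weight.
    indices = np.argsort(tokens, axis=-1).astype(np.int64)
    alphas = np.full(tokens.shape, 0.5, dtype=np.float32)
    return indices, alphas


tokens = tf.placeholder(tf.int32, shape=(None, None), name='tokens')
indices, alphas = tf.py_func(_add_py, [tokens], (tf.int64, tf.float32))

with tf.Session() as sess:
    idx, a = sess.run([indices, alphas],
                      feed_dict={tokens: [[3, 1, 2], [0, 5, 4]]})
    print(idx.dtype, a.dtype)   # int64 float32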
Example #7
    def __init__(self,
                 embedding_mat,
                 emb_size,
                 hidden_unit,
                 filter_sizes,
                 num_filters,
                 max_pool_size,
                 num_classes,
                 sequence_length,
                 l2_reg_lambda=0.0):
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length, None],
                                      name='input_x')
        self.input_y = tf.placeholder(tf.float32, [None, num_classes],
                                      name='input_y')
        self.dropout_keep_prob = tf.placeholder(tf.float32,
                                                name='dropout_keep_prob')
        self.batch_size = tf.placeholder(tf.int32, [])
        self.pad = tf.placeholder(tf.float32, [None, 1, emb_size, 1],
                                  name='pad')
        self.real_len = tf.placeholder(tf.int32, [None], name='real_len')

        l2_regu = tf.constant(0.0)

        with tf.name_scope('embedding'), tf.device('/cpu:0'):
            w = tf.Variable(name='w', initial_value=embedding_mat)
            self.embedding = tf.nn.embedding_lookup(w, self.input_x)
            self.embedding = tf.reduce_mean(self.embedding, axis=2)
            emb = tf.expand_dims(self.embedding, -1)

        pool_cat = []
        pool_reduced = np.int32(np.ceil(sequence_length * 1.0 / max_pool_size))

        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope('conv-maxpool-%s' % filter_size) as scope:
                filter_shape = [filter_size, emb_size, 1, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1),
                                name='W')
                # bias has one entry per output channel (num_filters)
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]),
                                name='b')
                conv1 = tf.nn.conv2d(emb,
                                     filter=W,
                                     strides=[1, 1, 1, 1],
                                     padding='VALID',
                                     name='conv')

                h = tf.nn.relu(tf.nn.bias_add(conv1, b), name='relu')

                h_pooled = tf.nn.max_pool(h,
                                          ksize=[1, max_pool_size, 1, 1],
                                          strides=[1, max_pool_size, 1, 1],
                                          padding='SAME',
                                          data_format="NHWC",
                                          name=None)
                h_pooled = tf.reshape(h_pooled,
                                      shape=[-1, pool_reduced, num_filters])

                pool_cat.append(h_pooled)

        pool_cat = tf.concat(pool_cat, axis=2)

        pool_cat = tf.nn.dropout(pool_cat, keep_prob=self.dropout_keep_prob)

        gru_cell = tf.nn.rnn_cell.GRUCell(num_units=hidden_unit)

        gru_cell = tf.nn.rnn_cell.DropoutWrapper(
            gru_cell, input_keep_prob=self.dropout_keep_prob)

        # static_rnn expects a list of [batch, num_filters] tensors, so squeeze
        # out the time axis produced by tf.split
        gru_inputs = [
            tf.squeeze(input_, [1]) for input_ in tf.split(
                pool_cat, num_or_size_splits=pool_reduced, axis=1)
        ]

        self._initial_state = gru_cell.zero_state(self.batch_size, tf.float32)

        outputs, hidden = tf.contrib.rnn.static_rnn(
            gru_cell,
            gru_inputs,
            initial_state=self._initial_state,
            sequence_length=self.real_len)
        with tf.name_scope('output') as scope:
            tf.get_variable_scope().reuse_variables()
            out = outputs[0]
            ones = tf.ones(shape=[1, hidden_unit])
            for i in range(1, len(outputs)):
                # examples whose sequence has already ended keep the previous
                # output; the rest advance to outputs[i]
                ind = self.real_len < (i + 1)
                ind = tf.cast(ind, tf.float32)
                ind = tf.expand_dims(ind, -1)
                mat = tf.matmul(ind, ones)

                out = tf.add(tf.multiply(out, mat),
                             tf.multiply(outputs[i], 1.0 - mat))

        with tf.name_scope('fc-predict') as scope:
            self.w = tf.Variable(tf.truncated_normal(
                shape=[hidden_unit, num_classes], stddev=0.1), name='w')
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name='b')

            self.scores = tf.nn.xw_plus_b(out, self.w, b, name='scores')

            self.predict = tf.argmax(self.scores, axis=1, name='predict')
        with tf.name_scope('loss') as scope:

            l2_regu += tf.nn.l2_loss(self.w)
            l2_regu += tf.nn.l2_loss(b)

            loss = tf.nn.softmax_cross_entropy_with_logits(labels=self.input_y,
                                                           logits=self.scores,
                                                           name='loss')
            self.loss = tf.reduce_mean(loss) + l2_reg_lambda * l2_regu
        with tf.name_scope('accuracy') as scope:
            correct_count = tf.equal(tf.argmax(self.input_y, 1), self.predict)
            self.accuracy = tf.reduce_mean(tf.cast(correct_count,
                                                   dtype=tf.float32),
                                           name='accuracy')
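The loop in the output scope selects, per example, the last RNN output within real_len. A compact sketch of the same idea using a one-hot mask over a stacked [batch, time, units] tensor (shapes and names are illustrative, not part of the original class):

import tensorflow as tf

outputs_t = tf.placeholder(tf.float32, [None, 10, 64])       # [batch, time, units]
real_len = tf.placeholder(tf.int32, [None])                  # valid length per example

last_idx = tf.maximum(real_len - 1, 0)                       # index of the last valid step
mask = tf.one_hot(last_idx, depth=10, dtype=tf.float32)      # [batch, time]
last_output = tf.reduce_sum(outputs_t * mask[:, :, None], axis=1)   # [batch, units]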
Example #8
y_data = np.argmax(y_data, axis=1)

print(y_data)
print(y_data.shape)  #(6,)

x_data = x_data.reshape(1, 6, 5)
y_data = y_data.reshape(1, 6)

print(x_data.shape)
print(y_data.shape)

sequence_length = 6
input_dim = 5

X = tf.compat.v1.placeholder(tf.float32, (None, sequence_length, input_dim))
# sequence_loss below expects integer class targets, so Y is declared as int32
Y = tf.compat.v1.placeholder(tf.int32, (None, sequence_length))

output = 100
batch_size = 6
print(X)
print(Y)

# 2. Build the model

cell = tf.keras.layers.LSTMCell(output)
hypothesis, _states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)

weights = tf.ones([batch_size, sequence_length])
sequence_loss = tf.contrib.seq2seq.sequence_loss(logits=hypothesis,
                                                 targets=Y,