def get_R(net_in):
    """Relation network: attention-weighted sum over all object pairs."""
    with tf.variable_scope('R'):
        n_objects = int(net_in.shape[-2])
        n_relations = n_objects * (n_objects - 1) // 2
        # One sigmoid attention gate per unordered object pair.
        attn_in = tf.reshape(net_in, [-1, int(net_in.shape[-1]) * n_objects])
        attn = tf.sigmoid(nets.dense_net(attn_in, [1024, 1024, n_relations]))
        R_Es = []
        counter = 0
        for i in range(n_objects):
            for j in range(i):
                # The pairwise relation net shares weights across all pairs.
                with tf.variable_scope('R_R', reuse=tf.AUTO_REUSE):
                    pair = tf.concat([net_in[:, i, :], net_in[:, j, :]],
                                     axis=1)
                    attn_ij = tf.expand_dims(attn[:, counter], -1)
                    R_Es.append(attn_ij *
                                nets.dense_net(pair, [1024, 512, 512]))
                counter += 1
        return tf.add_n(R_Es, name='R_E') / n_relations
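As a sanity check on the loop above (not part of the original code), the i > j enumeration visits each unordered pair exactly once, so counter ends at n_relations:

n_objects = 4
pairs = [(i, j) for i in range(n_objects) for j in range(i)]
# 6 pairs for 4 objects: (1,0), (2,0), (2,1), (3,0), (3,1), (3,2)
assert len(pairs) == n_objects * (n_objects - 1) // 2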
Example #2
from numbers import Number
from typing import Callable, Optional, Tuple

import tensorflow as tf
from tensorflow import Session, Tensor
from tqdm import tqdm

# `DataSet` and `dense_net` are project-local helpers (not shown here).


def feature_eval_setup(sess: Session,
                       X: Tensor,
                       Z: Tensor,
                       data_train: DataSet,
                       data_test: DataSet,
                       eval_fn: Callable[[Tensor, Tensor], Tensor],
                       eval_loss_fn: Callable[[Tensor, Tensor], Tensor],
                       supervise_net: Optional[Callable[[Tensor], Tensor]] = None,
                       optimizer: tf.train.Optimizer = (
                               tf.train.RMSPropOptimizer(learning_rate=1e-4)),
                       mb_size: Optional[int] = 128,
                       max_iter: int = 5000,
                       restart_training: bool = True
                       ) -> Callable[[Session], Tuple[Number, Number]]:
    with tf.variable_scope('feature_eval'):
        if supervise_net is not None:
            y_logits = supervise_net(Z)
        else:
            y_logits = dense_net(Z, [256, data_train.dim_y])

    y_hat = tf.sigmoid(y_logits)
    y = tf.placeholder(tf.float32, [None] + data_train.dim_Y)
    eval_loss = tf.reduce_mean(eval_loss_fn(y_logits, y))
    eval_result = eval_fn(y_hat, y)
    # Train only the probe head; the upstream feature extractor's variables
    # are outside the 'feature_eval' scope and therefore stay frozen.
    vars_fteval = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                    scope='feature_eval')
    train = optimizer.minimize(eval_loss, var_list=vars_fteval)
    eval_vars_initializer = tf.variables_initializer(
        tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='feature_eval'))
    sess.run(eval_vars_initializer)

    def feature_eval(_sess: Session) -> Tuple[Number, Number]:
        if restart_training:
            _sess.run(eval_vars_initializer)
        for _ in tqdm(range(max_iter)):
            if mb_size is not None:
                _mb = data_train.sample(mb_size)
            else:
                _mb = data_train
            data_feed = {X: _mb.x, y: _mb.y}
            _sess.run(train, feed_dict=data_feed)
        data_feed = {X: data_test.x, y: data_test.y}
        val_eval_loss, val_eval = _sess.run([eval_loss, eval_result],
                                            feed_dict=data_feed)
        return val_eval_loss, val_eval

    return feature_eval
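A minimal usage sketch, assuming `sess`, `X`, `Z`, `data`, and `data_test` exist as in the surrounding examples; `accuracy` and `xent` below are illustrative stand-ins for `eval_fn` and `eval_loss_fn`, not part of the original code:

def accuracy(y_hat, y):
    # Fraction of thresholded predictions matching the binary labels.
    correct = tf.equal(tf.round(y_hat), tf.round(y))
    return tf.reduce_mean(tf.cast(correct, tf.float32))

def xent(y_logits, y):
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=y_logits)

feature_eval = feature_eval_setup(sess, X, Z, data, data_test,
                                  eval_fn=accuracy, eval_loss_fn=xent,
                                  mb_size=128, max_iter=5000)
test_loss, test_acc = feature_eval(sess)  # trains the probe, then evaluates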
Example #3


with tf.variable_scope('E'):
    net = conv80_ch(X__)
    # Treat each spatial location of the conv feature map as an "object".
    n_objects = int(net.shape[1] * net.shape[2])
    net = tf.reshape(net, [-1, n_objects, int(net.shape[-1])])
    R_E = get_R(net)

    Z, kl_losses = nets.get_variational_layer(R_E, dim_z)

with tf.variable_scope('G'):
    n_objects = 8
    dim_objects = 64
    net = nets.dense_net(Z, [n_objects * dim_objects])
    net = tf.reshape(net, (-1, n_objects, dim_objects))
    R_G = get_R(net)
    G_logits = nets.deconv80(R_G, out_channels=data.dim_X[-1], is_train=True)
    G_X = tf.nn.sigmoid(G_logits)

# Sum squared error over all non-batch dimensions, then average over the batch.
sum_dims = list(range(1, 1 + len(data.dim_X)))
recon_loss = tf.reduce_mean(tf.reduce_sum((G_X - X__)**2, sum_dims), 0)
# recon_loss = tf.reduce_mean(tf.abs(G_X - X__), 1)
loss = dim_z * tf.reduce_mean(kl_losses) + recon_loss

train = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(loss)
print([_.name for _ in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)])

interpolation = interpolation_setup(
    X,
Example #4
def D(_in, init=False):
    # The first call (init=True) creates the discriminator variables;
    # every later call reuses them.
    with tf.variable_scope('D', reuse=not init):
        tmp_logits = nets.mini_rnn(_in, out_dim_t=4, state_size=64)
        _out_logit = nets.dense_net(tmp_logits, [512, 32, 1])
    return _out_logit
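A sketch of the call pattern the reuse flag implies (the tensor names here are illustrative, not from the original code):

D_real = D(Z_target, init=True)  # first call: creates the 'D' variables
D_fake = D(Z)                    # later calls: reuse=True shares them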
Example #5
data_name = DataName.PDSST
data, data_test = import_data(data_name)
name = 'AAE'
dim_z = 32
mb_size = 128
lr = 1e-4

input_size = [None] + data.dim_X
# Inputs arrive flattened; reshape them back to the full image shape.
X = tf.placeholder(tf.float32, shape=[None, data.dim_x])
X__ = tf.reshape(X, shape=[-1] + data.dim_X, name='X__')

with tf.variable_scope('E'):
    tmp_logits = nets.conv80(X__, dim_z, is_train=True)
    tmp_logits = nets.mini_rnn(tmp_logits, out_dim_t=4, state_size=64)
    Z = nets.dense_net(tmp_logits, [256, dim_z])
    # Z, _ = nets.get_variational_layer(tmp_logits, dim_z)
    # Z = tmp_logits

with tf.variable_scope('G'):
    tmp_logits = nets.mini_rnn(Z, out_dim_t=4, state_size=64)
    tmp_logits = nets.dense_net(tmp_logits, [512],
                                activation_fn=tf.nn.relu,
                                batch_norm=True,
                                is_train=True)
    G_logits = nets.deconv80(tmp_logits,
                             out_channels=data.dim_X[-1],
                             is_train=True)
    G_X = tf.nn.sigmoid(G_logits)
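The excerpt ends before the objective; a minimal sketch of how training might continue, reusing the squared-error reconstruction loss from Example #3 (the AAE's adversarial term on Z via the discriminator D is omitted here):

sum_dims = list(range(1, 1 + len(data.dim_X)))
recon_loss = tf.reduce_mean(tf.reduce_sum((G_X - X__)**2, sum_dims), 0)
train = tf.train.AdamOptimizer(learning_rate=lr).minimize(recon_loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(1000):
        mb = data.sample(mb_size)
        sess.run(train, feed_dict={X: mb.x})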