Example #1
 def log_likelihoods(self, samples, name=None):
     name = adapt_name(name, "log-likelihood_%s" % self.name)
     with tf.name_scope(name):
         # Elementwise log-density of a diagonal Gaussian:
         # log N(x; mu, s^2) = -0.5 * (log(2*pi) + log(s^2) + (x - mu)^2 / s^2)
         out = np.log(2 * np.pi) + self.log_variances_with_L
         out += tf.square(samples - self.means_with_L) / self.variances_with_L
         out *= -0.5
         return tf.identity(out, name)
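What log_likelihoods returns is the elementwise log-density of a diagonal Gaussian, broadcast against the extra sample axis L:

    \log \mathcal{N}(x;\, \mu, \sigma^2) = -\frac{1}{2}\left(\log 2\pi + \log \sigma^2 + \frac{(x - \mu)^2}{\sigma^2}\right)

Nothing is summed over the event dimensions here, so callers reduce over them as needed (compare kl_divergence in Example #5, which does sum).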
Example #2
 def sample(self, L=1, name=None):
     name = adapt_name(name, "sample_%s" % self.name)
     with tf.name_scope(name):
         shape = tf.concat([[self.N, L], self.dims], axis=0)
         noise = tf.random_normal(shape, 0, 1, dtype=tf.float32, name="noise")
         # Reparameterization trick: x = mu + sigma * eps, sigma = exp(0.5 * log_var).
         samples = self.means_with_L + tf.exp(0.5 * self.log_variances_with_L) * noise
         return tf.identity(samples, name)
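A minimal usage sketch, assuming the DiagonalCovarianceGaussian class from Example #8 and TensorFlow 1.x:

    import tensorflow as tf

    means = tf.zeros([32, 10])       # batch of 32, 10 dimensions
    log_vars = tf.zeros([32, 10])    # log-variance 0  =>  variance 1
    dist = DiagonalCovarianceGaussian(means, log_vars)
    samples = dist.sample(L=5)       # shape [32, 5, 10]: 5 draws per row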
Example #3
def reducechoice(inp, axis, name=None):
    name = adapt_name(name, "reducechoice")
    with tf.name_scope(name):
        avg = tfl.layers.core.flatten(tf.reduce_mean(inp, axis), name="avg")
        maxx = tfl.layers.core.flatten(tf.reduce_max(inp, axis), name="max")
        minn = tfl.layers.core.flatten(tf.reduce_min(inp, axis), name="min")
        pooled = choice([avg, maxx, minn], name=name)
        return pooled
Example #4
def split_and_recombined(inps, fn, num_splits, name=None):
    name = adapt_name(name, "split-and-recombine")
    with tf.name_scope(name):
        adapted_inps = []
        # Split inputs:
        with tf.name_scope("preprocessing"):
            for inp in inps:
                if isinstance(inp, (list, tuple)):
                    if len(inp) % num_splits != 0:
                        raise RuntimeError(
                            "List not divisible by number of splits: %s" %
                            repr(inp))
                    stride = len(inp) // num_splits
                    squeeze = lambda x: x[0] if len(x) == 1 else x
                    adapted_inps.append([
                        squeeze(inp[i:(i + stride)])
                        for i in range(0, len(inp), stride)
                    ])
                elif isinstance(inp, (tf.Variable, tf.Tensor)) \
                        and len(inp.shape) > 0:
                    if inp.shape[0].value is None:
                        raise RuntimeError(
                            "Batch index must be defined for tensor")
                    leng = int(inp.shape[0])
                    if leng % num_splits != 0:
                        raise RuntimeError(
                            "Tensor not divisible by number of splits (%d): %s"
                            % (num_splits, inp.shape))
                    stride = leng // num_splits
                    adapted_inps.append([
                        tf.slice(inp, [
                            i if j == 0 else 0 for j in range(len(inp.shape))
                        ], [
                            stride if j == 0 else -1
                            for j in range(len(inp.shape))
                        ]) for i in range(0, leng, stride)
                    ])
                else:
                    # Non-splittable inputs are broadcast to every split.
                    adapted_inps.append([inp] * num_splits)
        # Zip inputs to divide work:
        adapted_inps = list(zip(*adapted_inps))
        # Do work
        raw_outputs = []
        for split, args in enumerate(adapted_inps):
            with tf.name_scope("bin%d" % split):
                raw_outputs.append(fn(*args))
        # Post-process outputs
        outputs = []
        with tf.name_scope("postprocessing"):
            for i, group in enumerate(raw_outputs):
                for j, var in enumerate(group):
                    if i == 0:
                        outputs.append([var])
                    else:
                        outputs[j].append(var)
        return outputs
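A sketch of how split_and_recombined might be called; head is a hypothetical fn (it must return a sequence, since the postprocessing loop regroups outputs by position):

    import tensorflow as tf

    x = tf.zeros([8, 4])  # batch axis must be statically known and divisible

    def head(chunk):
        return (chunk * 2.0, chunk + 1.0)

    outs = split_and_recombined([x], head, num_splits=2)
    # outs[0] holds chunk * 2 for each split, outs[1] holds chunk + 1 for each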
Example #5
 def kl_divergence(cls, p, q, name=None):
     name = adapt_name(name, "kl-divergence")
     with tf.name_scope(name):
         # Closed-form KL(p || q) between diagonal Gaussians, elementwise...
         inner = p.variances + tf.square(p.means - q.means)
         inner /= q.variances
         inner = 1 + p.log_variances - q.log_variances - inner
         inner *= -0.5
         # ...then summed over all non-batch dimensions.
         kl = tf.reduce_sum(inner, list(range(1, len(inner.shape))))
         kl = tf.identity(kl, name)
     return kl
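This is the standard closed form for the KL divergence between two diagonal-covariance Gaussians, summed over the non-batch dimensions:

    D_{\mathrm{KL}}(p \,\|\, q) = \frac{1}{2} \sum_i \left( \frac{\sigma_{p,i}^2 + (\mu_{p,i} - \mu_{q,i})^2}{\sigma_{q,i}^2} - 1 - \log \frac{\sigma_{p,i}^2}{\sigma_{q,i}^2} \right)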
Example #6
def SpatialMaxPooling(inp, kW, kH, dW=1, dH=1, padW=0, padH=0, **kwargs):
    # Pop the name so it is not forwarded a second time via config.update(kwargs).
    name = adapt_name(kwargs.pop("name", None), "pool")
    with tf.name_scope(name):
        out = inp
        out = pad(out, padW, padH)
        config = dict(
            strides=(dW, dH),
            padding='valid',
        )
        config.update(kwargs)
        out = tfl.layers.conv.max_pool_2d(out, (kW, kH), **config)
    return out
Example #7
def merge_grads(tower_grads, name=None):
    name = adapt_name(name, "merge-grads")
    with tf.name_scope(name):
        grads = []
        for grad_group in zip(*tower_grads):
            # Each grad_group holds one variable's gradient from every tower.
            var = grad_group[0][1]
            vals = [val for val, _ in grad_group if val is not None]
            if len(vals) == 0:
                grads.append((None, var))
            else:
                grads.append((tf.reduce_mean(tf.stack(vals, 0), 0), var))
        return grads
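A minimal sketch of the multi-tower training pattern merge_grads serves; input_shards and loss_fn are hypothetical, and variable sharing between towers is omitted:

    import tensorflow as tf

    opt = tf.train.GradientDescentOptimizer(0.1)
    tower_grads = []
    for i, shard in enumerate(input_shards):
        with tf.device("/gpu:%d" % i):
            # compute_gradients returns the (grad, var) pairs merge_grads expects
            tower_grads.append(opt.compute_gradients(loss_fn(shard)))
    train_op = opt.apply_gradients(merge_grads(tower_grads))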
Example #8
 def __init__(self, means, log_variances, name=None):
     name = adapt_name(name, "gaussian")
     with tf.name_scope(name):
         self.name = name
         self.N = tf.identity(tf.shape(means)[0], "N")
         self.dims = tf.shape(means)[1:]
         self.means = tf.identity(means, "mean")
         # The *_with_L variants insert a sample axis at position 1 so they
         # broadcast against the [N, L, ...] tensors produced by sample().
         self.means_with_L = tf.expand_dims(means, 1)
         self.log_variances = tf.identity(log_variances, "log-variance")
         self.log_variances_with_L = tf.expand_dims(log_variances, 1)
         self.stddevs = tf.identity(tf.exp(0.5 * self.log_variances), "stddev")
         self.variances = tf.identity(tf.exp(self.log_variances), "variance")
         self.variances_with_L = tf.expand_dims(self.variances, 1)
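A typical VAE-style construction tying the pieces together (a sketch; encoder_mu and encoder_logvar are hypothetical [N, d] tensors):

    q = DiagonalCovarianceGaussian(encoder_mu, encoder_logvar, name="posterior")
    z = q.sample(L=1)                    # [N, 1, d] latent draws (Example #2)
    kl = q.kl_divergence_from_unit()     # [N] regularizer (Example #13)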
Example #9
def SpatialConvolution(inp, _, nfilters, kW, kH, dW, dH, padW, padH, **kwargs):
    # Pop the name so it is not forwarded a second time via config.update(kwargs).
    name = adapt_name(kwargs.pop("name", None), "conv")
    with tf.variable_scope(name):
        out = inp
        out = pad(out, padW, padH)
        config = dict(
            strides=(dW, dH),
            padding='valid',
            regularizer='L2',
            weights_init='xavier',
            bias_init='zeros',
            weight_decay=1.0,
        )
        config.update(kwargs)
        out = tfl.layers.conv.conv_2d(out, nfilters, (kW, kH), **config)
    return out
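The ignored second positional argument presumably mirrors Torch's nn.SpatialConvolution signature (nInputPlane); TensorFlow infers the input channel count from inp, so the wrapper discards it.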
Example #10
def choice(inps, name=None):
    name = adapt_name(name, "choice")
    with tf.variable_scope(name):
        assert len(inps) > 0, "You must provide at least one input."
        shape = [len(inps)] + [int(x) for x in inps[0].shape[1:]]
        W = tf.get_variable("W",
                            shape=shape,
                            dtype=tf.float32,
                            initializer=tf.constant_initializer(1.0),
                            trainable=True)
        # Softmax over the stacking axis yields convex combination weights,
        # so the output is a learned weighted average of the inputs.
        mask = tf.expand_dims(tf.nn.softmax(W, dim=0), 0)
        stacked_inps = tf.stack(inps, axis=1)
        out = tf.reduce_sum(tf.multiply(stacked_inps, mask), axis=1)
        return tf.identity(out, name)
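Because W is initialized to a constant and passed through a softmax, the layer starts out as a uniform average of its inputs and learns the mixture weights from there. A minimal sketch:

    a = tf.zeros([16, 8])
    b = tf.ones([16, 8])
    mixed = choice([a, b], name="mix")  # initially the elementwise mean of a and b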
Example #11
def pad(tensor, padW, padH, name=None):
    name = adapt_name(name, "pad")
    return tf.pad(tensor, [[0, 0], [padW, padW], [padH, padH], [0, 0]],
                  name=name)
Example #12
 def symmetric_kl_divergence(cls, p, q, name=None):
     name = adapt_name(name, "symmetric-kl-divergence")
     with tf.name_scope(name):
         return tf.identity(cls.kl_divergence(p, q) + cls.kl_divergence(q, p), name)
Example #13
 def kl_divergence_from_unit(self, name=None):
     name = adapt_name(name, "kl-divergence-from-unit_%s" % self.name)
     return DiagonalCovarianceGaussian.kl_divergence(self,
         self.unit_gaussian(), name=name)
Example #14
 def unit_gaussian(self, name=None):
     name = adapt_name(name, "unit-gaussian")
     with tf.name_scope(name):
         # Unit Gaussian: zero mean and unit variance, i.e. zero log-variance
         # (passing ones here would give a variance of e, not 1).
         return DiagonalCovarianceGaussian(
                 tf.zeros(tf.shape(self.means)),
                 tf.zeros(tf.shape(self.log_variances)), name=name)
Example #15
def splitfn(inp, fn, maxbatch=None, allow_unrolling=True, name=None):
    name = adapt_name(name, "splitfn")
    with tf.variable_scope(name) as scope:
        # Dynamic path: the batch size is unknown at graph-construction time
        # (or unrolling is disabled), so splitting happens with runtime ops.
        if not allow_unrolling or inp.shape[0].value is None:
            leng = tf.shape(inp)[0]

            def minibatch():
                scope.reuse_variables()
                remainder = tf.mod(leng, maxbatch, name="remainder")
                splits = tf.identity(tf.floor_div(leng - remainder, maxbatch),
                                     "splits")
                remainder_inp = tf.slice(inp, [
                    leng - remainder if i == 0 else 0
                    for i in range(len(inp.shape))
                ], [-1 for i in range(len(inp.shape))])
                majority_inp = tf.slice(inp,
                                        [0 for i in range(len(inp.shape))], [
                                            leng - remainder if i == 0 else -1
                                            for i in range(len(inp.shape))
                                        ])
                split_inp = tf.reshape(
                    majority_inp,
                    tf.concat([[splits, maxbatch],
                               tf.shape(inp)[1:]], 0))
                majority_out = tf.map_fn(fn, split_inp)
                scope.reuse_variables()
                remainder_out = fn(remainder_inp)
                out = tf.concat([
                    tf.reshape(
                        majority_out,
                        tf.concat([[leng - remainder],
                                   tf.shape(majority_out)[2:]], 0)),
                    remainder_out
                ], 0)
                if inp.shape[0].value is not None:
                    out = tf.reshape(
                        out,
                        tf.concat([[int(inp.shape[0])],
                                   tf.shape(out)[1:]], 0))
                return out

            if maxbatch is None:
                out = fn(inp)
            else:
                out = tf.case([(maxbatch < leng, minibatch)], lambda: fn(inp))
        # Static path: the batch size is known, so split with Python arithmetic.
        else:
            leng = int(inp.shape[0])
            if maxbatch is not None and maxbatch < leng:
                remainder = leng % maxbatch
                splits = (leng - remainder) // maxbatch
                remainder_inp = tf.slice(inp, [
                    leng - remainder if i == 0 else 0
                    for i in range(len(inp.shape))
                ], [-1 for i in range(len(inp.shape))])
                majority_inp = tf.slice(inp,
                                        [0 for i in range(len(inp.shape))], [
                                            leng - remainder if i == 0 else -1
                                            for i in range(len(inp.shape))
                                        ])
                split_inp = tf.reshape(
                    majority_inp,
                    tf.concat([[splits, maxbatch],
                               tf.shape(inp)[1:]], 0))
                majority_out = tf.map_fn(fn, split_inp)
                scope.reuse_variables()
                remainder_out = fn(remainder_inp)
                out = tf.concat([
                    tf.reshape(
                        majority_out,
                        tf.concat([[leng - remainder],
                                   tf.shape(majority_out)[2:]], 0)),
                    remainder_out
                ], 0)
            else:
                out = fn(inp)
        return tf.identity(out, name)
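A usage sketch; a variable-free fn keeps the example simple, since fns that create variables must cooperate with the scope.reuse_variables() calls inside splitfn:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [64, 128])

    def transform(batch):
        return tf.tanh(batch)

    y = splitfn(x, transform, maxbatch=16)  # processes 4 chunks of 16 rows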