Example #1
    def train_loop(self, num_steps, beta, anneal=True):
        # Assumes: import tensorflow as tf; from tensorflow import math as tfm;
        # from time import time; from tqdm import tqdm.
        beta_conv = tf.convert_to_tensor(beta, tf.float32)  # target (final) beta
        beta = beta_conv  # used as-is when annealing is disabled
        history = {
            'step': [],
            'Free energy mean': [],
            'Free energy std': [],
            'Energy mean': [],
            'Energy std': [],
            'Train time': []
        }
        interval = 20

        t1 = time()

        for step in tqdm(range(num_steps)):
            if anneal:
                # Exponential schedule: beta ramps from 0 towards beta_conv.
                beta = beta_conv * (1 - self.beta_anneal**step)
            loss, energy = self.backprop(beta)  # type: ignore

            if (step % interval) == interval - 1:  # log once every `interval` steps
                t2 = time()
                history['step'].append(step + 1)
                history['Free energy mean'].append(tfm.reduce_mean(loss))
                history['Free energy std'].append(tfm.reduce_std(loss))
                history['Energy mean'].append(tfm.reduce_mean(energy))
                history['Energy std'].append(tfm.reduce_std(energy))
                history['Train time'].append((t2 - t1) / interval)
                t1 = time()

        return history
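The annealing line above is an exponential schedule, beta_step = beta * (1 - beta_anneal**step), ramping beta from 0 towards its target. A minimal standalone sketch, assuming a hypothetical decay rate of 0.998 (the class stores its own value as self.beta_anneal):

import tensorflow as tf

beta_target = tf.convert_to_tensor(1.0, tf.float32)
beta_anneal = 0.998  # hypothetical decay rate

for step in [0, 100, 1000, 5000]:
    beta = beta_target * (1 - beta_anneal**step)
    print(step, float(beta))
# step 0 -> 0.0, step 100 -> ~0.18, step 1000 -> ~0.86, step 5000 -> ~1.0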
Example #2
    def var_train_loop(self, num_steps, anneal=True, mean=0.5, delta=0.1):
        # Assumes the same imports as train_loop above.
        history = {
            'step': [],
            'Free energy mean': [],
            'Free energy std': [],
            'Energy mean': [],
            'Energy std': [],
            'Train time': []
        }
        interval = 20
        t1 = time()

        for step in tqdm(range(num_steps)):
            if anneal:
                mean_beta = mean * (1 - self.beta_anneal**step)
            else:
                mean_beta = mean
            # Draw a fresh scalar beta from N(mean_beta, delta) each step.
            beta = tf.random.normal([], mean_beta, delta)
            sample = self.model.graph_sampler(self.batch_size, beta, self.seed)
            loss, energy = self.var_backprop(sample, beta)  # type: ignore

            if (step % interval) == interval - 1:  # log once every `interval` steps
                t2 = time()
                history['step'].append(step + 1)
                history['Free energy mean'].append(tfm.reduce_mean(loss))
                history['Free energy std'].append(tfm.reduce_std(loss))
                history['Energy mean'].append(tfm.reduce_mean(energy))
                history['Energy std'].append(tfm.reduce_std(energy))
                history['Train time'].append((t2 - t1) / interval)
                t1 = time()

        return history
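Unlike train_loop, this loop draws a fresh scalar beta from a normal distribution centred on the annealed mean at every step. A self-contained sketch of just that sampling, with the default mean=0.5 and delta=0.1:

import tensorflow as tf

mean_beta, delta = 0.5, 0.1
# shape [] yields a 0-D tensor, matching the scalar beta used above.
betas = [float(tf.random.normal([], mean_beta, delta)) for _ in range(5)]
print(betas)  # e.g. [0.47, 0.61, 0.52, 0.38, 0.55]; values vary per run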
Example #3
def _interpolate(a, b=None):
    # Assumes: from tensorflow import random, shape, math.
    if b is None:  # DRAGAN-style interpolation: perturb a around itself
        beta = random.uniform(shape=shape(a), minval=0., maxval=1.)
        b = a + 0.5 * math.reduce_std(a) * beta
    # One uniform alpha per sample, broadcast across the remaining dims.
    shape_ = [shape(a)[0]] + [1] * (a.shape.ndims - 1)
    alpha = random.uniform(shape=shape_, minval=0., maxval=1.)
    inter = a + alpha * (b - a)
    inter.set_shape(a.shape)
    return inter
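A typical caller of _interpolate is the gradient-penalty term of WGAN-GP (or DRAGAN when b is omitted). A hedged sketch of that use, where f is a stand-in critic and real/fake are stand-in batches, not anything from the original code:

import tensorflow as tf
from tensorflow import random, shape, math  # imports _interpolate relies on

f = lambda x: tf.reduce_sum(x**2, axis=[1, 2, 3])  # stand-in critic
real = tf.random.normal([8, 32, 32, 3])
fake = tf.random.normal([8, 32, 32, 3])

x = _interpolate(real, fake)  # or _interpolate(real) for the DRAGAN variant
with tf.GradientTape() as tape:
    tape.watch(x)
    pred = f(x)
grad = tape.gradient(pred, x)
norm = tf.norm(tf.reshape(grad, [shape(grad)[0], -1]), axis=1)
gp = tf.reduce_mean((norm - 1.) ** 2)  # penalise critic gradients away from norm 1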
Example #4
    def call(self, inputs):
        # Assumes: from tensorflow import reduce_mean, subtract, divide, squeeze;
        # from tensorflow.math import reduce_std.
        mean = reduce_mean(inputs, axis=0)
        std = reduce_std(inputs, axis=0) + 1e-6  # epsilon guards against division by zero

        # Accumulate per-batch statistics on the class for later averaging.
        InputBatchNormalization.temp += 1
        InputBatchNormalization.mean += mean
        InputBatchNormalization.std += std

        inputs = divide(subtract(inputs, mean), std)

        return squeeze(inputs, axis=0)  # tf.Tensor has no .squeeze() method
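The class-attribute counters suggest the layer accumulates per-batch statistics across a full pass over the data, so average dataset statistics can be read out afterwards. A sketch of that read-out, assuming mean, std and temp start at zero before the pass:

# After calling the layer on every batch once:
dataset_mean = InputBatchNormalization.mean / InputBatchNormalization.temp
dataset_std = InputBatchNormalization.std / InputBatchNormalization.temp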
Example #5
    def call(self, x):
        input_shape = x.shape.as_list()
        # Reduce over the batch axis too when self.batch is set; fall back to
        # four axes when the static shape list is empty.
        axis = tuple(
            range(0 if self.batch else 1,
                  len(input_shape) if input_shape else 4))
        if self.mean:
            x = x - tfm.reduce_mean(x, axis=axis, keepdims=True)
            if self.std:
                std = tfm.sqrt(
                    tfm.reduce_mean(tfm.square(x), axis=axis, keepdims=True))
        elif self.std:
            std = tfm.reduce_std(x, axis=axis, keepdims=True)
        if self.std:
            return x / ((self.eps + std) if self.eps else std)
        return x
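A quick self-contained check of the mean=True, std=True, batch=False path on a 4-D tensor, assuming an epsilon of 1e-5:

import tensorflow as tf
from tensorflow import math as tfm

x = tf.random.normal([8, 32, 32, 3])
axis = (1, 2, 3)  # per-sample reduction, as computed above for batch=False
x = x - tfm.reduce_mean(x, axis=axis, keepdims=True)
std = tfm.sqrt(tfm.reduce_mean(tfm.square(x), axis=axis, keepdims=True))
y = x / (1e-5 + std)

print(float(tfm.reduce_mean(y)), float(tfm.reduce_std(y)))  # ~0.0, ~1.0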
Example #6
    def execute(self, inputs):
        # Assumes: from tensorflow import reduce_mean, transpose, broadcast_to;
        # from tensorflow.math import reduce_std.
        inputShape = inputs.shape
        means = reduce_mean(inputs, [-2, -3])
        standardDeviations = reduce_std(inputs, [-2, -3])
        # NOTE: the broadcasting below materialises full-size copies of the
        # statistics; it should be updated at some point to make it much more
        # VRAM efficient.

        # Broadcast the [N, C] means to [H, W, N, C], then transpose back to
        # the input layout [N, H, W, C] for itemwise subtraction.
        formattedMeans = transpose(
            broadcast_to(
                means,
                [inputShape[1], inputShape[2], inputShape[0], inputShape[3]]),
            perm=[2, 0, 1, 3])
        # Same layout trick for the standard deviations (itemwise division).
        formattedStddev = transpose(
            broadcast_to(
                standardDeviations,
                [inputShape[1], inputShape[2], inputShape[0], inputShape[3]]),
            perm=[2, 0, 1, 3])

        out = ((inputs - formattedMeans) /
               formattedStddev) * self.stddev + self.mean
        return out
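As the NOTE anticipates, the broadcast_to/transpose copies can be avoided entirely: reducing with keepdims=True leaves singleton H and W axes, and ordinary broadcasting then lines the statistics up with the input. A sketch of that equivalent, more memory-friendly formulation:

import tensorflow as tf
from tensorflow import reduce_mean
from tensorflow.math import reduce_std

x = tf.random.normal([4, 16, 16, 8])
means = reduce_mean(x, [-2, -3], keepdims=True)  # shape [N, 1, 1, C]
stds = reduce_std(x, [-2, -3], keepdims=True)    # shape [N, 1, 1, C]
out = (x - means) / stds  # then * self.stddev + self.mean in the layer above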