Example #1
def do_matops(shape=(100,100)):
    A = uniform(shape)
    B = uniform(shape)
    C = matmul(A,B)
    D = matmul(B,A)
    eig(A); eig(B); eig(C); eig(D)   # eigendecompositions of the inputs and their products
    svd(A); svd(B); svd(C); svd(D)   # singular value decompositions of the same matrices
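The names above are used unqualified; a minimal sketch of the assumed TensorFlow imports and a call (the aliasing is an assumption, not taken from the original project):

# Assumed imports for the snippet above.
from tensorflow import matmul
from tensorflow.linalg import eig, svd
from tensorflow.random import uniform

do_matops()              # decompose two random 100x100 matrices and their products
do_matops((512, 512))    # same operations on larger square matrices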
Example #2
 def _interpolate(a, b=None):
     if b is None:  # interpolation in DRAGAN
         beta = random.uniform(shape=shape(a), minval=0., maxval=1.)
         b = a + 0.5 * math.reduce_std(a) * beta
     shape_ = [shape(a)[0]] + [1] * (a.shape.ndims - 1)
     alpha = random.uniform(shape=shape_, minval=0., maxval=1.)
     inter = a + alpha * (b - a)
     inter.set_shape(a.shape)
     return inter
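A minimal usage sketch, assuming `random`, `math`, and `shape` are the TensorFlow modules/ops of the same name; the batch tensors below are hypothetical:

# Assumed imports and hypothetical inputs for the snippet above.
from tensorflow import random, math, shape

x_real = random.normal((8, 32, 32, 3))
x_fake = random.normal((8, 32, 32, 3))
x_dragan = _interpolate(x_real)           # DRAGAN: perturb the real batch with scaled noise
x_wgan_gp = _interpolate(x_real, x_fake)  # WGAN-GP: interpolate between real and fake samples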
Example #3
def get_initial_weights(K, N, L):
    from tensorflow import random as tfrandom, int32 as tfint32
    return {
        'Alice': tfrandom.uniform((K, N),
                                  minval=-L,
                                  maxval=L + 1,
                                  dtype=tfint32),
        'Bob': tfrandom.uniform((K, N), minval=-L, maxval=L + 1,
                                dtype=tfint32),
        # TODO: doesn't work for probabilistic:
        'Eve': tfrandom.uniform((K, N), minval=-L, maxval=L + 1, dtype=tfint32)
    }
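A hedged usage sketch; the sizes are arbitrary, and the weights land in [-L, L] because maxval is exclusive for integer dtypes:

weights = get_initial_weights(K=8, N=12, L=4)  # hypothetical TPM dimensions
print(weights['Alice'].shape)                  # (8, 12)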
Example #4
def do_gradops(opt,loss,model,insh,t=None):
    with GradientTape() as tape:
        y = do_eval(model, insh)[1]
        if t is None:
            t = uniform(shape(y))
        l = loss(t,y)
    grad = tape.gradient(l,model.trainable_weights)
    opt.apply_gradients(zip(grad,model.trainable_weights))
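A sketch of a call, assuming do_eval from Example #14 below and a small Keras model; all concrete names and shapes are illustrative:

# Hypothetical setup for the snippet above.
import tensorflow as tf
from tensorflow import GradientTape, shape
from tensorflow.random import uniform

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
do_gradops(tf.keras.optimizers.Adam(), tf.keras.losses.MeanSquaredError(),
           model, insh=(2, 8))  # one gradient step against a random target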
Example #5
 def train_g(self):
     z = random.uniform((self.batch_size, 1, 1, self.z_dim))
     with tf.GradientTape() as t:
         x_fake = self.G(z, training=True)
         fake_logits = self.D(x_fake, training=True)
         loss = ops.g_loss_fn(fake_logits)
     grad = t.gradient(loss, self.G.trainable_variables)
     self.g_opt.apply_gradients(zip(grad, self.G.trainable_variables))
     return loss
Example #6
    def trainable(config, reporter):
        """
        Args:
            config (dict): Parameters provided from the search algorithm
                or variant generation.
        """
        if not isinstance(config['update_rule'], str):
            update_rule = update_rules[int(config['update_rule'])]
        else:
            update_rule = config['update_rule']
        K, N, L = int(config['K']), int(config['N']), int(config['L'])

        run_name = f"run-{get_session_num(logdir)}"
        run_logdir = join(logdir, run_name)
        # for each attack, the TPMs should start with the same weights
        initial_weights_tensors = get_initial_weights(K, N, L)
        training_steps_ls = {}
        eve_scores_ls = {}
        losses_ls = {}
        # for each attack, the TPMs should use the same inputs
        seed = tfrandom.uniform([],
                                minval=0,
                                maxval=tfint64.max,
                                dtype=tfint64).numpy()
        for attack in ['none', 'geometric']:
            initial_weights = {
                tpm: weights_tensor_to_variable(weights, tpm)
                for tpm, weights in initial_weights_tensors.items()
            }
            tfrandom.set_seed(seed)

            if tensorboard:
                attack_logdir = join(run_logdir, attack)
                attack_writer = tensorflow.summary.create_file_writer(
                    attack_logdir)
                with attack_writer.as_default():
                    training_steps, sync_scores, loss = run(
                        update_rule, K, N, L, attack, initial_weights)
            else:
                training_steps, sync_scores, loss = run(
                    update_rule, K, N, L, attack, initial_weights)
            training_steps_ls[attack] = training_steps
            eve_scores_ls[attack] = sync_scores
            losses_ls[attack] = loss
        avg_training_steps = tensorflow.math.reduce_mean(
            list(training_steps_ls.values()))
        avg_eve_score = tensorflow.math.reduce_mean(
            list(eve_scores_ls.values()))
        mean_loss = tensorflow.math.reduce_mean(list(losses_ls.values()))
        reporter(
            avg_training_steps=avg_training_steps.numpy(),
            avg_eve_score=avg_eve_score.numpy(),
            mean_loss=mean_loss.numpy(),
            done=True,
        )
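A hedged sketch of how a trainable with this (config, reporter) signature is typically launched with Ray Tune's legacy function API; the concrete config values below are assumptions:

# Hypothetical launch; assumes ray[tune] and the surrounding module's globals.
from ray import tune

tune.run(
    trainable,
    config={'update_rule': 'hebbian', 'K': 8, 'N': 12, 'L': 4},
)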
Example #7
 def gradient_penalty(self, f, real, fake):
     alpha = random.uniform([self.batch_size, 1, 1, 1], 0., 1.)
     diff = fake - real
     inter = real + (alpha * diff)
     with tf.GradientTape() as t:
         t.watch(inter)
         pred = f(inter)
     grad = t.gradient(pred, [inter])[0]
     slopes = tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2, 3]))
     gp = tf.reduce_mean((slopes - 1.)**2)
     return gp
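The returned value is the standard WGAN-GP gradient penalty, the mean of (||grad D(x_inter)||_2 - 1)^2 over samples interpolated between real and fake data; Example #9 below adds it to the critic loss weighted by grad_penalty_weight.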
Example #8
def build_model_dense(architecture=None):
    if architecture is None:
        depth = int(uniform([1],2,10))
        units = [2**int(uniform([1],2,9)) for _ in range(depth)]
        activations = ['elu','exponential','linear','relu','selu','swish','tanh']
        architecture = []
        for i in range(depth):
            j = int(uniform([1],0,len(activations)))
            doNorm = int(uniform([1],0,2))
            architecture.append((units[i],activations[j],doNorm))
    inp = Input((architecture[0][0],))
    x = Dense(architecture[0][0], activation=architecture[0][1])(inp)
    for units,acti,doNorm in architecture[1:-1]:
        x = Dense(units,activation=acti)(x)
        if doNorm:
            x = BatchNormalization()(x)
    out = Dense(architecture[-1][0],activation=architecture[-1][1])(x)
    loss = 'categorical_crossentropy'
    optimizers = ['adadelta','adagrad','adam','adamax','ftrl','nadam','rmsprop','sgd']
    optimizer = optimizers[int(uniform([1],0,len(optimizers)))]
    return Model(inputs=inp,outputs=out),architecture[0][0],loss,optimizer
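A hedged sketch of the assumed Keras imports and a call; compiling with the returned loss/optimizer strings is my assumption about how the tuple is meant to be used:

# Assumed imports for the snippet above.
from tensorflow.keras.layers import BatchNormalization, Dense, Input
from tensorflow.keras.models import Model
from tensorflow.random import uniform

model, input_dim, loss, optimizer = build_model_dense()  # random depth, widths and activations
model.compile(optimizer=optimizer, loss=loss)
model.summary()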
Example #9
 def train_d(self, x_real):
     z = random.uniform((self.batch_size, 1, 1, self.z_dim))
     with tf.GradientTape() as t:
         x_fake = self.G(z, training=True)
         fake_logits = self.D(x_fake, training=True)
         real_logits = self.D(x_real, training=True)
         cost = ops.d_loss_fn(fake_logits, real_logits)
         gp = self.gradient_penalty(partial(self.D, training=True), x_real,
                                    x_fake)
         cost += self.grad_penalty_weight * gp
     grad = t.gradient(cost, self.D.trainable_variables)
     self.d_opt.apply_gradients(zip(grad, self.D.trainable_variables))
     return cost
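A hedged sketch of how train_d and train_g (Example #5) are usually alternated in the outer loop; the gan object and dataset stand in for the surrounding class and data pipeline:

# Hypothetical training loop.
for x_real in dataset:            # e.g. a tf.data.Dataset of real image batches
    d_loss = gan.train_d(x_real)  # critic step, includes the gradient penalty
    g_loss = gan.train_g()        # generator step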
Example #10
def apply_phaseshuffle(args):
    x, rad = args
    pad_type = 'reflect'
    b, x_len, nch = x.get_shape().as_list()
    phase = random.uniform([], minval=-rad, maxval=rad + 1, dtype=int32)
    pad_l = maximum(phase, 0)
    pad_r = maximum(-phase, 0)
    phase_start = pad_r
    x = pad(x, [[0, 0], [pad_l, pad_r], [0, 0]], mode=pad_type)

    x = x[:, phase_start:phase_start + x_len]
    x.set_shape([b, x_len, nch])

    return x
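This is a WaveGAN-style phase-shuffle op; a hedged sketch of the assumed imports and a call on a hypothetical audio batch of shape [batch, samples, channels]:

# Assumed imports for the snippet above.
from tensorflow import int32, maximum, pad, random

x = random.uniform([4, 16384, 1])  # hypothetical audio batch
y = apply_phaseshuffle((x, 2))     # shift phase by up to +/-2 samples with reflect padding
print(y.shape)                     # (4, 16384, 1)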
Example #11
def test(generator, mapping_net):
    """
	Test the model.

	:param generator: generator model

	:return: None
	"""
    img = np.array(
        generator(adain_net,
                  mapping_net(uniform((batch_size, z_dim), minval=-1, maxval=1)),
                  np.random.randint(num_genres + 1, size=(batch_size, ))))

    ### Below, we've already provided code to save these generated images to files on disk
    # Rescale the image from (-1, 1) to (0, 255)
    img = ((img / 2) + 0.5) * 255
    # Convert to uint8
    img = img.astype(np.uint8)
    # Save images to disk
    for i in range(0, batch_size):
        img_i = img[i]
        s = out_dir + '/' + str(i) + '.png'
        imwrite(s, img_i)
Example #12
from tensorflow.compat.v1 import logging, placeholder, global_variables_initializer, Session
from tensorflow import Variable, random, zeros, name_scope, nn, matmul, sigmoid, reduce_mean, Graph
from tensorflow.compat.v1.train import GradientDescentOptimizer, Saver
from numpy import array, square

logging.set_verbosity(logging.ERROR)  # Suppress unnecessary warnings

with Graph().as_default():  # Use this graph as the default graph

    X = placeholder("float32", shape=[4, 2],
                    name='X')  # Placeholder that will hold the input
    Y = placeholder("float32", shape=[4, 1],
                    name='Y')  # Placeholder that will hold the target output

    W = Variable(
        random.uniform([2, 2], -1, 1), name="W"
    )  # Weight variable (the function's slope) for the input layer, random values between -1 and 1
    w = Variable(
        random.uniform([2, 1], -1, 1), name="w"
    )  # Weight variable (the function's slope) for the output layer, random values between -1 and 1

    c = Variable(
        zeros([4, 2]), name="c"
    )  # Bias variable (the axis offset) for the input layer, initialized with zeros
    b = Variable(
        zeros([4, 1]), name="b"
    )  # Bias variable (the axis offset) for the output layer, initialized with zeros

    with name_scope("hidden_layer") as scope:
        h = nn.relu(matmul(X, W) + c)  # Hidden-layer activations
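    # --- The snippet is cut off above. What follows is a hedged sketch of a plausible
    # --- continuation (output layer, squared-error loss, training loop), inferred only
    # --- from the imports at the top; it is an assumption, not the original code.
    with name_scope("output_layer") as scope:
        y_hat = sigmoid(matmul(h, w) + b)  # Network prediction

    loss = reduce_mean((Y - y_hat) ** 2)  # Mean squared error over the four XOR cases
    train_step = GradientDescentOptimizer(0.1).minimize(loss)

    XOR_X = array([[0, 0], [0, 1], [1, 0], [1, 1]])  # Truth-table inputs
    XOR_Y = array([[0], [1], [1], [0]])  # Expected XOR outputs

    with Session() as sess:
        sess.run(global_variables_initializer())
        for _ in range(10000):
            sess.run(train_step, feed_dict={X: XOR_X, Y: XOR_Y})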
Example #13
def train(
    generator,
    discriminator,
    dataset,
    genre_labels,
    manager,
    mapping_net,
    noise_net,
    adain_net,
):
    """
	Train the model for one epoch. Save a checkpoint every 500 or so batches.

	:param generator: generator model
	:param discriminator: discriminator model
	:param dataset: list of all album covers
	:param manager: the manager that handles saving checkpoints by calling save()

	:return: The average FID score over the epoch
	"""
    sum_fid = 0
    num_fid = 0  # number of FID samples taken, used to average at the end
    indices = tf.random.shuffle(tf.range(len(genre_labels)))
    num_examples = len(indices)

    # Loop over our data until we run out
    for i in range(num_examples):
        batch = tf.gather(
            dataset, indices[i:i + batch_size if i +
                             batch_size < num_examples else num_examples])
        labels = tf.gather(
            genre_labels, indices[i:i + batch_size if i +
                                  batch_size < num_examples else num_examples])

        z = uniform((batch_size, z_dim), minval=-1, maxval=1)

        with GradientTape(persistent=True) as tape:
            w = mapping_net(z)

            # generated images
            G_sample = generator(adain_net, w, labels)

            # test discriminator against real images
            logits_real = discriminator(batch, labels)
            # re-use discriminator weights on new inputs
            logits_fake = discriminator(G_sample, labels)

            g_loss = generator_loss(logits_fake)
            # g_loss = tf.reduce_sum(p)
            #g_loss = tf.reduce_sum(G_sample)
            d_loss = discriminator_loss(logits_real, logits_fake)

        map_grads = tape.gradient(g_loss, mapping_net.trainable_variables
                                  )  # success measured by same parameters
        map_optimizer.apply_gradients(
            zip(map_grads, mapping_net.trainable_variables))

        a_grads = tape.gradient(g_loss, adain_net.trainable_variables
                                )  # success measured by same parameters
        adain_optimizer.apply_gradients(
            zip(a_grads, adain_net.trainable_variables))

        # optimize the generator and the discriminator
        gen_gradients = tape.gradient(g_loss, generator.trainable_variables)
        generator_optimizer.apply_gradients(
            zip(gen_gradients, generator.trainable_variables))

        if (i % num_gen_updates == 0):
            disc_gradients = tape.gradient(d_loss,
                                           discriminator.trainable_variables)
            discriminator_optimizer.apply_gradients(
                zip(disc_gradients, discriminator.trainable_variables))

        # Save
        if i % args.save_every == 0:
            manager.save()

        # Calculate the inception distance every 500 batches and track it
        # in order to return the average over the epoch
        if i % 500 == 0:
            fid_ = fid_function(batch, G_sample)
            print('**** D_LOSS: %g ****' % d_loss)
            print('**** G_LOSS: %g ****' % g_loss)
            print('**** INCEPTION DISTANCE: %g ****' % fid_)
            sum_fid += fid_
            num_fid += 1
    return sum_fid / num_fid
Example #14
def do_eval(model, insh):
    x = uniform(insh)
    y = model(x)
    return (x,y)
Example #15
def do_prepops(shape=(10000,50,50)):
    ds = uniform(shape)
    permutation = shuffle(range(shape[0]))
    permuted_ds = gather(ds, permutation, axis=0)
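The helper builds a randomly permuted copy of a synthetic dataset but does not return it; a sketch of the assumed imports and a call (adding a return is my assumption if the caller needs the result):

# Assumed imports for the snippet above.
from tensorflow import gather
from tensorflow.random import shuffle, uniform

do_prepops((1000, 50, 50))  # returns None as written; add `return permuted_ds` if the result is needed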