Example #1
def summ(step, wtr = tf.summary.FileWriter(pform(P.log, C.trial))
         , summary = tf.summary.merge(
             ( tf.summary.scalar('step_errt', model.errt)
             , tf.summary.scalar('step_loss', model.loss)))):
    # run every validation model over its data in batches, concatenate the
    # per-batch samples, and average them into one error rate and one loss
    errt, loss = map(comp(np.mean, np.concatenate), zip(*chain(*(
        batch_run(sess, m, (m.errt_samp, m.loss_samp), s, t, batch= C.batch_valid)
        for m, (s, t) in zip(valid, data_valid)))))
    # feed the aggregated numbers back through the summary ops and log them
    wtr.add_summary(sess.run(summary, {model.errt: errt, model.loss: loss}), step)
    wtr.flush()
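The `comp` helper from util is not shown in this listing; a minimal sketch consistent with its use as comp(np.mean, np.concatenate) above (an assumption, not the project's actual definition):

def comp(*fns):
    # comp(f, g)(x) applies g first and then f, so comp(np.mean, np.concatenate)
    # concatenates the per-batch arrays and then averages them
    def composed(x):
        for fn in reversed(fns):
            x = fn(x)
        return x
    return composed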
Example #2
            # presumably the tail of a truncated batching generator: emit the
            # completed batch, reset it, and keep appending encoded sentences
            yield src, tgt
            bat = []
        bat.append(enc(vocab, raw[i], cap=max_len))


###############
# build model #
###############

model_valid = vae('valid', **C)

if A.profile:
    from util_tf import profile
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        with tf.summary.FileWriter(pform(P.log, A.trial), sess.graph) as wtr:
            profile(sess, wtr, model_valid.loss, {
                model_valid.src: valid[:32],
                model_valid.tgt: valid[:32]
            })
if not A.rounds: sys.exit("profiling done")

src, tgt = pipe(batch, (tf.int32, tf.int32), prefetch=A.prefetch)
model_train = vae('train', src=src, tgt=tgt, **C)

############
# training #
############

sess = tf.InteractiveSession()
saver = tf.train.Saver()
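util_tf.pipe is used throughout these examples but never shown; a plausible sketch, assuming it simply wraps a Python generator as a tf.data one-shot iterator:

def pipe(gen_fn, types, prefetch=1):
    # wrap the generator in tf.data, prefetch batches in the background,
    # and hand back the output tensors of a one-shot iterator
    dataset = tf.data.Dataset.from_generator(gen_fn, types).prefetch(prefetch)
    return dataset.make_one_shot_iterator().get_next()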
Example #3
from itertools import permutations
from tqdm import tqdm
from trial import config as C, paths as P, train as T
from util import partial, comp, select
from util_io import pform, load_txt, save_txt
from util_np import np, partition, batch_sample
from util_sp import load_spm, encode, decode
from util_tf import tf, pipe
tf.set_random_seed(C.seed)

C.trial = 'm1_'

#############
# load data #
#############

valid_en, train_en = np.load(pform(P.data, "valid_en.npy")), np.load(pform(P.data, "train_en.npy"))
# valid_nl, train_nl = np.load(pform(P.data, "valid_nl.npy")), np.load(pform(P.data, "train_nl.npy"))
valid_de, train_de = np.load(pform(P.data, "valid_de.npy")), np.load(pform(P.data, "train_de.npy"))
# valid_da, train_da = np.load(pform(P.data, "valid_da.npy")), np.load(pform(P.data, "train_da.npy"))
valid_sv, train_sv = np.load(pform(P.data, "valid_sv.npy")), np.load(pform(P.data, "train_sv.npy"))

data_index =        0,        2,        4
data_valid = valid_en, valid_de, valid_sv
data_train = train_en, train_de, train_sv

def batch(arrs, size= C.batch_train, seed= C.seed):
    size //= len(arrs) * (len(arrs) - 1)
    for i in batch_sample(len(arrs[0]), size, seed):
        yield tuple(arr[i] for arr in arrs)

perm = comp(tuple, partial(permutations, r= 2))
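batch_sample from util_np is not shown either; a minimal sketch matching how it is called in `batch` above (an assumption): it endlessly yields `size` random row indices, seeded for reproducibility.

def batch_sample(n, size, seed):
    # infinite stream of index batches into an array of length n
    rng = np.random.RandomState(seed)
    while True:
        yield rng.choice(n, size, replace=False)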
Example #4
def train(anomaly_class, loss_type):
    #set gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    #load data
    (train_images,
     train_labels), (test_images,
                     test_labels) = tf.keras.datasets.mnist.load_data()
    inlier = train_images[train_labels != anomaly_class]
    x_train = np.reshape(inlier, (len(inlier), 28 * 28)) / 255
    #y_train = train_labels[train_labels!=anomaly_class]
    y_train = np.zeros(len(x_train), dtype=np.int8)  # dummy
    outlier = train_images[train_labels == anomaly_class]
    x_test = np.reshape(np.concatenate([outlier, test_images]),
                        (len(outlier) + len(test_images), 28 * 28)) / 255
    y_test = np.concatenate(
        [train_labels[train_labels == anomaly_class], test_labels])
    y_test = [0 if y != anomaly_class else 1 for y in y_test]
    x_test, y_test = unison_shfl(x_test, np.array(y_test))

    path_log = "/cache/tensorboard-logdir/ae"
    path_ckpt = "/project/outlier_detection/ckpt"

    epochs = 400
    batch_size = 700
    dim_btlnk = 32
    dim_z = dim_btlnk
    dim_dense = 32
    accelerate = 1e-5
    context_weight = 1
    trial = f"vaegan_{loss_type}_{anomaly_class}_b{batch_size}_btlnk{dim_btlnk}_d{dim_dense}_n{dim_z}_a{accelerate}"

    dim_x = len(x_train[0])
    #reset graphs and fix seeds
    tf.reset_default_graph()
    if 'sess' in globals(): sess.close()
    rand = RandomState(0)
    tf.set_random_seed(0)

    # data pipeline
    batch_fn = lambda: batch2(x_train, y_train, batch_size, dim_z)
    x, y, z = pipe(batch_fn, (tf.float32, tf.float32, tf.float32), prefetch=4)

    # load graph
    aegan = VAEGAN.new(dim_x, dim_btlnk, dim_dense, dim_z, accelerate)
    model = VAEGAN.build(aegan, x, y, z, loss_type)

    # start session, initialize variables

    sess = tf.InteractiveSession()
    saver = tf.train.Saver()

    wrtr = tf.summary.FileWriter(pform(path_log, trial))
    #wrtr.add_graph(sess.graph)

    ### if load pretrained model
    # pretrain = "modelname"
    #saver.restore(sess, pform(path_ckpt, pretrain))
    ### else:
    auc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='AUC')
    init = tf.group(tf.global_variables_initializer(),
                    tf.variables_initializer(var_list=auc_vars))
    sess.run(init)

    def log(step,
            wrtr=wrtr,
            log=tf.summary.merge([
                tf.summary.scalar('g_loss', model.g_loss),
                tf.summary.scalar('d_loss', model.d_loss),
                tf.summary.scalar('mu', model.m),
                tf.summary.scalar('lv', model.l),
                tf.summary.image('gzx400',
                                 spread_image(model.gzx[:400], 20, 20, 28, 28)),
                #tf.summary.image('gz400', spread_image(model.gz[:400], 20, 20, 28, 28)),
                tf.summary.scalar("AUC_gzx", model.auc_gzx),
                tf.summary.scalar("AUC_dgzx", model.auc_dgzx),
                tf.summary.scalar("AUC_dx", model.auc_dx),
                #tf.summary.scalar("gz_loss", model.gz_loss),
                tf.summary.scalar("gzx_loss", model.gzx_loss),
                tf.summary.scalar("ftr_loss", model.ftr_loss),
                tf.summary.scalar("kl_loss", model.kl_loss),
                tf.summary.scalar("dx_loss", model.dx_loss),
                #tf.summary.scalar("dgz_loss", model.dgz_loss),
                tf.summary.scalar("dgzx_loss", model.dgzx_loss)
            ]),
            y=y_test,
            x=x_test):
        mu = sess.run(model.mu, {model.x: x})
        wrtr.add_summary(sess.run(log, {
            model.zx: mu,
            model.x: x,
            model.y: y
        }), step)
        wrtr.flush()

    steps_per_epoch = len(x_train) // batch_size
    for epoch in tqdm(range(epochs)):
        for i in range(steps_per_epoch):
            sess.run(model.g_step)
            sess.run(model.d_step)

        # tensorboard writer
        log(sess.run(model["step"]) // steps_per_epoch)

    saver.save(sess, pform(path_ckpt, trial), write_meta_graph=False)
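batch2 is not defined in these listings; a hypothetical sketch matching both call sites (with and without dim_z, as in Examples #4 and #5): it loops over the training data forever, yielding (x, y) batches plus a fresh standard-normal z batch when a latent dimension is given.

def batch2(x, y, size, dim_z=None, seed=0):
    rng = np.random.RandomState(seed)
    while True:
        idx = rng.choice(len(x), size, replace=False)
        if dim_z is None:
            yield x[idx], y[idx]
        else:
            # sample a new latent batch alongside the data batch
            yield x[idx], y[idx], rng.standard_normal((size, dim_z))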
Example #5
def train(anomaly_class=8):
    #set gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    #load data
    (train_images,
     train_labels), (test_images,
                     test_labels) = tf.keras.datasets.mnist.load_data()
    inlier = train_images[train_labels != anomaly_class]
    x_train = np.reshape(inlier, (len(inlier), 28 * 28)) / 255
    #y_train = train_labels[train_labels!=anomaly_class]
    y_train = np.zeros(len(x_train), dtype=np.int8)  # dummy

    outlier = train_images[train_labels == anomaly_class]
    x_test = np.reshape(np.concatenate([outlier, test_images]),
                        (len(outlier) + len(test_images), 28 * 28)) / 255
    y_test = np.concatenate(
        [train_labels[train_labels == anomaly_class], test_labels])
    y_test = [0 if y != anomaly_class else 1 for y in y_test]
    x_test, y_test = unison_shfl(x_test, np.array(y_test))

    path_log = "/cache/tensorboard-logdir/ae"
    path_ckpt = "/project/outlier_detection/ckpt"

    epochs = 400
    batch_size = 700
    dim_btlnk = 32
    mult = 20
    lr_max = 1e-4
    trial = f"dae{anomaly_class}_b{batch_size}_btlnk{dim_btlnk}_lr_{lr_max}m{mult}"
    #trial="test1"
    dim_x = len(x_train[0])

    #reset graphs and fix seeds
    tf.reset_default_graph()
    if 'sess' in globals(): sess.close()
    rand = RandomState(0)
    tf.set_random_seed(0)

    # data pipeline
    batch_fn = lambda: batch2(x_train, y_train, batch_size)
    x, y = pipe(batch_fn, (tf.float32, tf.float32), prefetch=4)
    #z = tf.random_normal((batch_size, z_dim))

    # load graph
    dae = DAE.new(dim_x, dim_btlnk)
    model = DAE.build(dae, x, y, lr_max, mult)

    # start session, initialize variables
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()

    wrtr = tf.summary.FileWriter(pform(path_log, trial))
    #wrtr.add_graph(sess.graph)

    ### if load pretrained model
    # pretrain = "modelname"
    #saver.restore(sess, pform(path_ckpt, pretrain))
    ### else:
    auc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='AUC')
    init = tf.group(tf.global_variables_initializer(),
                    tf.variables_initializer(var_list=auc_vars))
    sess.run(init)

    def log(step,
            wrtr=wrtr,
            log=tf.summary.merge([
                tf.summary.scalar('g_loss', model.g_loss),
                tf.summary.scalar('d_loss', model.d_loss),
                tf.summary.image('gx400',
                                 spread_image(model.gx[:400], 20, 20, 28, 28)),
                tf.summary.image('dgx400',
                                 spread_image(model.dgx[:400], 20, 20, 28,
                                              28)),
                tf.summary.image('dx400',
                                 spread_image(model.dx[:400], 20, 20, 28, 28)),
                tf.summary.scalar("AUC_dgx", model.auc_dgx),
                tf.summary.scalar("AUC_dx", model.auc_dx),
                tf.summary.scalar("AUC_gx", model.auc_gx)
            ]),
            y=y_test,
            x=x_test):
        wrtr.add_summary(sess.run(log, {model.x: x, model.y: y}), step)
        wrtr.flush()

    steps_per_epoch = len(x_train) // batch_size
    for epoch in tqdm(range(epochs)):
        for i in range(steps_per_epoch):
            sess.run(model.d_step)
            sess.run(model.g_step)

        # tensorboard writer
        log(sess.run(model["step"]) // steps_per_epoch)

    saver.save(sess, pform(path_ckpt, trial), write_meta_graph=False)
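unison_shfl is also not shown; presumably it shuffles the test inputs and labels with one shared permutation, along these lines:

def unison_shfl(x, y, seed=0):
    # same permutation for both arrays so inputs and labels stay aligned
    perm = np.random.RandomState(seed).permutation(len(x))
    return x[perm], y[perm]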
Example #6
from trial import config as C, paths as P, train as T
from util import partial, comp, select
from util_io import pform, load_txt, save_txt
from util_np import np, partition, batch_sample
from util_sp import load_spm, encode, decode
from util_tf import tf, pipe
tf.set_random_seed(C.seed)

C.trial = 'm4_'

#############
# load data #
#############

# valid_en, train_en = np.load(pform(P.data, "valid_en.npy")), np.load(pform(P.data, "train_en.npy"))
valid_nl, train_nl = np.load(pform(P.data, "valid_nl.npy")), np.load(pform(P.data, "train_nl.npy"))
# valid_de, train_de = np.load(pform(P.data, "valid_de.npy")), np.load(pform(P.data, "train_de.npy"))
valid_da, train_da = np.load(pform(P.data, "valid_da.npy")), np.load(pform(P.data, "train_da.npy"))
# valid_sv, train_sv = np.load(pform(P.data, "valid_sv.npy")), np.load(pform(P.data, "train_sv.npy"))

train_nl = train_nl[:2**17].copy()
train_da = train_da[:2**17].copy()

data_index =        1,        3
data_valid = valid_nl, valid_da
data_train = train_nl, train_da

def batch(arrs, size= C.batch_train, seed= C.seed):
    size //= len(arrs) * (len(arrs) - 1)
    for i in batch_sample(len(arrs[0]), size, seed):
        yield tuple(arr[i] for arr in arrs)
Example #7
from collections import defaultdict
from tqdm import tqdm
from trial import paths as P
from util import partial
from util_io import pform, load_txt, save_txt
from util_np import np, vpack
from util_sp import spm, load_spm, decode

langs = 'en', 'nl', 'de', 'da', 'sv'

#######################
# align all 5 corpora #
#######################

# load all corpora
corp2pairs = {
    corp: tuple((s, t) for s, t in zip(
        map(str.strip,
            load_txt(pform(P.raw, "europarl-v7.{}-en.{}".format(corp, corp)))),
        map(str.strip,
            load_txt(pform(P.raw, "europarl-v7.{}-en.en".format(corp)))))
                if 0 < len(s) and 0 < len(t))
    for corp in langs[1:]
}

# partition into equivalence classes
sent2class = defaultdict(set)
for corp, pairs in corp2pairs.items():
    for s, t in tqdm(pairs, ncols=70):
        s = s, corp
        t = t, 'en'
        c = set.union(sent2class[s], sent2class[t])
        c.add(s)
        c.add(t)
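The listing is cut off here; a plausible continuation (an assumption, not the original code) would write the merged class back for every member, so that later unions see all previous members:

        for x in c:
            sent2class[x] = c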
Example #8
File: data.py Project: ysmiraak/eti
#!/usr/bin/env python3

from trial import config as C, paths as P, train as T
from util_io import pform, load_txt, save_txt
from util_np import np, vpack
from util_sp import spm

path_src = pform(P.raw, "europarl-v7.de-en.de")
path_tgt = pform(P.raw, "europarl-v7.de-en.en")

###############
# build vocab #
###############

vocab_src = spm(pform(P.data, "vocab_src"), path_src, C.dim_src, C.bos, C.eos, C.unk)
vocab_tgt = spm(pform(P.data, "vocab_tgt"), path_tgt, C.dim_tgt, C.bos, C.eos, C.unk)

#############
# load data #
#############

src_tgt = list(zip(load_txt(path_src), load_txt(path_tgt)))
np.random.seed(C.seed)
np.random.shuffle(src_tgt)

####################
# filter and split #
####################

train_src = []
train_tgt = []
Example #9
from trial import config as C, paths as P, train as T
from util import partial, comp, select
from util_io import pform, load_txt, save_txt
from util_np import np, partition, batch_sample
from util_sp import load_spm, encode, decode
from util_tf import tf, pipe
tf.set_random_seed(C.seed)

C.trial = 'm5_'

#############
# load data #
#############

# valid_en, train_en = np.load(pform(P.data, "valid_en.npy")), np.load(pform(P.data, "train_en.npy"))
valid_nl, train_nl = np.load(pform(P.data, "valid_nl.npy")), np.load(
    pform(P.data, "train_nl.npy"))
# valid_de, train_de = np.load(pform(P.data, "valid_de.npy")), np.load(pform(P.data, "train_de.npy"))
valid_da, train_da = np.load(pform(P.data, "valid_da.npy")), np.load(
    pform(P.data, "train_da.npy"))
# valid_sv, train_sv = np.load(pform(P.data, "valid_sv.npy")), np.load(pform(P.data, "train_sv.npy"))

train_nl = train_nl[:2**17].copy()
train_da = train_da[:2**17].copy()

data_index = 1, 3
data_valid = valid_nl, valid_da
data_train = train_nl, train_da


def batch(arrs, size=C.batch_train, seed=C.seed):
Example #10
from model import vAe, encode, decode
from util_io import pform
from util_np import np
from util_tf import tf
import util_sp as sp

path_vocab = "../trial/data/vocab.model"
path_ckpt = "../trial/ckpt"

vocab = sp.load_spm(path_vocab)
s0 = "This is a test."
s1 = "Dragons have been portrayed in film and television in many different forms."
s2 = "His development of infinitesimal calculus opened up new applications of the methods of mathematics to science."
tgt = sp.encode(vocab, (s0, s1, s2))

vae = vAe('infer')
sess = tf.InteractiveSession()


def auto(z, steps=256):
    for s in sp.decode(vocab, decode(sess, vae, z, steps)):
        print(s)


for i in range(1, 7):
    print()
    ckpt = "master{}".format(i)
    tf.train.Saver().restore(sess, pform(path_ckpt, ckpt))
    auto(encode(sess, vae, tgt))
    auto(np.zeros((1, int(vae.mu.shape[1]))))
Example #11
from itertools import permutations
from model import Model, batch_run
from tqdm import tqdm
from trial import config as C, paths as P, train as T
from util import partial, comp, select
from util_io import pform, load_txt, save_txt
from util_np import np, partition, batch_sample
from util_sp import load_spm, encode, decode
from util_tf import tf, pipe
tf.set_random_seed(C.seed)

C.trial = "m3_"
C.ckpt = 3

langs = 'en', 'nl', 'de', 'da', 'sv'
vocab = tuple(
    load_spm(pform(P.data, "vocab_{}.model".format(lang))) for lang in langs)
sents = tuple(
    encode(voc, load_txt(pform(P.data, "eval_{}.txt".format(lang))))
    for lang, voc in zip(langs, vocab))

index = tuple(permutations(range(5), 2))
model = Model.new(**select(C, *Model._new))
model = tuple(model.data(i, j).infer() for i, j in index)

sess = tf.InteractiveSession()
saver = tf.train.Saver()


def trans(sents, model, vocab):
    for preds in batch_run(sess, model, model.pred, sents,
                           batch=C.batch_infer):
Example #12
path_train = "../trial/data/train.txt"
path_valid = "../trial/data/valid.npy"
valid_size = 4096

from util_io import pform, load_json, clean, save_txt, load_txt
from util_np import np, vpack
from util_sp import load_spm, spm, encode, encode_capped
import json
import os

posts = tuple(
    clean(post[3])
    # extract the cleaned raw texts
    for filename in sorted(os.listdir(path_raw))
    # each json: posts, annotations, metadata
    for post in load_json(pform(path_raw, filename))[0]
    # each post: id, side(unused), author, raw text, annotations, parent post id, category (unused), timestamp
)

# removes empty posts
posts = tuple(post for post in posts if 0 < len(post))

# saves raw texts
save_txt(path_txt, posts)

# train a sentence piece model
spm(name=path_vocab, path=path_txt)

# load the trained sentence piece model
vocab = load_spm(path_vocab + ".model")
Example #13
def train(anomaly_class=8,
          dataset="cifar",
          n_dis=1,
          epochs=25,
          dim_btlnk=32,
          batch_size=64,
          loss="mean",
          context_weight=1,
          dim_d=64,
          dim_g=64,
          extra_layers=0,
          gpu="0"):

    #set gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    path_log = f"/cache/tensorboard-logdir/{dataset}"
    path_ckpt = "/project/multi-discriminator-gan/ckpt"
    path_data = "/project/multi-discriminator-gan/data"

    #reset graphs and fix seeds
    tf.reset_default_graph()
    if 'sess' in globals(): sess.close()
    rand = RandomState(0)
    tf.set_random_seed(0)

    #load data
    if dataset == "ucsd1":
        x_train = np.load("./data/ucsd1_train_x.npz")["arr_0"] / 255
        y_train = np.load("./data/ucsd1_train_y.npz")["arr_0"]
        x_test = np.load("./data/ucsd1_test_x.npz")["arr_0"] / 255
        y_test = np.load("./data/ucsd1_test_y.npz")["arr_0"]

    elif dataset == "ucsd2":
        x_train = np.load("./data/ucsd2_train_x.npz")["arr_0"]
        y_train = np.load("./data/ucsd2_train_y.npz")["arr_0"]
        x_test = np.load("./data/ucsd2_test_x.npz")["arr_0"]
        y_test = np.load("./data/ucsd2_test_y.npz")["arr_0"]

    else:
        if dataset == "mnist":
            (train_images, train_labels), (
                test_images,
                test_labels) = tf.keras.datasets.mnist.load_data()
            train_images = resize_images(train_images)
            test_images = resize_images(test_images)
        else:
            (train_images, train_labels), (
                test_images,
                test_labels) = tf.keras.datasets.cifar10.load_data()
            train_labels = np.reshape(train_labels, len(train_labels))
            test_labels = np.reshape(test_labels, len(test_labels))

        inlier = train_images[train_labels != anomaly_class]
        #data_size = prod(inlier[0].shape)
        x_train = inlier / 255
        #x_train = np.reshape(inlier, (len(inlier), data_size))/255
        #y_train = train_labels[train_labels!=anomaly_class]
        y_train = np.zeros(len(x_train), dtype=np.int8)  # dummy
        outlier = train_images[train_labels == anomaly_class]
        x_test = np.concatenate([outlier, test_images]) / 255
        #x_test = np.reshape(np.concatenate([outlier, test_images])
        #                    ,(len(outlier)+len(test_images), data_size))/255
        y_test = np.concatenate(
            [train_labels[train_labels == anomaly_class], test_labels])
        y_test = [0 if y != anomaly_class else 1 for y in y_test]
        x_test, y_test = unison_shfl(x_test, np.array(y_test))

    img_size_x = x_train[0].shape[0]
    img_size_y = x_train[0].shape[1]
    channel = x_train[0].shape[-1]
    trial = f"{dataset}_{loss}_dis{n_dis}_{anomaly_class}_w{context_weight}_btlnk{dim_btlnk}_d{dim_d}_g{dim_g}e{extra_layers}"

    # data pipeline
    batch_fn = lambda: batch2(x_train, y_train, batch_size)
    x, y = pipe(batch_fn, (tf.float32, tf.float32), prefetch=4)
    #z = tf.random_normal((batch_size, z_dim))

    # load graph
    mg_gan = MG_GAN.new(img_size_x,
                        channel,
                        dim_btlnk,
                        dim_d,
                        dim_g,
                        n_dis,
                        extra_layers=extra_layers)
    model = MG_GAN.build(mg_gan, x, y, context_weight, loss)

    # start session, initialize variables

    sess = tf.InteractiveSession()
    saver = tf.train.Saver()

    wrtr = tf.summary.FileWriter(pform(path_log, trial))
    wrtr.add_graph(sess.graph)

    ### if load pretrained model
    # pretrain = "modelname"
    #saver.restore(sess, pform(path_ckpt, pretrain))
    ### else:
    auc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='AUC')
    init = tf.group(tf.global_variables_initializer(),
                    tf.variables_initializer(var_list=auc_vars))
    sess.run(init)

    #if "ucsd" in dataset:
    summary_test = tf.summary.merge([
        tf.summary.scalar('g_loss', model.g_loss),
        tf.summary.scalar("lambda", model.lam),
        tf.summary.scalar("gl_rec", model.gl_rec),
        tf.summary.scalar("gl_adv", model.gl_adv),
        tf.summary.scalar("gl_lam", model.gl_lam),
        tf.summary.scalar('d_loss_mean', model.d_loss_mean),
        tf.summary.scalar('d_max', model.d_max),
        #tf.summary.scalar('d_loss', model.d_loss),
        tf.summary.scalar("AUC_gx", model.auc_gx)
    ])
    if dataset == "ucsd1":
        summary_images = tf.summary.merge(
            (tf.summary.image("gx", model.gx, max_outputs=8),
             tf.summary.image("x", model.x, max_outputs=8),
             tf.summary.image(
                 'gx400',
                 spread_image(tf.concat([model.gx, model.x], axis=1), 8, 2,
                              img_size_x, img_size_y, channel))))
    else:
        summary_images = tf.summary.merge(
            (tf.summary.image("gx", model.gx, max_outputs=8),
             tf.summary.image(
                 'gx400',
                 spread_image(model.gx[:400], 20, 20, img_size_x, img_size_y,
                              channel)),
             tf.summary.image("x", model.x, max_outputs=8)))

    if n_dis > 1:
        d_wrtr = {
            i: tf.summary.FileWriter(pform(path_log, trial + f"d{i}"))
            for i in range(n_dis)
        }
        summary_discr = {
            i: tf.summary.scalar('d_loss_multi', model.d_loss[i])
            for i in range(n_dis)
        }

    def summ(step):
        fetches = model.g_loss, model.lam, model.d_loss_mean, model.auc_gx
        results = map(
            np.mean,
            zip(*(sess.run(fetches, {
                model['x']: x_test[i:j],
                model['y']: y_test[i:j]
            }) for i, j in partition(len(x_test), batch_size, discard=False))))
        results = list(results)
        wrtr.add_summary(sess.run(summary_test, dict(zip(fetches, results))),
                         step)

        if dataset == "ucsd1":
            # bike, skateboard, grasswalk, shopping cart, car, normal, normal, grass
            wrtr.add_summary(
                sess.run(
                    summary_images, {
                        model.x:
                        x_test[[990, 1851, 2140, 2500, 2780, 2880, 3380, 3580]]
                    }), step)
        else:
            wrtr.add_summary(sess.run(summary_images, {model.x: x_test}), step)
        wrtr.flush()

    def summ_discr(step):
        fetches = model.d_loss
        results = map(
            np.mean,
            zip(*(sess.run(fetches, {
                model['x']: x_test[i:j],
                model['y']: y_test[i:j]
            }) for i, j in partition(len(x_test), batch_size, discard=False))))
        results = list(results)
        if n_dis > 1:  # put all losses of the discriminators in one plot
            for i in range(n_dis):
                d_wrtr[i].add_summary(
                    sess.run(summary_discr[i], dict(zip(fetches, results))),
                    step)
                #d_wrtr[i].add_summary(sess.run(summary_discr[i], dict([(fetches[i], results[i])])), step)
                d_wrtr[i].flush()

    #def log(step
    #        , wrtr= wrtr
    #        , log = tf.summary.merge([tf.summary.scalar('g_loss', model.g_loss)
    #                                  , tf.summary.scalar('d_loss', tf.reduce_mean(model.d_loss))
    #                                  , tf.summary.scalar("lambda", model.lam)
    #                                  , tf.summary.image("gx", model.gx, max_outputs=5)
    #                                  , tf.summary.image('gx400', spread_image(model.gx[:400], 20,20, img_size, img_size, channel))
    #                                  #, tf.summary.scalar("AUC_dgx", model.auc_dgx)
    #                                  #, tf.summary.scalar("AUC_dx", model.auc_dx)
    #                                  , tf.summary.scalar("AUC_gx", model.auc_gx)])
    #        , y= y_test
    #        , x= x_test):
    #    wrtr.add_summary(sess.run(log, {model["x"]:x
    #                                    , model["y"]:y})
    #                     , step)
    #    wrtr.flush()

    steps_per_epoch = len(x_train) // batch_size - 1
    for epoch in tqdm(range(epochs)):
        for i in range(steps_per_epoch):
            #sess.run(model["train_step"])
            sess.run(model['d_step'])
            sess.run(model['g_step'])
        # tensorboard writer
        #if "ucsd" in dataset:
        summ(sess.run(model["step"]) // steps_per_epoch)
        #else:
        #    log(sess.run(model["step"])//steps_per_epoch)
        if n_dis > 1:
            summ_discr(sess.run(model["step"]) // steps_per_epoch)

    saver.save(sess, pform(path_ckpt, trial), write_meta_graph=False)
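spread_image, used in the image summaries of Examples #4, #5, and #13, is not shown either; a hypothetical graph-mode sketch that tiles rows * cols images into one grid image so tf.summary.image can display them together:

def spread_image(x, rows, cols, height, width, channel=1):
    # lay the images out as a rows-by-cols grid in one summary image
    x = tf.reshape(x, (rows, cols, height, width, channel))
    x = tf.transpose(x, (0, 2, 1, 3, 4))  # rows, height, cols, width, channel
    return tf.reshape(x, (1, rows * height, cols * width, channel))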
Example #14
from trial import config as C, paths as P, train as T
from util import partial, comp, select
from util_io import pform, load_txt, save_txt
from util_np import np, partition, batch_sample
from util_sp import load_spm, encode, decode
from util_tf import tf, pipe
tf.set_random_seed(C.seed)

C.trial = 'm2_'

#############
# load data #
#############

# valid_en, train_en = np.load(pform(P.data, "valid_en.npy")), np.load(pform(P.data, "train_en.npy"))
valid_nl, train_nl = np.load(pform(P.data, "valid_nl.npy")), np.load(
    pform(P.data, "train_nl.npy"))
# valid_de, train_de = np.load(pform(P.data, "valid_de.npy")), np.load(pform(P.data, "train_de.npy"))
valid_da, train_da = np.load(pform(P.data, "valid_da.npy")), np.load(
    pform(P.data, "train_da.npy"))
# valid_sv, train_sv = np.load(pform(P.data, "valid_sv.npy")), np.load(pform(P.data, "train_sv.npy"))

data_index = 1, 3
data_valid = valid_nl, valid_da
data_train = train_nl, train_da


def batch(arrs, size=C.batch_train, seed=C.seed):
    size //= len(arrs) * (len(arrs) - 1)
    for i in batch_sample(len(arrs[0]), size, seed):
        yield tuple(arr[i] for arr in arrs)
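For completeness, a sketch of how this batch generator is typically wired into the graph, mirroring the pipe calls in Examples #2 and #4 (the prefetch value here is illustrative, not the project's setting):

# partial is already imported above; pipe wraps the generator as a tf.data input
src, tgt = pipe(partial(batch, data_train), (tf.int32, tf.int32), prefetch=16)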