###############

model_valid = vae('valid', **C)

if A.profile:
    from util_tf import profile
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        with tf.summary.FileWriter(pform(P.log, A.trial), sess.graph) as wtr:
            profile(sess, wtr, model_valid.loss,
                    {model_valid.src: valid[:32], model_valid.tgt: valid[:32]})
    if not A.rounds:
        sys.exit("profiling done")

src, tgt = pipe(batch, (tf.int32, tf.int32), prefetch=A.prefetch)
model_train = vae('train', src=src, tgt=tgt, **C)

############
# training #
############

sess = tf.InteractiveSession()
saver = tf.train.Saver()

if A.ckpt:
    saver.restore(sess, pform(P.ckpt, A.ckpt))
else:
    tf.global_variables_initializer().run()

wtr = tf.summary.FileWriter(pform(P.log, A.trial))
summary = tf.summary.merge(
# valid_da, train_da = np.load(pform(P.data, "valid_da.npy")), np.load(pform(P.data, "train_da.npy"))
valid_sv, train_sv = np.load(pform(P.data, "valid_sv.npy")), np.load(pform(P.data, "train_sv.npy"))

data_index = 0, 2, 4
data_valid = valid_en, valid_de, valid_sv
data_train = train_en, train_de, train_sv


def batch(arrs, size=C.batch_train, seed=C.seed):
    size //= len(arrs) * (len(arrs) - 1)
    for i in batch_sample(len(arrs[0]), size, seed):
        yield tuple(arr[i] for arr in arrs)


perm = comp(tuple, partial(permutations, r=2))
data_index = perm(data_index)
data_valid = perm(data_valid)
data_train = perm(pipe(partial(batch, data_train), (tf.int32,) * len(data_train), prefetch=16))

###############
# build model #
###############

model = Model.new(**select(C, *Model._new))
valid = tuple(model.data(i, j).valid() for i, j in data_index)
train = tuple(model.data(i, j, s, t).train(**T)
              for (i, j), (s, t) in zip(data_index, data_train))

model.lr = train[0].lr
model.step = train[0].step
model.errt = train[0].errt
model.loss = tf.add_n([t.loss for t in train])
model.down = tf.train.AdamOptimizer(model.lr, T.beta1, T.beta2, T.epsilon).minimize(model.loss, model.step)
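# `pipe` (from util_tf) is called throughout these scripts but not defined in
# this section.  The helper below is only a hedged sketch of how such a
# generator-backed input pipeline is typically written with tf.data in TF1:
# wrap a Python generator factory, prefetch, and hand back the iterator's
# output tensors.  `pipe_sketch` is a hypothetical stand-in, not the project's
# actual util_tf.pipe.
import tensorflow as tf


def pipe_sketch(gen_fn, output_types, prefetch=1):
    """Assumed behaviour of `pipe`: generator factory -> graph tensors."""
    ds = tf.data.Dataset.from_generator(gen_fn, output_types)
    ds = ds.prefetch(prefetch)
    return ds.make_one_shot_iterator().get_next()

# usage mirroring the calls above, with a toy generator:
# xs, ys = pipe_sketch(lambda: iter([(1, 2), (3, 4)]), (tf.int32, tf.int32), prefetch=16)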
def train(anomaly_class=8):
    # set gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"

    # load data
    (train_images, train_labels), (test_images, test_labels) = \
        tf.keras.datasets.mnist.load_data()
    inlier = train_images[train_labels != anomaly_class]
    x_train = np.reshape(inlier, (len(inlier), 28 * 28)) / 255
    # y_train = train_labels[train_labels != anomaly_class]
    y_train = np.zeros(len(x_train), dtype=np.int8)  # dummy
    outlier = train_images[train_labels == anomaly_class]
    x_test = np.reshape(np.concatenate([outlier, test_images]),
                        (len(outlier) + len(test_images), 28 * 28)) / 255
    y_test = np.concatenate([train_labels[train_labels == anomaly_class], test_labels])
    y_test = [0 if y != anomaly_class else 1 for y in y_test]
    x_test, y_test = unison_shfl(x_test, np.array(y_test))

    path_log = "/cache/tensorboard-logdir/ae"
    path_ckpt = "/project/outlier_detection/ckpt"

    epochs = 400
    batch_size = 700
    dim_btlnk = 32
    mult = 20
    lr_max = 1e-4
    trial = f"dae{anomaly_class}_b{batch_size}_btlnk{dim_btlnk}_lr_{lr_max}m{mult}"
    # trial = "test1"
    dim_x = len(x_train[0])

    # reset graphs and fix seeds
    tf.reset_default_graph()
    if 'sess' in globals():
        globals()['sess'].close()  # close a session left over in this interpreter
    rand = RandomState(0)
    tf.set_random_seed(0)

    # data pipeline
    batch_fn = lambda: batch2(x_train, y_train, batch_size)
    x, y = pipe(batch_fn, (tf.float32, tf.float32), prefetch=4)
    # z = tf.random_normal((batch_size, z_dim))

    # load graph
    dae = DAE.new(dim_x, dim_btlnk)
    model = DAE.build(dae, x, y, lr_max, mult)

    # start session, initialize variables
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    wrtr = tf.summary.FileWriter(pform(path_log, trial))
    # wrtr.add_graph(sess.graph)

    ### if load pretrained model
    # pretrain = "modelname"
    # saver.restore(sess, pform(path_ckpt, pretrain))
    ### else:
    auc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='AUC')
    init = tf.group(tf.global_variables_initializer(),
                    tf.variables_initializer(var_list=auc_vars))
    sess.run(init)

    def log(step,
            wrtr=wrtr,
            log=tf.summary.merge([
                tf.summary.scalar('g_loss', model.g_loss),
                tf.summary.scalar('d_loss', model.d_loss),
                tf.summary.image('gx400', spread_image(model.gx[:400], 20, 20, 28, 28)),
                tf.summary.image('dgx400', spread_image(model.dgx[:400], 20, 20, 28, 28)),
                tf.summary.image('dx400', spread_image(model.dx[:400], 20, 20, 28, 28)),
                tf.summary.scalar("AUC_dgx", model.auc_dgx),
                tf.summary.scalar("AUC_dx", model.auc_dx),
                tf.summary.scalar("AUC_gx", model.auc_gx)]),
            y=y_test,
            x=x_test):
        wrtr.add_summary(sess.run(log, {model.x: x, model.y: y}), step)
        wrtr.flush()

    steps_per_epoch = len(x_train) // batch_size
    for epoch in tqdm(range(epochs)):
        for i in range(steps_per_epoch):
            sess.run(model.d_step)
            sess.run(model.g_step)
        # tensorboard writer
        log(sess.run(model["step"]) // steps_per_epoch)

    saver.save(sess, pform(path_ckpt, trial), write_meta_graph=False)
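# `spread_image` (used in the image summaries above and below) is a util
# helper that is not defined in this section.  The sketch below assumes it
# tiles a batch of images into a (rows x cols) grid so tf.summary.image can
# log the whole batch as one picture; the name `spread_image_sketch` and the
# exact layout are assumptions inferred from the call sites, not the project's
# implementation.
import tensorflow as tf


def spread_image_sketch(x, rows, cols, h, w, c=1):
    """[rows*cols, h*w*c] or [rows*cols, h, w, c] -> [1, rows*h, cols*w, c]."""
    x = tf.reshape(x, (rows, cols, h, w, c))   # split the batch into a grid
    x = tf.transpose(x, (0, 2, 1, 3, 4))       # interleave grid rows with image rows
    return tf.reshape(x, (1, rows * h, cols * w, c))

# e.g. tf.summary.image('gx400', spread_image_sketch(gx[:400], 20, 20, 28, 28))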
def train(anomaly_class, loss_type):
    # set gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"

    # load data
    (train_images, train_labels), (test_images, test_labels) = \
        tf.keras.datasets.mnist.load_data()
    inlier = train_images[train_labels != anomaly_class]
    x_train = np.reshape(inlier, (len(inlier), 28 * 28)) / 255
    # y_train = train_labels[train_labels != anomaly_class]
    y_train = np.zeros(len(x_train), dtype=np.int8)  # dummy
    outlier = train_images[train_labels == anomaly_class]
    x_test = np.reshape(np.concatenate([outlier, test_images]),
                        (len(outlier) + len(test_images), 28 * 28)) / 255
    y_test = np.concatenate([train_labels[train_labels == anomaly_class], test_labels])
    y_test = [0 if y != anomaly_class else 1 for y in y_test]
    x_test, y_test = unison_shfl(x_test, np.array(y_test))

    path_log = "/cache/tensorboard-logdir/ae"
    path_ckpt = "/project/outlier_detection/ckpt"

    epochs = 400
    batch_size = 700
    dim_btlnk = 32
    dim_z = dim_btlnk
    dim_dense = 32
    accelerate = 1e-5
    context_weight = 1
    trial = f"vaegan_{loss_type}_{anomaly_class}_b{batch_size}_btlnk{dim_btlnk}_d{dim_dense}_n{dim_z}_a{accelerate}"
    dim_x = len(x_train[0])

    # reset graphs and fix seeds
    tf.reset_default_graph()
    if 'sess' in globals():
        globals()['sess'].close()  # close a session left over in this interpreter
    rand = RandomState(0)
    tf.set_random_seed(0)

    # data pipeline
    batch_fn = lambda: batch2(x_train, y_train, batch_size, dim_z)
    x, y, z = pipe(batch_fn, (tf.float32, tf.float32, tf.float32), prefetch=4)

    # load graph
    aegan = VAEGAN.new(dim_x, dim_btlnk, dim_dense, dim_z, accelerate)
    model = VAEGAN.build(aegan, x, y, z, loss_type)

    # start session, initialize variables
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    wrtr = tf.summary.FileWriter(pform(path_log, trial))
    # wrtr.add_graph(sess.graph)

    ### if load pretrained model
    # pretrain = "modelname"
    # saver.restore(sess, pform(path_ckpt, pretrain))
    ### else:
    auc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='AUC')
    init = tf.group(tf.global_variables_initializer(),
                    tf.variables_initializer(var_list=auc_vars))
    sess.run(init)

    def log(step,
            wrtr=wrtr,
            log=tf.summary.merge([
                tf.summary.scalar('g_loss', model.g_loss),
                tf.summary.scalar('d_loss', model.d_loss),
                tf.summary.scalar('mu', model.m),
                tf.summary.scalar('lv', model.l),
                tf.summary.image('gzx400', spread_image(model.gzx[:400], 20, 20, 28, 28)),
                # tf.summary.image('gz400', spread_image(model.gz[:400], 20, 20, 28, 28)),
                tf.summary.scalar("AUC_gzx", model.auc_gzx),
                tf.summary.scalar("AUC_dgzx", model.auc_dgzx),
                tf.summary.scalar("AUC_dx", model.auc_dx),
                # tf.summary.scalar("gz_loss", model.gz_loss),
                tf.summary.scalar("gzx_loss", model.gzx_loss),
                tf.summary.scalar("ftr_loss", model.ftr_loss),
                tf.summary.scalar("kl_loss", model.kl_loss),
                tf.summary.scalar("dx_loss", model.dx_loss),
                # tf.summary.scalar("dgz_loss", model.dgz_loss),
                tf.summary.scalar("dgzx_loss", model.dgzx_loss)]),
            y=y_test,
            x=x_test):
        mu = sess.run(model.mu, {model.x: x})
        wrtr.add_summary(sess.run(log, {model.zx: mu, model.x: x, model.y: y}), step)
        wrtr.flush()

    steps_per_epoch = len(x_train) // batch_size
    for epoch in tqdm(range(epochs)):
        for i in range(steps_per_epoch):
            sess.run(model.g_step)
            sess.run(model.d_step)
        # tensorboard writer
        log(sess.run(model["step"]) // steps_per_epoch)

    saver.save(sess, pform(path_ckpt, trial), write_meta_graph=False)
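# The `auc_*` tensors logged above are presumably built with tf.metrics.auc
# inside a variable scope named 'AUC' (the model code is not part of this
# section).  A hedged sketch of that pattern, and of why the LOCAL_VARIABLES
# under 'AUC' need explicit initialization: tf.metrics.auc keeps its
# confusion-matrix counters as local variables, which
# tf.global_variables_initializer() does not touch.  Placeholder names here
# are illustrative only.
import tensorflow as tf

y_true = tf.placeholder(tf.float32, (None,))   # 1 = anomaly, 0 = inlier
score = tf.placeholder(tf.float32, (None,))    # anomaly score in [0, 1]

with tf.variable_scope('AUC'):
    auc, auc_update = tf.metrics.auc(labels=y_true, predictions=score)

auc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='AUC')
init = tf.group(tf.global_variables_initializer(),
                tf.variables_initializer(var_list=auc_vars))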
val = np.array(sorted(range(len(tgt_valid)),
                      key=lambda i: max(len(src_valid[i]), len(tgt_valid[i]))))
src_valid = src_valid[val]
tgt_valid = tgt_valid[val]


def feed(src, tgt, cws=cws, cwt=cwt):
    src_idx, len_src = cws(src, ret_img=False, ret_idx=True)
    tgt_img, tgt_idx, len_tgt = cwt(tgt, ret_img=True, ret_idx=True)
    return src_idx, len_src, tgt_img, tgt_idx, len_tgt


def batch(src=src_train, tgt=tgt_train, size=128, seed=0):
    for bat in batch_sample(len(tgt), size, seed):
        yield feed(src[bat], tgt[bat])


src_idx, len_src, tgt_img, tgt_idx, len_tgt = pipe(
    batch, (tf.int32, tf.int32, tf.uint8, tf.int32, tf.int32))

train = model('train', cws.dwh(), cwt.dwh(), src_idx, len_src, tgt_img, tgt_idx, len_tgt)
valid = model('valid', cws.dwh(), cwt.dwh())

dummy = tuple(placeholder(tf.float32, ()) for _ in range(3))


def log(step,
        wtr=tf.summary.FileWriter("../log/{}".format(trial)),
        log=tf.summary.merge((tf.summary.scalar('step_mae', dummy[0]),
                              tf.summary.scalar('step_xid', dummy[1]),
                              tf.summary.scalar('step_err', dummy[2]))),
        fet=(valid.mae, valid.xid, valid.err),
        inp=(valid.src_idx, valid.len_src, valid.tgt_img, valid.tgt_idx, valid.len_tgt),
        src=src_valid,
        tgt=tgt_valid,
def train(anomaly_class=8,
          dataset="cifar",
          n_dis=1,
          epochs=25,
          dim_btlnk=32,
          batch_size=64,
          loss="mean",
          context_weight=1,
          dim_d=64,
          dim_g=64,
          extra_layers=0,
          gpu="0"):
    # set gpu
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    path_log = f"/cache/tensorboard-logdir/{dataset}"
    path_ckpt = "/project/multi-discriminator-gan/ckpt"
    path_data = "/project/multi-discriminator-gan/data"

    # reset graphs and fix seeds
    tf.reset_default_graph()
    if 'sess' in globals():
        globals()['sess'].close()  # close a session left over in this interpreter
    rand = RandomState(0)
    tf.set_random_seed(0)

    # load data
    if dataset == "ucsd1":
        x_train = np.load("./data/ucsd1_train_x.npz")["arr_0"] / 255
        y_train = np.load("./data/ucsd1_train_y.npz")["arr_0"]
        x_test = np.load("./data/ucsd1_test_x.npz")["arr_0"] / 255
        y_test = np.load("./data/ucsd1_test_y.npz")["arr_0"]
    elif dataset == "ucsd2":
        x_train = np.load("./data/ucsd2_train_x.npz")["arr_0"]
        y_train = np.load("./data/ucsd2_train_y.npz")["arr_0"]
        x_test = np.load("./data/ucsd2_test_x.npz")["arr_0"]
        y_test = np.load("./data/ucsd2_test_y.npz")["arr_0"]
    else:
        if dataset == "mnist":
            (train_images, train_labels), (test_images, test_labels) = \
                tf.keras.datasets.mnist.load_data()
            train_images = resize_images(train_images)
            test_images = resize_images(test_images)
        else:
            (train_images, train_labels), (test_images, test_labels) = \
                tf.keras.datasets.cifar10.load_data()
            train_labels = np.reshape(train_labels, len(train_labels))
            test_labels = np.reshape(test_labels, len(test_labels))

        inlier = train_images[train_labels != anomaly_class]
        # data_size = prod(inlier[0].shape)
        x_train = inlier / 255
        # x_train = np.reshape(inlier, (len(inlier), data_size)) / 255
        # y_train = train_labels[train_labels != anomaly_class]
        y_train = np.zeros(len(x_train), dtype=np.int8)  # dummy
        outlier = train_images[train_labels == anomaly_class]
        x_test = np.concatenate([outlier, test_images]) / 255
        # x_test = np.reshape(np.concatenate([outlier, test_images]),
        #                     (len(outlier) + len(test_images), data_size)) / 255
        y_test = np.concatenate([train_labels[train_labels == anomaly_class], test_labels])
        y_test = [0 if y != anomaly_class else 1 for y in y_test]
        x_test, y_test = unison_shfl(x_test, np.array(y_test))

    img_size_x = x_train[0].shape[0]
    img_size_y = x_train[0].shape[1]
    channel = x_train[0].shape[-1]
    trial = f"{dataset}_{loss}_dis{n_dis}_{anomaly_class}_w{context_weight}_btlnk{dim_btlnk}_d{dim_d}_g{dim_g}e{extra_layers}"

    # data pipeline
    batch_fn = lambda: batch2(x_train, y_train, batch_size)
    x, y = pipe(batch_fn, (tf.float32, tf.float32), prefetch=4)
    # z = tf.random_normal((batch_size, z_dim))

    # load graph
    mg_gan = MG_GAN.new(img_size_x, channel, dim_btlnk, dim_d, dim_g, n_dis,
                        extra_layers=extra_layers)
    model = MG_GAN.build(mg_gan, x, y, context_weight, loss)

    # start session, initialize variables
    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    wrtr = tf.summary.FileWriter(pform(path_log, trial))
    wrtr.add_graph(sess.graph)

    ### if load pretrained model
    # pretrain = "modelname"
    # saver.restore(sess, pform(path_ckpt, pretrain))
    ### else:
    auc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, scope='AUC')
    init = tf.group(tf.global_variables_initializer(),
                    tf.variables_initializer(var_list=auc_vars))
    sess.run(init)

    # if "ucsd" in dataset:
    summary_test = tf.summary.merge([
        tf.summary.scalar('g_loss', model.g_loss),
        tf.summary.scalar("lambda", model.lam),
        tf.summary.scalar("gl_rec", model.gl_rec),
        tf.summary.scalar("gl_adv", model.gl_adv),
        tf.summary.scalar("gl_lam", model.gl_lam),
        tf.summary.scalar('d_loss_mean', model.d_loss_mean),
        tf.summary.scalar('d_max', model.d_max),
        # tf.summary.scalar('d_loss', model.d_loss),
        tf.summary.scalar("AUC_gx", model.auc_gx)])

    if dataset == "ucsd1":
        summary_images = tf.summary.merge(
            (tf.summary.image("gx", model.gx, max_outputs=8),
             tf.summary.image("x", model.x, max_outputs=8),
             tf.summary.image('gx400',
                              spread_image(tf.concat([model.gx, model.x], axis=1),
                                           8, 2, img_size_x, img_size_y, channel))))
    else:
        summary_images = tf.summary.merge(
            (tf.summary.image("gx", model.gx, max_outputs=8),
             tf.summary.image('gx400',
                              spread_image(model.gx[:400], 20, 20,
                                           img_size_x, img_size_y, channel)),
             tf.summary.image("x", model.x, max_outputs=8)))

    if n_dis > 1:
        d_wrtr = {i: tf.summary.FileWriter(pform(path_log, trial + f"d{i}"))
                  for i in range(n_dis)}
        summary_discr = {i: tf.summary.scalar('d_loss_multi', model.d_loss[i])
                         for i in range(n_dis)}

    def summ(step):
        fetches = model.g_loss, model.lam, model.d_loss_mean, model.auc_gx
        results = map(np.mean,
                      zip(*(sess.run(fetches, {model['x']: x_test[i:j],
                                               model['y']: y_test[i:j]})
                            for i, j in partition(len(x_test), batch_size, discard=False))))
        results = list(results)
        wrtr.add_summary(sess.run(summary_test, dict(zip(fetches, results))), step)
        if dataset == "ucsd1":
            # bike, skateboard, grasswalk, shopping cart, car, normal, normal, grass
            wrtr.add_summary(
                sess.run(summary_images,
                         {model.x: x_test[[990, 1851, 2140, 2500, 2780, 2880, 3380, 3580]]}),
                step)
        else:
            wrtr.add_summary(sess.run(summary_images, {model.x: x_test}), step)
        wrtr.flush()

    def summ_discr(step):
        fetches = model.d_loss
        results = map(np.mean,
                      zip(*(sess.run(fetches, {model['x']: x_test[i:j],
                                               model['y']: y_test[i:j]})
                            for i, j in partition(len(x_test), batch_size, discard=False))))
        results = list(results)
        if n_dis > 1:  # put all losses of the discriminators in one plot
            for i in range(n_dis):
                d_wrtr[i].add_summary(
                    sess.run(summary_discr[i], dict(zip(fetches, results))), step)
                # d_wrtr[i].add_summary(sess.run(summary_discr[i], dict([(fetches[i], results[i])])), step)
                d_wrtr[i].flush()

    # def log(step
    #         , wrtr= wrtr
    #         , log = tf.summary.merge([tf.summary.scalar('g_loss', model.g_loss)
    #                                   , tf.summary.scalar('d_loss', tf.reduce_mean(model.d_loss))
    #                                   , tf.summary.scalar("lambda", model.lam)
    #                                   , tf.summary.image("gx", model.gx, max_outputs=5)
    #                                   , tf.summary.image('gx400', spread_image(model.gx[:400], 20, 20, img_size, img_size, channel))
    #                                   #, tf.summary.scalar("AUC_dgx", model.auc_dgx)
    #                                   #, tf.summary.scalar("AUC_dx", model.auc_dx)
    #                                   , tf.summary.scalar("AUC_gx", model.auc_gx)])
    #         , y= y_test
    #         , x= x_test):
    #     wrtr.add_summary(sess.run(log, {model["x"]: x, model["y"]: y}), step)
    #     wrtr.flush()

    steps_per_epoch = len(x_train) // batch_size - 1
    for epoch in tqdm(range(epochs)):
        for i in range(steps_per_epoch):
            # sess.run(model["train_step"])
            sess.run(model['d_step'])
            sess.run(model['g_step'])
        # tensorboard writer
        # if "ucsd" in dataset:
        summ(sess.run(model["step"]) // steps_per_epoch)
        # else:
        #     log(sess.run(model["step"]) // steps_per_epoch)
        if n_dis > 1:
            summ_discr(sess.run(model["step"]) // steps_per_epoch)

    saver.save(sess, pform(path_ckpt, trial), write_meta_graph=False)
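# `partition` (used in summ/summ_discr above) is another util helper not
# defined in this section.  The sketch below assumes it yields (start, end)
# index pairs that cover n items in chunks of `size`, keeping the final short
# chunk when discard=False; the name `partition_sketch` and this behaviour are
# assumptions inferred from the call sites.
def partition_sketch(n, size, discard=True):
    for i in range(0, n - size + 1, size):
        yield i, i + size
    rest = n % size
    if rest and not discard:
        yield n - rest, n

# list(partition_sketch(10, 4, discard=False)) -> [(0, 4), (4, 8), (8, 10)]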