def main():
    """Train and evaluate a MemAE (memory-augmented autoencoder) via a TF1 session.

    Reads hyper-parameters from module-level FLAGS; builds the dataset and
    network, then delegates the train/test loops to the tfp module.
    """
    dataset = dman.Dataset(normalize=FLAGS.datnorm)
    # NOTE(review): 'leaning_rate' mirrors the callee's (misspelled) keyword —
    # do not rename here unless nn.MemAE's signature is fixed as well.
    neuralnet = nn.MemAE(height=dataset.height, width=dataset.width,
                         channel=dataset.channel, leaning_rate=FLAGS.lr)

    # Allocate GPU memory on demand instead of grabbing it all up front.
    sess_config = tf.compat.v1.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=sess_config)

    # Variables exist once the network graph is built, so init and the Saver
    # must come after the nn.MemAE construction above.
    sess.run(tf.compat.v1.global_variables_initializer())
    saver = tf.compat.v1.train.Saver()

    tfp.training(sess=sess, neuralnet=neuralnet, saver=saver, dataset=dataset,
                 epochs=FLAGS.epoch, batch_size=FLAGS.batch, normalize=True)
    tfp.test(sess=sess, neuralnet=neuralnet, saver=saver, dataset=dataset,
             batch_size=FLAGS.batch)
def main():
    """Train and evaluate an ADAE (adversarial dual autoencoder) in TF2 eager mode.

    Checkpoints are written under 'Checkpoint'; train/test loops live in tfp.
    """
    dataset = dman.Dataset(normalize=FLAGS.datnorm)
    neuralnet = nn.ADAE(height=dataset.height, width=dataset.width,
                        channel=dataset.channel,
                        ksize=FLAGS.ksize, learning_rate=FLAGS.lr,
                        path='Checkpoint')
    # Print/validate the parameter summary quietly; the BN inspection below is
    # an optional debug hook kept disabled.
    neuralnet.confirm_params(verbose=False)
    # neuralnet.confirm_bn()

    tfp.training(neuralnet=neuralnet, dataset=dataset,
                 epochs=FLAGS.epoch, batch_size=FLAGS.batch, normalize=True)
    tfp.test(neuralnet=neuralnet, dataset=dataset,
             batch_size=FLAGS.batch)
def main():
    """Train and evaluate a convolutional VAE (TF2 eager mode)."""
    # Fix: the original indexed physical_devices[0] unconditionally, which
    # raises IndexError on CPU-only hosts. Only enable memory growth when a
    # GPU is actually present.
    physical_devices = tf.config.list_physical_devices('GPU')
    if physical_devices:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    dataset = dman.Dataset(normalize=FLAGS.datnorm)
    # NOTE(review): 'leaning_rate' matches the callee's (misspelled) keyword —
    # renaming it here would break the call into nn.CVAE.
    neuralnet = nn.CVAE(height=dataset.height, width=dataset.width,
                        channel=dataset.channel,
                        ksize=FLAGS.ksize, z_dim=FLAGS.z_dim,
                        leaning_rate=FLAGS.lr)

    tfp.training(neuralnet=neuralnet, dataset=dataset,
                 epochs=FLAGS.epoch, batch_size=FLAGS.batch)
    tfp.test(neuralnet=neuralnet, dataset=dataset, batch_size=FLAGS.batch)
def main():
    """Train and evaluate f-AnoGAN in two stages (TF2 eager mode).

    Stage 1 trains the WGAN; stage 2 (izi) trains the encoder, resuming from
    the iteration count returned by stage 1 so the global step is continuous.
    """
    dataset = dman.Dataset(normalize=FLAGS.datnorm)
    neuralnet = nn.f_AnoGAN(height=dataset.height, width=dataset.width,
                            channel=dataset.channel,
                            ksize=FLAGS.ksize, zdim=FLAGS.zdim,
                            learning_rate=FLAGS.lr, path='Checkpoint')
    # Print/validate the parameter summary quietly; the BN inspection below is
    # an optional debug hook kept disabled.
    neuralnet.confirm_params(verbose=False)
    # neuralnet.confirm_bn()

    iteration = tfp.training_wgan(neuralnet=neuralnet, dataset=dataset,
                                  epochs=FLAGS.epoch, batch_size=FLAGS.batch,
                                  normalize=True)
    tfp.training_izi(neuralnet=neuralnet, dataset=dataset,
                     epochs=FLAGS.epoch, batch_size=FLAGS.batch,
                     normalize=True, iteration=iteration)
    tfp.test(neuralnet=neuralnet, dataset=dataset,
             batch_size=FLAGS.batch)
def main():
    """Train and evaluate a CNN classifier (TF2 eager mode)."""
    # Best-effort GPU memory-growth setup; harmless to skip on CPU-only hosts.
    # Fix: the original used a bare `except:` — narrowed to the two errors this
    # can actually raise (no GPU found; growth toggled after initialization).
    try:
        physical_devices = tf.config.list_physical_devices('GPU')
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    except (IndexError, RuntimeError):
        pass

    dataset = dman.Dataset(normalize=FLAGS.datnorm)
    # NOTE(review): kernel size is hard-coded to 3 here (other drivers use
    # FLAGS.ksize) — presumably intentional; confirm before changing.
    neuralnet = nn.CNN(height=dataset.height, width=dataset.width,
                       channel=dataset.channel,
                       num_class=dataset.num_class, ksize=3,
                       learning_rate=FLAGS.lr, ckpt_dir=CKPT_DIR)

    tfp.training(neuralnet=neuralnet, dataset=dataset,
                 epochs=FLAGS.epoch, batch_size=FLAGS.batch, normalize=True)
    tfp.test(neuralnet=neuralnet, dataset=dataset, batch_size=FLAGS.batch)
def main():
    """Train and evaluate a Context Encoder via a TF1 session, with timing.

    Prints total training wall time (with the training-set size) and the
    per-sample test wall time.
    """
    dataset = dman.Dataset(normalize=FLAGS.datnorm)
    neuralnet = nn.Context_Encoder(height=dataset.height, width=dataset.width,
                                   channel=dataset.channel,
                                   z_dim=FLAGS.z_dim, learning_rate=FLAGS.lr)

    # Allocate GPU memory on demand instead of grabbing it all up front.
    sess_config = tf.compat.v1.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=sess_config)

    sess.run(tf.compat.v1.global_variables_initializer())
    saver = tf.compat.v1.train.Saver()

    time_1 = time.time()
    tfp.training(sess=sess, neuralnet=neuralnet, saver=saver, dataset=dataset,
                 epochs=FLAGS.epoch, batch_size=FLAGS.batch, normalize=True)
    time_2 = time.time()
    tfp.test(sess=sess, neuralnet=neuralnet, saver=saver, dataset=dataset,
             batch_size=FLAGS.batch)
    time_3 = time.time()

    # TR: total training seconds + number of training samples.
    # TE: seconds per test sample (total / num_te).
    print("TR: ", time_2 - time_1, dataset.num_tr)
    print("TE: ", (time_3 - time_2) / dataset.num_te)
def main():
    """Select the GPU, build the agent via the `con` connector, then train/test.

    The network implementation is chosen at runtime by FLAGS.nn; timing for
    both phases is reported at the end.
    """
    # NOTE(review): setting CUDA_VISIBLE_DEVICES after TensorFlow is imported
    # may have no effect if TF already initialized its device list — confirm
    # import order in this project.
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.gpu

    # Enable on-demand memory growth for every visible GPU (best effort;
    # set_memory_growth raises RuntimeError once devices are initialized).
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            logical_gpus = tf.config.experimental.list_logical_devices('GPU')
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
        except RuntimeError as e:
            print(e)

    dataset = dman.Dataset()
    agent = con.connect(nn=FLAGS.nn).Agent(
        dim_h=dataset.height,
        dim_w=dataset.width,
        dim_c=dataset.channel,
        num_class=dataset.num_class,
        ksize=FLAGS.ksize,
        learning_rate=FLAGS.lr,
        path_ckpt='Checkpoint')

    time_tr = time.time()
    tfp.training(agent=agent, dataset=dataset,
                 batch_size=FLAGS.batch, epochs=FLAGS.epochs)
    time_te = time.time()
    tfp.test(agent=agent, dataset=dataset, batch_size=FLAGS.batch)
    time_fin = time.time()

    print("Time (TR): %.5f [sec]" % (time_te - time_tr))
    te_time = time_fin - time_te
    print("Time (TE): %.5f (%.5f [sec/sample])"
          % (te_time, te_time / dataset.num_te))
def main():
    """Train and evaluate adVAE (self-adversarial VAE) via a TF1 session.

    mx/mz are the model's margin hyper-parameters forwarded from FLAGS.
    """
    dataset = dman.Dataset(normalize=FLAGS.datnorm)
    # NOTE(review): 'leaning_rate' mirrors the callee's (misspelled) keyword —
    # do not rename here unless nn.adVAE's signature is fixed as well.
    neuralnet = nn.adVAE(height=dataset.height, width=dataset.width,
                         channel=dataset.channel,
                         z_dim=FLAGS.z_dim, mx=FLAGS.mx, mz=FLAGS.mz,
                         leaning_rate=FLAGS.lr)

    # Allocate GPU memory on demand instead of grabbing it all up front.
    sess_config = tf.compat.v1.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=sess_config)

    # Variables exist once the network graph is built, so init and the Saver
    # must come after the nn.adVAE construction above.
    sess.run(tf.compat.v1.global_variables_initializer())
    saver = tf.compat.v1.train.Saver()

    tfp.training(sess=sess, neuralnet=neuralnet, saver=saver, dataset=dataset,
                 epochs=FLAGS.epoch, batch_size=FLAGS.batch, normalize=True)
    tfp.test(sess=sess, neuralnet=neuralnet, saver=saver, dataset=dataset,
             batch_size=FLAGS.batch)