# CIFAR-10 training script prologue (TensorFlow 1.x graph mode): loads the
# data, builds the model graph, and defines loss / optimizer / accuracy ops.
# NOTE(review): this excerpt ends before the training loop; the module-level
# names defined here are used by the remainder of the script.
import numpy as np
import tensorflow as tf
from time import time
import math
from include.data import get_data_set
from include.model import model, lr

# Full CIFAR-10 train/test splits (array shapes defined by include.data).
train_x, train_y = get_data_set("train")
test_x, test_y = get_data_set("test")
tf.set_random_seed(21)  # fixed graph-level seed for reproducibility

# model() returns input/label placeholders plus graph outputs; see include/model.py.
x, y, output, y_pred_cls, global_step, learning_rate = model()
global_accuracy = 0  # best accuracy seen so far — presumably updated later; verify
epoch_start = 0

# PARAMS
_BATCH_SIZE = 128
_EPOCH = 60
_SAVE_PATH = "./tensorboard/cifar-10-v1.0.0/"

# LOSS AND OPTIMIZER
# Mean softmax cross-entropy over logits; the _v2 variant backprops into
# labels as well (labels here are placeholders, so that path is inert).
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-08).minimize(
                                       loss, global_step=global_step)

# PREDICTION AND ACCURACY CALCULATION
# y_pred_cls is a class index; compare against the one-hot label argmax.
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
# Variant of the CIFAR-10 training prologue whose model builder takes a
# retained-pixels count and additionally exposes a "pade" auxiliary head.
import numpy as np
import tensorflow as tf
from time import time
import math, pdb
from include.data import get_data_set
from include.model import model, lr

train_x, train_y = get_data_set("train")
test_x, test_y = get_data_set("test")
tf.set_random_seed(21)  # fixed graph-level seed for reproducibility

# Number of input pixels retained by the model — presumably controls an input
# reduction inside include/model.py; TODO confirm exact semantics there.
_RETAINED_PIXELS = 256
x, y, output, y_pred_cls, global_step, learning_rate, pade_output, pade_x, pade_y, x_image = model(
    _RETAINED_PIXELS)
# x, y, output, y_pred_cls, global_step, learning_rate = model()
global_accuracy = 0
epoch_start = 0

# PARAMS
_BATCH_SIZE = 128
_EPOCH = 60
_SAVE_PATH = "./tensorboard/cifar-10-v1.0.0/"

# LOSS AND OPTIMIZER
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=y))
# NOTE(review): the source excerpt truncates mid-call below — the arguments to
# minimize(...) continue beyond this chunk.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-08).minimize(
# Myo-armband gesture inference prologue: restores the latest checkpoint into
# a fresh session and begins defining a myo.DeviceListener that buffers
# incoming EMG samples in a bounded, lock-protected deque.
import collections
import myo
import threading
import time
import numpy as np
import tensorflow as tf
from include.model import model

x, y, output, global_step, y_pred_cls = model()
saver = tf.train.Saver()
_SAVE_PATH = "./data/tensorflow_sessions/myo_armband/"
sess = tf.Session()
try:
    print("Trying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    print(last_chk_path)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
except:
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # prefer `except Exception:` when editing the full file.
    print("Failed to restore checkpoint. Initializing variables instead.")
    sess.run(tf.global_variables_initializer())


class MyListener(myo.DeviceListener):
    # Buffers the most recent EMG readings; the lock guards cross-thread
    # access to the deque (maxlen bounds memory to `queue_size` samples).

    def __init__(self, queue_size=8):
        self.lock = threading.Lock()
        self.emg_data_queue = collections.deque(maxlen=queue_size)

    # NOTE(review): excerpt ends here — the body of on_connect (and any
    # further listener callbacks) continues beyond this chunk.
    def on_connect(self, device, timestamp, firmware_version):
# CIFAR-10 training prologue; this variant's model() also returns the hidden
# layer activations (hlayer_activations) for inspection.
import numpy as np
import tensorflow as tf
from time import time
import math
from include.data import get_data_set
from include.model import model, lr

train_x, train_y = get_data_set("train")
test_x, test_y = get_data_set("test")

x, y, output, y_pred_cls, global_step, learning_rate, hlayer_activations = model(
)
global_accuracy = 0
print("Finished Loading data!")

# PARAMS
_BATCH_SIZE = 128
_EPOCH = 5
_SAVE_PATH = "./tensorboard/cifar-10-v1.0.0/"

# LOSS AND OPTIMIZER
# NOTE(review): uses the deprecated (non-_v2) softmax_cross_entropy_with_logits,
# unlike sibling scripts in this file that use the _v2 variant.
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-08).minimize(
                                       loss, global_step=global_step)

# PREDICTION AND ACCURACY CALCULATION
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
# Myo-armband training setup: loss, RMSProp optimizer, accuracy metric, and
# TensorBoard summaries for a 6-class gesture model.
import numpy as np
import tensorflow as tf
from time import time
from include.data import get_data_set
from include.model import model

train_x, train_y = get_data_set()
print(train_x)
print(train_y)

_BATCH_SIZE = 300
_CLASS_SIZE = 6  # number of output classes passed to the model builder
_SAVE_PATH = "./data/tensorflow_sessions/myo_armband/"

x, y, output, global_step, y_pred_cls = model(_CLASS_SIZE)

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=y))
tf.summary.scalar("Loss", loss)
optimizer = tf.train.RMSPropOptimizer(learning_rate=1e-4).minimize(
    loss, global_step=global_step)

# NOTE(review): `dimension=` is the deprecated alias of `axis=` in tf.argmax.
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, dimension=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar("Accuracy/train", accuracy)

init = tf.global_variables_initializer()
merged = tf.summary.merge_all()
saver = tf.train.Saver()
sess = tf.Session()
# CIFAR-10 evaluation prologue: loads train/test data sharing the training
# normalization statistics (mu, std), builds the model, and restores the
# latest checkpoint.
import numpy as np
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from include.data import get_data_set
from include.model import model

# test_x, test_y, test_l = get_data_set("test", cifar=10)
# Train-set stats (mu, std) are fed back so the test split is normalized with
# the same statistics as training.
train_x, train_y, train_l, mu, std = get_data_set(cifar=10, whitten=False)
test_x, test_y, test_l, mu, std = get_data_set(name="test",
                                               mu=mu,
                                               std=std,
                                               cifar=10,
                                               whitten=False)

x, y, output, global_step, y_pred_cls, keep_prob = model()

_IMG_SIZE = 32
_NUM_CHANNELS = 3
_BATCH_SIZE = 128
_CLASS_SIZE = 10
_SAVE_PATH = "./tensorboard/aug-decay-RMS/"

saver = tf.train.Saver()
sess = tf.Session()

try:
    print("Trying to restore last checkpoint ...")
    last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=_SAVE_PATH)
    saver.restore(sess, save_path=last_chk_path)
    print("Restored checkpoint from:", last_chk_path)
# NOTE(review): excerpt ends at this bare `except:`; its handler body lies
# beyond this chunk. Prefer `except Exception:` when editing the full file.
except:
# MNIST sweep over a weight-variance hyperparameter sigw2 (1.0 to 4.0 in
# steps of 0.1): sets up loss / optimizer / accuracy and opens a CSV log.
from tensorflow.examples.tutorials.mnist import input_data
import sys

# DATASET
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

# PARAMS
_BATCH_SIZE = 1000
_STEP = 1000
_SAVE_PATH = "./tensorboard/cifar-10-v1/"
_Length = sys.argv  # NOTE(review): this is the full argv list, not a length
print(_Length)

filename = 'tanh2_orth_L10'
file_ = open("{}.csv".format(filename), 'w')

# NOTE(review): `model` and `tf` are used below but not imported in this
# excerpt — presumably imported elsewhere in the original file; verify.
x, y, sigma2, output, y_pred_cls, global_step = model()
print(sigma2)
print(x)

# Cross-entropy computed directly on `output` via tf.log — assumes `output`
# is already a softmax probability distribution; TODO confirm in model().
loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(output), reduction_indices=[1]))
# loss = tf.reduce_mean(tf.reduce_sum((y-output)**2,reduction_indices=[1]))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.003).minimize(
    loss, global_step=global_step)
correct_prediction = tf.equal(y_pred_cls, tf.argmax(y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Sweep sigw2 = 1.0, 1.1, ..., 4.0; per-sweep state is reset each iteration.
for sigw2_i in range(0, 31):
    sigw2 = sigw2_i * 0.1 + 1.0
    print(sigw2)
    global_accuracy = 0
    epoch_start = 0
# Augmentation-sweep training setup (mid-file excerpt): opens a run log,
# builds the batch generator and model, and selects the optimizer from the
# --opt flag.
# NOTE(review): `args`, `convert_str2list`, `get_filename`, `get_mbatch`,
# `change_num_classes`, `num_classes`, `_BATCH_SIZE`, `get_data_set`, `model`,
# and `tf` are defined/imported earlier in the original file, outside this
# excerpt.
aug_list, aug_prob = convert_str2list(args.aug_list, args.aug_prob)
log_filename = get_filename(aug_list, aug_prob, args)
outfile = open("./logs/" + log_filename, "w+")
outfile.write(str(aug_list) + "\n" + str(aug_prob) + "\n\n")
outfile.write("opt: " + args.opt + "\n" + "a0: " + str(args.a0) + "\n")

# GENERATORS
gen_train = get_mbatch("train", _BATCH_SIZE, num_classes, aug_list, aug_prob)
test_x, test_y = get_data_set("test")
test_x, test_y = change_num_classes(test_x, test_y, num_classes)
tf.set_random_seed(21)

# MODEL
x, y, logits, softmax, y_pred_cls, global_step, learning_rate = model(num_classes)
global_accuracy = 0
epoch_start = 0

#############################################################################################
# LOSS AND OPTIMIZER
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))

# EE599: How to select optimization technique and details:
if args.opt == "sgd":
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step)
elif args.opt == "adam":
    # NOTE(review): excerpt truncates mid-call; the AdamOptimizer arguments
    # continue beyond this chunk.
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0.9,
# Training prologue for model "model_ujh_f000": loads data, builds the graph,
# and defines the cross-entropy loss and Adam optimizer.
import numpy as np
import tensorflow as tf
from time import time
import math
from include.data import get_data_set
from include.model import model, lr_decay

# dataset
X_train, y_train, X_test, y_test = get_data_set()
tf.set_random_seed(21)  # fixed graph-level seed for reproducibility

# training examples
n_obs = X_train.shape[0]

X, Y, output, y_pred_cls, global_step, learning_rate = model("model_ujh_f000")
global_accuracy = 0
epoch_start = 0

# parameters
_BATCH_SIZE = 512
_EPOCH = 5
save_path = './graph'

# loss and optimizer
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=Y))
# NOTE(review): excerpt truncates mid-call below; the minimize(...) arguments
# continue beyond this chunk.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                   beta1=0.9,
                                   beta2=0.999,
                                   epsilon=1e-08).minimize(
# Adversarial-training prologue: model() builds paired regular ("reg") and
# adversarial ("adv") branches plus discriminator heads; this chunk defines
# the two discriminator cross-entropy terms.
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from time import time
import fast_gradient
from include.data import get_data_set
from include.model import model
import os
import scipy.misc

# Loss-mixing coefficients — presumably weight the combined objective later
# in the script; TODO confirm usage beyond this excerpt.
alpha = 0.5
beta = 0.1

train_x, train_y, train_l = get_data_set()
test_x, test_y, test_l = get_data_set("test")

# Single model() call returns both branches, discriminator outputs/labels,
# and intermediate conv activations (conv2/conv5) for each branch.
reg_x, reg_y, reg_output, reg_y_pred_cls, adv_x, adv_y, adv_output, global_step, adv_y_pred_cls, discr_reg_final, discr_adv_final, discr_reg_y, discr_adv_y, reg_conv5, adv_conv5, reg_conv2, adv_conv2 = model(
)

_IMG_SIZE = 32
_NUM_CHANNELS = 3
_BATCH_SIZE = 128
_CLASS_SIZE = 10
_ITERATION = 10000
_SAVE_PATH = "./tensorboard/cifar-10/"
dropout = 0.5

# Discriminator cross-entropy on the regular branch ...
cross_discr_norm = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=discr_reg_final,
                                            labels=discr_reg_y))
# ... and on the adversarial branch.
cross_discr_adv = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=discr_adv_final,
                                            labels=discr_adv_y))