def main(model_name, model_type):
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"

    set_mnist_flags()
    # The epoch count is an integer, not a boolean flag
    flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs')

    # Get MNIST training and test data
    X_train, Y_train, X_test, Y_test = data_mnist()
    data_gen = data_gen_mnist(X_train)

    x = K.placeholder((None,
                       FLAGS.IMAGE_ROWS,
                       FLAGS.IMAGE_COLS,
                       FLAGS.NUM_CHANNELS))
    y = K.placeholder(shape=(None, FLAGS.NUM_CLASSES))

    model = model_mnist(type=model_type)

    # Train an MNIST model
    tf_train(x, y, model, X_train, Y_train, data_gen)

    # Finally print the result!
    test_error = tf_test_error_rate(model, x, X_test, Y_test)
    print('Test error: %.1f%%' % test_error)

    save_model(model, model_name)
    json_string = model.to_json()
    # 'w', not 'wr': 'wr' is not a valid file mode
    with open(model_name + '.json', 'w') as f:
        f.write(json_string)
def main(model_name, adv_model_names, model_type): np.random.seed(0) assert keras.backend.backend() == "tensorflow" set_flags(32) config = tf.ConfigProto() config.gpu_options.allow_growth = True K.set_session(tf.Session(config=config)) flags.DEFINE_integer('NUM_EPOCHS', args.epochs, 'Number of epochs') flags.DEFINE_integer('type', args.type, 'model type') # Get MNIST test data X_train, Y_train, X_test, Y_test = load_data() data_gen = data_flow(X_train) x = K.placeholder(shape=(None, FLAGS.NUM_CHANNELS, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS)) y = K.placeholder(shape=(FLAGS.BATCH_SIZE, FLAGS.NUM_CLASSES)) eps = args.eps # if src_models is not None, we train on adversarial examples that come # from multiple models adv_models = [None] * len(adv_model_names) for i in range(len(adv_model_names)): adv_models[i] = load_model(adv_model_names[i]) model = model_select(type=model_type) x_advs = [None] * (len(adv_models) + 1) for i, m in enumerate(adv_models + [model]): x_noise = x + tf.random_uniform(shape=[FLAGS.BATCH_SIZE, FLAGS.NUM_CHANNELS, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS], minval= -args.eps, maxval=args.eps) x_noise = tf.clip_by_value(x_noise, 0., 1.) for _ in range(args.k): logits = m(x_noise) grad = gen_grad(x_noise, logits, y, loss='logloss') x_noise = K.stop_gradient(x_noise + args.eps / 4.0 * K.sign(grad)) x_noise = tf.clip_by_value(x_noise, x - args.eps, x + args.eps) x_noise = tf.clip_by_value(x_noise, 0., 1.) x_advs[i] = x_noise # Train an MNIST model tf_train(x, y, model, X_train, Y_train, data_gen, model_name, x_advs=x_advs) # Finally print the result! test_error = tf_test_error_rate(model, x, X_test, Y_test) with open(model_name + '_log.txt', 'a') as log: log.write('Test error: %.1f%%' % test_error) print('Test error: %.1f%%' % test_error) save_model(model, model_name) json_string = model.to_json() with open(model_name+'.json', 'w') as f: f.write(json_string)
def main(model_name, adv_model_names, model_type): np.random.seed(0) assert keras.backend.backend() == "tensorflow" # Get MNIST test data X_train, Y_train, X_test, Y_test = data_mnist() data_gen = data_gen_mnist(X_train) x = K.placeholder(shape=(None, 28, 28, 1)) y = K.placeholder(shape=(BATCH_SIZE, 10)) eps = args.eps norm = args.norm # if src_models is not None, we train on adversarial examples that come # from multiple models adv_models = [None] * len(adv_model_names) ens_str = '' for i in range(len(adv_model_names)): adv_models[i] = load_model(adv_model_names[i]) if len(adv_models) > 0: name = basename(adv_model_names[i]) model_index = name.replace('model', '') ens_str += model_index model = model_mnist(type=model_type) x_advs = [None] * (len(adv_models) + 1) for i, m in enumerate(adv_models + [model]): if args.iter == 0: logits = m(x) grad = gen_grad(x, logits, y, loss='training') x_advs[i] = symbolic_fgs(x, grad, eps=eps) elif args.iter == 1: x_advs[i] = iter_fgs(m, x, y, steps=40, alpha=0.01, eps=args.eps) # Train an MNIST model tf_train(x, y, model, X_train, Y_train, data_gen, x_advs=x_advs, benign=args.ben) # Finally print the result! test_error = tf_test_error_rate(model, x, X_test, Y_test) print('Test error: %.1f%%' % test_error) model_name += '_' + str(eps) + '_' + str(norm) + '_' + ens_str if args.iter == 1: model_name += 'iter' if args.ben == 0: model_name += '_nob' save_model(model, model_name) json_string = model.to_json() with open(model_name + '.json', 'wr') as f: f.write(json_string)
def main():
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"

    set_model_flags(False)
    tf.reset_default_graph()
    g = tf.get_default_graph()

    x = tf.placeholder(tf.float32,
                       shape=[None,
                              FLAGS.IMAGE_ROWS,
                              FLAGS.IMAGE_COLS,
                              FLAGS.NUM_CHANNELS])
    y = tf.placeholder(tf.float32, shape=[None, FLAGS.NUM_CLASSES])
    train_mode = tf.placeholder(tf.bool)

    adv_model = adv_models(FLAGS.TYPE)
    data = dataset('../Defense_Model/tiny-imagenet-200/', normalize=False)

    # tf_train returns the session and graph_dict.
    # tf_test_error_rate also needs to run that session and reuse the
    # feed_dict set up in tf_train.
    # graph_dict is the dictionary containing all the graph items needed
    # for evaluation.
    sess, graph_dict = tf_train(g, x, y, data, adv_model, train_mode)

    # Finally print the result!
    test_error = tf_test_error_rate(sess, graph_dict, data)
    print('Test error: %.1f%%' % test_error)

    sess.close()
    del g
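# The comments above describe the contract between tf_train and
# tf_test_error_rate: the returned sess is reused, and graph_dict carries the
# graph items needed for evaluation. The sketch below illustrates one way that
# contract could look; every name used here (the graph_dict keys 'x', 'y',
# 'train_mode', 'preds' and the data.x_test / data.y_test attributes) is
# hypothetical, not the repo's actual layout.
def tf_test_error_rate_sketch(sess, graph_dict, data):
    """Run the trained graph on the test split and return the error in %."""
    x = graph_dict['x']
    y = graph_dict['y']
    train_mode = graph_dict['train_mode']
    preds = graph_dict['preds']

    # Fraction of test examples whose predicted class matches the label
    correct = tf.equal(tf.argmax(preds, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    acc_val = sess.run(accuracy, feed_dict={x: data.x_test,
                                            y: data.y_test,
                                            train_mode: False})
    return 100.0 * (1.0 - acc_val)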
def main(model_name, adv_model_names, model_type): np.random.seed(0) assert keras.backend.backend() == "tensorflow" set_mnist_flags() flags.DEFINE_bool('NUM_EPOCHS', args.epochs, 'Number of epochs') # Get MNIST test data X_train, Y_train, X_test, Y_test = data_mnist() data_gen = data_gen_mnist(X_train) x = K.placeholder(shape=(None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS)) y = K.placeholder(shape=(FLAGS.BATCH_SIZE, FLAGS.NUM_CLASSES)) eps = args.eps # if src_models is not None, we train on adversarial examples that come # from multiple models adv_models = [None] * len(adv_model_names) for i in range(len(adv_model_names)): adv_models[i] = load_model(adv_model_names[i]) model = model_mnist(type=model_type) x_advs = [None] * (len(adv_models) + 1) for i, m in enumerate(adv_models + [model]): logits = m(x) grad = gen_grad(x, logits, y, loss='training') x_advs[i] = symbolic_fgs(x, grad, eps=eps) # Train an MNIST model tf_train(x, y, model, X_train, Y_train, data_gen, x_advs=x_advs) # Finally print the result! test_error = tf_test_error_rate(model, x, X_test, Y_test) print('Test error: %.1f%%' % test_error) save_model(model, model_name) json_string = model.to_json() with open(model_name + '.json', 'wr') as f: f.write(json_string)
def main(args):
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"

    with tf.device('/gpu:0'):
        # Load MNIST directly, or a custom dataset from the given path
        x_train, y_train, x_test, y_test = data_mnist() if args.dataset == "mnist" \
            else data(path=args.dataset,
                      representation=args.representation,
                      test_path=args.test_path,
                      n=args.n)
        data_gen = data_gen_mnist(x_train)

        x = K.placeholder((None,) + args.x_dim)
        y = K.placeholder(shape=(None, 10))

        model = model_mnist(type=args.type)

        tf_train(x, y, model, x_train, y_train, data_gen, None, None)

        _, _, test_error = tf_test_error_rate(model(x), x, x_test, y_test)
        print('Test error: %.1f%%' % test_error)

        accuracy = 100 - test_error
        with open(args.accuracy_path, 'w+') as f:
            # file.write expects a string, so convert the number first
            f.write(str(accuracy))

        save_model(model, args.model)
        json_string = model.to_json()
        try:
            with open(args.model + '.json', 'w') as f:
                f.write(json_string)
        except Exception:
            print(json_string)