def main():
    """Train an adversarial model on Tiny-ImageNet and print its test error.

    Relies on module-level helpers visible elsewhere in the project
    (set_model_flags, adv_models, dataset, tf_train, tf_test_error_rate)
    and the TF1 default-graph / Session workflow.
    """
    np.random.seed(0)  # deterministic runs
    assert keras.backend.backend() == "tensorflow"
    set_model_flags(False)

    tf.reset_default_graph()
    g = tf.get_default_graph()

    # Placeholders: image batch, one-hot labels, and the train/eval switch.
    x = tf.placeholder(
        tf.float32,
        shape=[None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS])
    y = tf.placeholder(tf.float32, shape=[None, FLAGS.NUM_CLASSES])
    train_mode = tf.placeholder(tf.bool)

    adv_model = adv_models(FLAGS.TYPE)

    # BUG FIX: the dataset was previously bound to `ata`, leaving `data`
    # undefined at the tf_train()/tf_test_error_rate() calls (NameError).
    data = dataset('../Defense_Model/tiny-imagenet-200/', normalize=False)

    # tf_train returns the live session plus a dict of the graph tensors
    # that tf_test_error_rate needs to build its feed_dict.
    sess, graph_dict = tf_train(g, x, y, data, adv_model, train_mode)

    # Finally print the result!
    test_error = tf_test_error_rate(sess, graph_dict, data)
    print('Test error: %.1f%%' % test_error)
    sess.close()
    del g
def main(adv_model_names):
    """Adversarially train the defense model on FGS examples generated from
    the given source models, then print the resulting test error.
    """
    np.random.seed(0)
    assert keras.backend.backend() == "tensorflow"
    set_model_flags()

    tf.reset_default_graph()
    g = tf.get_default_graph()

    # Image / label placeholders and the training-mode switch.
    x = tf.placeholder(
        tf.float32,
        shape=[None, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS])
    y = tf.placeholder(tf.float32, shape=[None, FLAGS.NUM_CLASSES])
    train_mode = tf.placeholder(tf.bool)

    eps = FLAGS.EPS

    # If several source models are named, we train on adversarial examples
    # coming from all of them (loaded from saved weights on disk).
    adv_models = [
        load_model(name, path="./models/" + name + "-save.npy")
        for name in adv_model_names
    ]

    # One symbolic FGS adversarial-example tensor per source model.
    x_advs = []
    for source in adv_models:
        logits, _ = source(x)
        grad = gen_grad(x, logits, y, loss='training')
        x_advs.append(symbolic_fgs(x, grad, eps=eps))

    data = dataset(FLAGS.DIR, normalize=False)
    sess, graph_dict = tf_train(
        g, x, y, data, defense_model, train_mode, x_advs=x_advs)

    # Finally print the result!
    test_error = tf_test_error_rate(sess, graph_dict, data, x_advs)
    print('Test error: %.1f%%' % test_error)
# Beispiel #3 ("Example #3") — collage separator from the example scraper,
# not Python code; commented out so the file stays parseable.
# 0
import numpy as np
import pandas as pd
from sklearn.linear_model import SGDRegressor
import os
import yaml
from data_utils import dataset

# Build per-city training/testing sets of flattened sliding-window features
# for an SGDRegressor. NOTE(review): the script is truncated here — the
# testing-set loop below continues past this excerpt.
# NOTE(review): yaml.load() without an explicit Loader is deprecated and
# unsafe on untrusted input — prefer yaml.safe_load(f); confirm the config
# file is trusted before changing.
with open('config.yaml') as f:
    config = yaml.load(f)
data = dataset(config)
for i in range(config['city_number']):
    # tr/te presumably have shape (series, time, channels) — TODO confirm
    # against data_utils.dataset.
    tr = data.tr[i]
    te = data.te[i]
    WindowSize = 81  # look-back window length, in time steps
    N, T, C = tr.shape
    trainingFeatureSet = []
    trainingLabelSet = []
    for n in range(N):
        t = 0
        # Slide the window along time; the label is channel 0 at the step
        # immediately after the window.
        while t + WindowSize < T:
            trainingFeatureSet.append(tr[n, t:t + WindowSize, :].flatten())
            trainingLabelSet.append(tr[n, t + WindowSize, 0])
            t = t + 1
    trainingFeatureSet = np.array(trainingFeatureSet)
    trainingLabelSet = np.array(trainingLabelSet)

    # Same windowing over the test split (body continues past this excerpt).
    N, T, C = te.shape
    testingFeatureSet = []
    testingLabelSet = []
    for n in range(N):
        t = 0
# Beispiel #4 ("Example #4") — collage separator from the example scraper,
# not Python code; commented out so the file stays parseable.
# 0
def main(_):
    """Train the CNN built by build_graph for configTrain.epochs epochs,
    validate after each epoch, plot loss/accuracy curves, and save the
    trained session.

    NOTE(review): `configTrain`, `configModel`, `modelName`,
    `validation_loss`, `validation_accu` and `training_loss` are not
    defined in this function — presumably module-level globals; confirm.
    `training_loss` is plotted below but never appended to here.
    """

    # download and load data sets
    alldata = dataset(FLAGS.trainDir)
    alldata.maybe_download_and_extract()
    train_data, _, train_labels = alldata.load_training_data()
    test_data, _, test_labels = alldata.load_test_data()
    class_names = alldata.load_class_names()

    iterations = int(
        train_data.shape[0] /
        configTrain.batch_size)  # total training iterations in each epoch

    tf.reset_default_graph()
    g = tf.Graph()
    with g.as_default():

        model = build_graph(configModel)

        init = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init)

            ## start training epochs
            epoch = 1
            while epoch <= configTrain.epochs:

                now = datetime.now()
                train_model_for_one_epoch(iterations,
                                          train_data,
                                          train_labels,
                                          model,
                                          sess,
                                          configTrain,
                                          record_train_loss=True)
                used_time = datetime.now() - now

                print("\nEpoch round ", epoch,
                      ' used {0} seconds. '.format(used_time.seconds))

                # Evaluate on the test split after each epoch.
                val_loss, val_accuracy = generate_prediction(
                    test_data, test_labels, model, sess, ['loss', 'accuracy'])
                validation_loss.append(val_loss)
                validation_accu.append(val_accuracy)
                # BUG FIX: user-facing typo "Valiation" -> "Validation".
                print("Validation loss ", val_loss, " and accuracy ",
                      val_accuracy)

                ## if required, visualize activations from image
                if configTrain.vis_weights_every_epoch > 0 and epoch % configTrain.vis_weights_every_epoch == 0:

                    vis_activations_from_model(train_data, model, sess,
                                               FLAGS.trainDir, 10)

                epoch += 1

            ## upon training done, plot training & validation losses and validation accuracy
            print("training done.")
            plot_training_loss(
                training_loss, os.path.join(FLAGS.trainDir,
                                            'train_losses.png'))
            plot_val_loss_n_accuracy(
                validation_loss, validation_accu,
                os.path.join(FLAGS.trainDir, 'val_losses_n_accuracy.png'))

            ## save trained session
            if not os.path.exists(FLAGS.savedSessionDir):
                os.makedirs(FLAGS.savedSessionDir)
            temp_saver = model['saver']()
            save_path = temp_saver.save(
                sess, os.path.join(FLAGS.savedSessionDir, modelName))

        print("\nTraining done. Model saved: ",
              os.path.join(FLAGS.savedSessionDir, modelName))
# VDSR training setup: build the model, optionally resume from a checkpoint,
# create optimizer/scheduler/loss and the train/val data loaders.
# NOTE(review): this script is truncated — the epoch-loop body continues
# past this excerpt.
device = torch.device(opt.device)
model = vdsr.Net(opt).to(device)

# Resume from a saved state dict when a non-zero start epoch is given.
if opt.start_epoch > 0:
    model.load_state_dict(torch.load(opt.checkpoint_dir + '%s_epoch_%d_%s.pkl'
                                     % (opt.model, opt.start_epoch, opt.suffix)))
print('# network parameters:', sum(param.numel() for param in model.parameters()))

optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, weight_decay=1e-5)
# Drop LR 10x when the monitored metric plateaus for 10 epochs, floor 1e-5.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=10, verbose=True, min_lr=1e-5)
# MAELoss = torch.nn.L1Loss()
MSELoss = torch.nn.MSELoss()

# load dataset
train_dataset = dataset(opt.train_input_dir, opt.train_truth_dir)
train_data_loader = DataLoader(train_dataset, batch_size=opt.batch_size, shuffle=True)
val_dataset = dataset(opt.val_input_dir, opt.val_truth_dir)
val_data_loader = DataLoader(val_dataset, batch_size=opt.batch_size, shuffle=False)

# Presumably per-epoch loss histories for train and validation — confirm
# against the (truncated) loop body below.
results = {'p_loss': []}
results_val = {'p_loss': []}

for epoch in range(opt.epoch-opt.start_epoch):
    epoch_true = epoch + opt.start_epoch + 1  # 1-based epoch index incl. resume offset
    # train model
    model.train()

    p_loss = 0
    pic_num = 0
# In[ ]:

#set_model_flags(False)

# Notebook cell: restore a ResNet-18 checkpoint and measure its clean
# accuracy on one Tiny-ImageNet test batch through a foolbox-style
# TensorFlowModel wrapper.
graph, saver, images, logits = adv_model_resnet()

sess = tf.Session(graph=graph)
#sess.run(tf.global_variables_initializer())
#model.tf_load(sess, "./resnet18/checkpoints/model/")

# Restore weights from the latest checkpoint under ./resnet18/checkpoints/model.
path = os.path.join('resnet18', 'checkpoints', 'model')
saver.restore(sess, tf.train.latest_checkpoint(path))

data = dataset('../Defense_Model/tiny-imagenet-200/', normalize=False)
batch_size = 256
x_test, y_test = data.next_test_batch(batch_size)

with sess.as_default():
    # bounds=(0, 255) matches the unnormalized dataset loaded above
    # (normalize=False).
    model = TensorFlowModel(images, logits, bounds=(0, 255))
    y_logits = model.batch_predictions(x_test)
    #y_prob=np.softmax(y_logits)
    y_pred = np.argmax(y_logits, axis=1)
    #print(y_pred)
    y_label = np.argmax(y_test, axis=1)
    # Fraction of correct predictions on this batch (clean accuracy).
    print(np.sum(np.equal(y_pred, y_label)) / x_test.shape[0])

# In[ ]:
"""from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
# VDSR test script: parse CLI options, load the checkpoint for the chosen
# test epoch, and evaluate on the test set. NOTE(review): `parser` is
# created above this excerpt, and the evaluation loop body continues past
# the end of it.
parser.add_argument('--input_nc', type=int, default=1)   # input channel count
parser.add_argument('--output_nc', type=int, default=9)  # output channel count
parser.add_argument('--suffix', type=str, default='9ch')
# other option
parser.add_argument('--test_epoch', type=int, default=45, help='test epoch')
parser.add_argument('--device', type=str, default='cuda:0', help='gpu name')
# define opt
opt = parser.parse_args()

# Load the network weights saved at the requested test epoch.
model = vdsr.Net(opt).cuda()
model.load_state_dict(
    torch.load(opt.checkpoint_dir + '%s_epoch_%d_%s.pkl' %
               (opt.model, opt.test_epoch, opt.suffix)))
MSELoss = torch.nn.MSELoss()

test_dataset = dataset(opt.test_input_dir, opt.test_truth_dir)
test_data_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

total_pl = 0
pic_num = 0
model.eval()
print('test results of %s_%s_epoch_%s' %
      (opt.model, opt.test_epoch, opt.suffix))
# NOTE(review): `iter` and `input` shadow Python builtins here — consider
# renaming when this loop is next touched.
for iter, test_data in enumerate(test_data_loader):
    input, truth = test_data
    input, truth = Variable(input, requires_grad=False).cuda(), Variable(
        truth, requires_grad=False).cuda()
    batch_num = input.size(0)
    pic_num += batch_num

    pred = model(input)