def __init__(self, imagedir, image_limit, filter_name, filter_num,
             *args, **kwargs):
    super(FilterVisualizer, self).__init__(*args, **kwargs)

    self.images = get_inputs(imagedir, "jpg", image_limit, color=False)
    self.filter_name = filter_name
    self.filter_num = filter_num
    self.ncols = 2
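
A minimal sketch (not the project's actual utils module) of a get_inputs() helper compatible with this call and with the keyword call in the OutputVisualizer example further down: glob image files from a directory, optionally shuffle, cap the count, and load them with OpenCV. Only the argument names imagedir/ext/limit/color/shuffle are taken from the call sites; everything else is an assumption.

import glob
import os
import random

import cv2


def get_inputs(imagedir, ext, limit=None, color=True, shuffle=False):
    # Collect image paths, e.g. "*.jpg", in a deterministic order.
    paths = sorted(glob.glob(os.path.join(imagedir, "*.%s" % ext)))
    if shuffle:
        random.shuffle(paths)
    if limit is not None:
        paths = paths[:limit]
    # Load as grayscale when color=False, matching the calls above.
    flag = cv2.IMREAD_COLOR if color else cv2.IMREAD_GRAYSCALE
    return [cv2.imread(p, flag) for p in paths]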
Example #2
def main(genomes, config):

    global dt, fps

    #NEAT RL:
    snakes = []
    ge = []
    nets = []
    for _, g in genomes:
        net = neat.nn.FeedForwardNetwork.create(g, config)
        nets.append(net)
        snakes.append(Snake(H, W))
        g.fitness = 0
        ge.append(g)

    clock = pygame.time.Clock()
    while True:

        #For pausing, speeding up and down
        manual_control()

        #Training NEAT
        rewards = []
        for x, snake in enumerate(snakes):
            inputs = get_inputs(*snake.get_head(), snake.board)
            output = nets[x].activate(inputs)
            argmax = output.index(max(output))
            move = ["left", "right", "up", "down"][argmax]
            if snake.is_valid_move(move):
                snake.next_move = move
            else:
                ge[x].fitness -= 0.01
            rewards.append(snake.move())

        # Walk the lists in reverse so that popping an index does not shift
        # the snakes that still have to be processed in this pass.
        for x in reversed(range(len(rewards))):
            reward = rewards[x]
            if reward == "Game over" or ge[x].fitness < -1:
                ge[x].fitness -= 10
                rewards.pop(x)
                snakes.pop(x)
                nets.pop(x)
                ge.pop(x)
            elif reward == "Food":
                ge[x].fitness += 1
            else:
                ge[x].fitness -= 0.01

        if len(snakes) == 0:
            break

        snakes[0].draw(screen, SIZE_OF_BLOCK)
        fps += dt
        clock.tick(fps)
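
Purely illustrative sketch of a get_inputs(x, y, board) feature encoder that would fit the calls in this example and in the get_move() snippet further down; the features the project actually feeds the network are not shown here. This version only tells the network whether each neighbouring cell is blocked, ordered like the ["left", "right", "up", "down"] action list.

def get_inputs(x, y, board):
    h, w = len(board), len(board[0])

    def blocked(i, j):
        # Walls and occupied (non-zero) cells count as blocked.
        return 1.0 if i < 0 or j < 0 or i >= h or j >= w or board[i][j] else 0.0

    # One input per possible move: left, right, up, down.
    return [blocked(x, y - 1), blocked(x, y + 1), blocked(x - 1, y), blocked(x + 1, y)]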
Example #3
def main():
    args = get_args()

    ver_logdir = args.load_model[:-3] + '_ver'
    if not os.path.exists(ver_logdir):
        os.makedirs(ver_logdir)

    num_train, _, test_loader, input_size, input_channel, n_class = get_loaders(
        args)
    net = get_network(device, args, input_size, input_channel, n_class)
    print(net)

    args.test_domains = []
    # with torch.no_grad():
    #     test(device, 0, args, net, test_loader, layers=[-1, args.layer_idx])
    args.test_batch = 1
    num_train, _, test_loader, input_size, input_channel, n_class = get_loaders(
        args)
    latent_idx = args.layer_idx if args.latent_idx is None else args.latent_idx
    img_file = open(args.unverified_imgs_file, 'w')

    with torch.no_grad():
        tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, tot_tests = 0, 0, 0, 0, 0
        for test_idx, (inputs, targets) in enumerate(test_loader):
            if test_idx < args.start_idx or test_idx >= args.end_idx:
                continue
            tot_tests += 1
            test_file = os.path.join(ver_logdir, '{}.p'.format(test_idx))
            test_data = pickle.load(open(test_file, 'rb')) if (
                not args.no_load) and os.path.isfile(test_file) else {}
            print('Verify test_idx =', test_idx)

            net.reset_bounds()

            inputs, targets = inputs.to(device), targets.to(device)
            abs_inputs = get_inputs(args.test_domain,
                                    inputs,
                                    args.test_eps,
                                    device,
                                    dtype=dtype)
            nat_out = net(inputs)
            nat_ok = targets.eq(nat_out.max(dim=1)[1]).item()
            tot_nat_ok += float(nat_ok)
            test_data['ok'] = nat_ok
            if not nat_ok:
                report(ver_logdir, tot_verified_corr, tot_nat_ok,
                       tot_attack_ok, tot_pgd_ok, test_idx, tot_tests,
                       test_data)
                continue

            for _ in range(args.attack_restarts):
                with torch.enable_grad():
                    pgd_loss, pgd_ok = get_adv_loss(device, args.test_eps, -1,
                                                    net, None, inputs, targets,
                                                    args.test_att_n_steps,
                                                    args.test_att_step_size)
                    if not pgd_ok:
                        break

            if pgd_ok:
                test_data['pgd_ok'] = 1
                tot_pgd_ok += 1
            else:
                test_data['pgd_ok'] = 0
                report(ver_logdir, tot_verified_corr, tot_nat_ok,
                       tot_attack_ok, tot_pgd_ok, test_idx, tot_tests,
                       test_data)
                continue

            if 'verified' in test_data and test_data['verified']:
                tot_verified_corr += 1
                tot_attack_ok += 1
                report(ver_logdir, tot_verified_corr, tot_nat_ok,
                       tot_attack_ok, tot_pgd_ok, test_idx, tot_tests,
                       test_data)
                continue
            if args.no_milp:
                report(ver_logdir, tot_verified_corr, tot_nat_ok,
                       tot_attack_ok, tot_pgd_ok, test_idx, tot_tests,
                       test_data)
                continue

            zono_inputs = get_inputs('zono_iter',
                                     inputs,
                                     args.test_eps,
                                     device,
                                     dtype=dtype)
            bounds = compute_bounds(net, device,
                                    len(net.blocks) - 1, args, zono_inputs)
            relu_params = reset_params(args, net, dtype)
            with torch.enable_grad():
                learn_slopes(device, relu_params, bounds, args,
                             len(net.blocks), net, inputs, targets, abs_inputs,
                             None, None)
            bounds = compute_bounds(net, device,
                                    len(net.blocks) - 1, args, zono_inputs)

            for _ in range(args.attack_restarts):
                with torch.enable_grad():
                    latent_loss, latent_ok = get_adv_loss(
                        device, args.test_eps, latent_idx, net, bounds, inputs,
                        targets, args.test_att_n_steps,
                        args.test_att_step_size)
                    # print('-> ', latent_idx, latent_loss, latent_ok)
                    if not latent_ok:
                        break

            if latent_ok:
                tot_attack_ok += 1
            zono_out = net(zono_inputs)
            verified, verified_corr = zono_out.verify(targets)
            test_data['verified'] = int(verified_corr.item())
            if verified_corr:
                tot_verified_corr += 1
                report(ver_logdir, tot_verified_corr, tot_nat_ok,
                       tot_attack_ok, tot_pgd_ok, test_idx, tot_tests,
                       test_data)
                continue

            loss_after = net(abs_inputs).ce_loss(targets)
            if args.refine_lidx is not None:
                bounds = compute_bounds(net, device,
                                        len(net.blocks) - 1, args, abs_inputs)
                for lidx in range(0, args.layer_idx + 2):
                    net.blocks[lidx].bounds = bounds[lidx]

                print('loss before refine: ', net(abs_inputs).ce_loss(targets))
                refine_dim = bounds[args.refine_lidx + 1][0].shape[2]
                pbar = tqdm(total=refine_dim * refine_dim, dynamic_ncols=True)
                for refine_i in range(refine_dim):
                    for refine_j in range(refine_dim):
                        refine(args, bounds, net, refine_i, refine_j,
                               abs_inputs, input_size)
                        pbar.update(1)
                pbar.close()
                loss_after = net(abs_inputs).ce_loss(targets)
                print('loss after refine: ', loss_after)

            if loss_after < args.loss_threshold:
                if args.refine_opt is not None:
                    with torch.enable_grad():
                        learn_bounds(net, bounds, relu_params, zono_inputs,
                                     args.refine_opt)
                if verify_test(args, net, inputs, targets, abs_inputs, bounds,
                               test_data, test_idx):
                    tot_verified_corr += 1
                    test_data['verified'] = True
            report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok,
                   tot_pgd_ok, test_idx, tot_tests, test_data)
    img_file.close()
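
The get_inputs(domain, inputs, eps, device, dtype=...) calls above return an abstract-domain object that can be pushed through net(). That object is repository-specific and not reproduced here; the hedged sketch below shows only the first step such a constructor takes for an L-infinity 'box' domain, clamping the eps-ball around the clean image to the valid pixel range.

import torch


def linf_box_bounds(inputs, eps):
    # Elementwise lower/upper bounds of the L-infinity ball, kept in [0, 1].
    lb = (inputs - eps).clamp(min=0.0, max=1.0)
    ub = (inputs + eps).clamp(min=0.0, max=1.0)
    return lb, ub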
Example #4
train_source_int = source_int[BATCH_SIZE:]
train_target_int = target_int[BATCH_SIZE:]
test_source_int = source_int[:BATCH_SIZE]
test_target_int = target_int[:BATCH_SIZE]
test_generator = utils.get_batch(BATCH_SIZE, test_source_int, test_target_int,
                                 source_vocab_char_to_int['<PAD>'],
                                 target_vocab_char_to_int['<PAD>'])
test_pad_source, test_pad_target, test_target_lengths, test_source_lengths = next(
    test_generator)

graph = tf.Graph()
with graph.as_default():

    batch_source_input_ids, batch_target_input_ids, \
        target_sequences_lengths, max_target_sequences_length = utils.get_inputs()

    with tf.name_scope('Train'):
        with tf.variable_scope('Model'):
            train_model = seq2seq_model.Seq2seqModel(
                source_input_ids=batch_source_input_ids,
                target_input_ids=batch_target_input_ids,
                num_units=NUM_UNITS,
                num_layers=NUM_LAYERS,
                source_vocab_size=len(source_vocab_char_to_int),
                target_vocab_size=len(target_vocab_char_to_int),
                target_sequences_lengths=target_sequences_lengths,
                train_test_predict='train',
                grad_clip_norm=GRAD_CLIP_NORM,
                learning_rate=LEARNING_RATE,
                target_vocab_char_to_int=target_vocab_char_to_int,
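
Hedged sketch of a utils.get_inputs() matching the four values unpacked above: in a TF1 graph it would simply define the input placeholders and derive the maximum target length. Shapes, dtypes, and tensor names are assumptions.

import tensorflow as tf


def get_inputs():
    source_input_ids = tf.placeholder(tf.int32, [None, None], name='source_input_ids')
    target_input_ids = tf.placeholder(tf.int32, [None, None], name='target_input_ids')
    target_sequences_lengths = tf.placeholder(tf.int32, [None], name='target_sequences_lengths')
    max_target_sequences_length = tf.reduce_max(target_sequences_lengths,
                                                name='max_target_sequences_length')
    return (source_input_ids, target_input_ids,
            target_sequences_lengths, max_target_sequences_length)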
Example #5
import utils
from generations import FamilyTree

params = utils.get_inputs()
family = FamilyTree(params)

family.main()
Example #6
def test(device, epoch, args, net, test_loader, layers):
    net.eval()
    test_nat_loss, test_nat_ok, test_pgd_loss, test_pgd_ok, n_batches = 0, 0, {}, {}, 0
    test_abs_width, test_abs_ok, test_abs_loss, test_abs_n, test_abs_ex = {}, {}, {}, {}, {}
    for domain in args.test_domains:
        test_abs_width[domain], test_abs_ok[domain], test_abs_loss[
            domain], test_abs_n[domain], test_abs_ex[domain] = 0, 0, 0, 0, 0
    for layer_idx in layers:
        test_pgd_loss[layer_idx], test_pgd_ok[layer_idx] = 0, 0
    pbar = tqdm(test_loader)

    relu_params = []
    for param_name, param_value in net.named_parameters():
        if 'deepz_lambda' in param_name:
            relu_params.append(param_value)
            param_value.requires_grad_(True)

    for inputs, targets in pbar:
        inputs, targets = inputs.to(device), targets.to(device)
        for param in relu_params:
            param.data = 1.0 * torch.ones(param.size()).to(device)

        bounds = compute_bounds_approx(args.test_eps, net.blocks, layers[-1],
                                       inputs, args.n_rand_proj)

        pgd_loss, pgd_ok = {}, {}
        for layer_idx in layers:
            with torch.enable_grad():
                pgd_loss[layer_idx], pgd_ok[layer_idx] = get_adv_loss(
                    device,
                    args.test_eps,
                    layer_idx,
                    net,
                    bounds,
                    inputs,
                    targets,
                    args.test_att_n_steps,
                    args.test_att_step_size,
                    avg=False)
                test_pgd_loss[layer_idx] += pgd_loss[layer_idx].item()
                test_pgd_ok[layer_idx] += pgd_ok[layer_idx].mean().item()

        for domain in args.test_domains:
            abs_inputs = get_inputs(
                'zono' if domain == 'zono_iter' else domain, inputs,
                args.test_eps, device)
            abs_out = net(abs_inputs)
            abs_loss = abs_out.ce_loss(targets)
            abs_width = abs_out.avg_width().item()
            verified, verified_corr = abs_out.verify(targets)
            test_abs_loss[domain] += abs_loss.item()
            test_abs_width[domain] += abs_width
            test_abs_ok[domain] += verified_corr.float().mean().item()
            test_abs_n[domain] += 1
            for layer_idx in layers:
                # print(verified_corr, pgd_ok[layer_idx])
                assert (verified_corr <= pgd_ok[layer_idx]).all()
        nat_outs = net(inputs)
        nat_loss = F.cross_entropy(nat_outs, targets)
        test_nat_loss += nat_loss.item()
        test_nat_ok += targets.eq(nat_outs.max(dim=1)[1]).float().mean().item()
        n_batches += 1
        abs_ok_str = ', '.join([
            '%s: %.4f' % (domain, test_abs_ok[domain] / n_batches)
            for domain in args.test_domains
        ])
        abs_width_str = ', '.join([
            '%s: %.4f' % (domain, -1 if test_abs_n[domain] == 0 else
                          test_abs_width[domain] / test_abs_n[domain])
            for domain in args.test_domains
        ])
        abs_pgd_ok_str = ', '.join([
            '%d: %.4f' % (layer_idx, test_pgd_ok[layer_idx] / n_batches)
            for layer_idx in layers
        ])
        abs_pgd_loss_str = ', '.join([
            '%d: %.4f' % (layer_idx, test_pgd_loss[layer_idx] / n_batches)
            for layer_idx in layers
        ])
        pbar.set_description(
            '[V] nat_loss=%.4f, nat_ok=%.4f, pgd_loss={%s}, pgd_ok={%s}' %
            (test_nat_loss / n_batches, test_nat_ok / n_batches,
             abs_pgd_loss_str, abs_pgd_ok_str))
    return test_nat_loss / n_batches, test_nat_ok / n_batches, test_pgd_loss[
        layers[0]] / n_batches, test_pgd_ok[layers[0]] / n_batches
Example #7
def get_move(self, x, y, board):
    inputs = get_inputs(x, y, board)
    output = net.activate(inputs)
    argmax = output.index(max(output))
    move = ["left", "right", "up", "down"][argmax]
    return move
Example #8
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
import utils
from sklearn import metrics
# fix random seed for reproducibility
from sklearn.model_selection import KFold

np.random.seed(7)

k1, k2 = utils.get_inputs()

dataset = np.loadtxt("spambase.txt", delimiter=',')
total_accuracy = 0
total_f1_score = 0
kfold = KFold(n_splits=10, shuffle=True, random_state=1)

for train, test in kfold.split(dataset):
    data, target = utils.prepare_data_mlp(dataset, train)
    test_data, expected = utils.prepare_data_mlp(dataset, test)

    # Create Model
    model = Sequential()
    model.add(Dense(12, input_dim=57, activation='sigmoid'))
    model.add(Dense(k1, activation='sigmoid'))
    model.add(Dense(k2, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))

    # Compile model
    model.compile(loss='mean_squared_error',
                  optimizer='sgd',
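
Hedged sketch of the utils.get_inputs() this script relies on: it only has to yield two integers, the sizes of the two extra hidden layers (k1 and k2). Reading them from the command line is an assumption; the original may prompt interactively or hard-code them.

import sys


def get_inputs():
    # e.g. "python mlp.py 8 4" -> k1=8, k2=4
    if len(sys.argv) >= 3:
        return int(sys.argv[1]), int(sys.argv[2])
    k1 = int(input("Size of first hidden layer: "))
    k2 = int(input("Size of second hidden layer: "))
    return k1, k2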
Example #9
    def define_graph(self):
        """
        Setup the model graph in TensorFlow.
        """

        with tf.name_scope('Model'):
            with tf.name_scope('Data'):
                self.frames, self.angles = get_inputs(True, c.BATCH_SIZE,
                                                      c.NUM_EPOCHS)
                self.frames_test, self.angles_test = get_inputs(
                    False, c.NUM_VALIDATION, 1)

            with tf.name_scope('Variables'):
                with tf.name_scope('Conv'):
                    self.conv_ws = []
                    self.conv_bs = []

                    with tf.name_scope('1'):
                        self.conv_ws.append(
                            tf.Variable(
                                tf.truncated_normal([5, 5, 1, 32],
                                                    stddev=0.01)))
                        self.conv_bs.append(
                            tf.Variable(tf.truncated_normal([32],
                                                            stddev=0.01)))

                    with tf.name_scope('2'):
                        self.conv_ws.append(
                            tf.Variable(
                                tf.truncated_normal([5, 5, 32, 64],
                                                    stddev=0.01)))
                        self.conv_bs.append(
                            tf.Variable(tf.truncated_normal([64],
                                                            stddev=0.01)))

                with tf.name_scope('FC'):
                    self.fc_ws = []
                    self.fc_bs = []

                    with tf.name_scope('1'):
                        # TODO (Matt): Make sure these dimensions line up.
                        self.fc_ws.append(
                            tf.Variable(
                                tf.truncated_normal([3136, 1024],
                                                    stddev=0.01)))
                        self.fc_bs.append(
                            tf.Variable(
                                tf.truncated_normal([1024], stddev=0.01)))

                    with tf.name_scope('2'):
                        self.fc_ws.append(
                            tf.Variable(
                                tf.truncated_normal([1024, 1], stddev=0.01)))
                        self.fc_bs.append(
                            tf.Variable(tf.truncated_normal([1], stddev=0.01)))

            with tf.name_scope('Training'):
                self.global_step = tf.Variable(0, trainable=False)
                self.loss = MSE_loss(self.get_preds(self.frames), self.angles)
                self.optimizer = tf.train.AdamOptimizer(
                    learning_rate=self.l_rate)
                self.train_op = self.optimizer.minimize(
                    self.loss, global_step=self.global_step)

                loss_summary = tf.scalar_summary('train_loss', self.loss)
                self.summaries_train.append(loss_summary)

            with tf.name_scope('Testing'):
                self.preds_test = self.get_preds(
                    self.frames_test), self.angles_test
                self.loss_test = MSE_loss(self.preds_test, self.angles_test)

                loss_summary = tf.scalar_summary('test_loss', self.loss_test)
                self.summaries_test.append(loss_summary)
Example #10
    def __init__(self, modelfile, deployfile, imagedir, image_limit, **kwargs):
        shuffle = kwargs.pop("shuffle")
        super(OutputVisualizer, self).__init__("Output", modelfile, deployfile, tight_layout=True, **kwargs)

        self.images = get_inputs(imagedir, "jpg", limit=image_limit, color=False, shuffle=shuffle)
        self.ncols = 2
Example #11
from processors import process_image_folder
from processors import process_video
from utils import get_inputs
from pathlib import Path
import sys

args = get_inputs()

if not args["input_video"] and not args["input_folder"]:
	print('--input_video or --input_folder is required')
	sys.exit()

path = Path(args["output"])
path.mkdir(parents=True, exist_ok=True)

if args["input_video"]:
	process_video(args["input_video"], args["output"], args["skip"], verbose=args["verbose"])
if args["input_folder"]:
	process_image_folder(args["input_folder"], args["output"], verbose=args["verbose"])
Example #12
def run_training(tfrecords_path, batch_size, epoch, model_path, log_dir,
                 start_lr, wd, kp):
    with tf.Graph().as_default():
        sess = tf.Session()

        features, age_labels, gender_labels, _ = get_inputs(
            path=tfrecords_path, batch_size=batch_size, num_epochs=epoch)

        net, gender_logits, age_logits = inference(features,
                                                   age_labels,
                                                   gender_labels,
                                                   training=True)

        # Add to the Graph the loss calculation.
        age_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=age_labels, logits=age_logits)
        age_cross_entropy_mean = tf.reduce_mean(age_cross_entropy)

        gender_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=gender_labels, logits=gender_logits)
        gender_cross_entropy_mean = tf.reduce_mean(gender_cross_entropy)

        # Index 35 by itself does not work because there is no slot for it:
        # the labels run [20, 21, ..., 39], i.e. only 20 index values in total.

        # 20, 21, 22, 23, ..., 39 => [0, 1, 2, 3, 4, ..., 19]
        # l2 regularization
        total_loss = tf.add_n(
            [gender_cross_entropy_mean, age_cross_entropy_mean] +
            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))

        # age_ = tf.cast(tf.constant([i for i in range(0, 20)]), tf.float32)
        # age = tf.reduce_sum(tf.multiply(tf.nn.softmax(age_logits), age_), axis=1)

        prob_age = tf.argmax(tf.nn.softmax(age_logits), 1)
        age_acc = tf.reduce_mean(
            tf.to_float(tf.equal(tf.to_int64(prob_age), age_labels)))
        # abs_loss = tf.losses.absolute_difference(age_labels, age)

        prob_gender = tf.argmax(tf.nn.softmax(gender_logits), 1)
        gender_acc = tf.reduce_mean(
            tf.to_float(tf.equal(tf.to_int64(prob_gender), gender_labels)))

        tf.summary.scalar("age_cross_entropy", age_cross_entropy_mean)
        tf.summary.scalar("gender_cross_entropy", gender_cross_entropy_mean)
        tf.summary.scalar("total loss", total_loss)
        tf.summary.scalar("age_accuracy", age_acc)
        tf.summary.scalar("gender_accuracy", gender_acc)

        # Add to the Graph operations that train the model.
        global_step = tf.Variable(0, name="global_step", trainable=False)
        lr = tf.train.exponential_decay(start_lr,
                                        global_step=global_step,
                                        decay_steps=2000,
                                        decay_rate=0.1,
                                        staircase=True)
        #   decay_steps=4500, decay_rate=0.05, staircase=True)
        # 20000 / 0.000001 = 0.65
        # 2000 / 0.1 = 0.45
        # 4500 / 0.05 = 0.5
        optimizer = tf.train.AdamOptimizer(lr)
        tf.summary.scalar("lr", lr)

        # # only train age branch
        # trainable = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Net/Branch1') + tf.get_collection(
        #     tf.GraphKeys.GLOBAL_VARIABLES, scope='Logits/Age')

        all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

        update_ops = tf.get_collection(
            tf.GraphKeys.UPDATE_OPS)  # update batch normalization layer
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(total_loss,
                                          global_step)  #, var_list=trainable

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)

        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(log_dir, sess.graph)

        new_saver = tf.train.Saver(all_vars, max_to_keep=100)
        ckpt = tf.train.get_checkpoint_state(model_path)
        if ckpt and ckpt.model_checkpoint_path:
            new_saver.restore(sess, ckpt.model_checkpoint_path)
            print("restore and continue training!")
        else:
            pass

        # Start input enqueue threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            step = sess.run(global_step)
            start_time = time.time()
            while not coord.should_stop():
                # start_time = time.time()
                # Run one step of the model.  The return values are
                # the activations from the `train_op` (which is
                # discarded) and the `loss` op.  To inspect the values
                # of your ops or variables, you may include them in
                # the list passed to sess.run() and the value tensors
                # will be returned in the tuple from the call.
                _, summary = sess.run([train_op, merged])
                train_writer.add_summary(summary, step)
                # duration = time.time() - start_time
                # Print an overview fairly often.
                if step % 100 == 0:
                    duration = time.time() - start_time
                    print('%.3f sec' % duration)
                    start_time = time.time()
                if step % 1000 == 0:
                    save_path = new_saver.save(sess,
                                               os.path.join(
                                                   model_path, "model.ckpt"),
                                               global_step=global_step)
                    print("Model saved in file: %s" % save_path)
                step = sess.run(global_step)
        except tf.errors.OutOfRangeError:
            print('Done training for %d epochs, %d steps.' % (epoch, step))
        finally:
            # When done, ask the threads to stop.
            save_path = new_saver.save(sess,
                                       os.path.join(model_path, "model.ckpt"),
                                       global_step=global_step)
            print("Model saved in file: %s" % save_path)
            coord.request_stop()
        # Wait for threads to finish.
        coord.join(threads)
        sess.close()
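
Both this example and the test_once() example below call get_inputs(path=..., batch_size=..., num_epochs=...) and unpack (features, age_labels, gender_labels, file_paths), which points at a TF1 queue-based TFRecord pipeline. The sketch below is an assumption about that pipeline, not the repository's code: feature keys, image size, and batching parameters are guesses.

import tensorflow as tf


def get_inputs(path, batch_size, num_epochs):
    # Queue of TFRecord files, repeated for the requested number of epochs.
    filename_queue = tf.train.string_input_producer([path], num_epochs=num_epochs)
    reader = tf.TFRecordReader()
    _, serialized = reader.read(filename_queue)
    parsed = tf.parse_single_example(
        serialized,
        features={
            'image_raw': tf.FixedLenFeature([], tf.string),
            'age': tf.FixedLenFeature([], tf.int64),
            'gender': tf.FixedLenFeature([], tf.int64),
            'file_path': tf.FixedLenFeature([], tf.string),
        })
    image = tf.decode_raw(parsed['image_raw'], tf.uint8)
    image = tf.reshape(image, [160, 160, 3])
    image = tf.cast(image, tf.float32) / 255.0
    # Shuffled mini-batches; these ops are what the queue runners feed.
    features, age_labels, gender_labels, file_paths = tf.train.shuffle_batch(
        [image, parsed['age'], parsed['gender'], parsed['file_path']],
        batch_size=batch_size, capacity=2000, min_after_dequeue=1000)
    return features, age_labels, gender_labels, file_paths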
Example #13
def test_once(tfrecords_path, batch_size, model_checkpoint_path):
    with tf.Graph().as_default():
        sess = tf.Session()

        features, age_labels, gender_labels, file_paths = get_inputs(
            path=tfrecords_path, batch_size=batch_size, num_epochs=1)

        net, gender_logits, age_logits = inference(features,
                                                   age_labels,
                                                   gender_labels,
                                                   training=False)

        age_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=age_labels, logits=age_logits)
        age_cross_entropy_mean = tf.reduce_mean(age_cross_entropy)

        gender_cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=gender_labels, logits=gender_logits)
        gender_cross_entropy_mean = tf.reduce_mean(gender_cross_entropy)
        total_loss = tf.add_n(
            [gender_cross_entropy_mean, age_cross_entropy_mean] +
            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES),
            name="total_loss")

        # age_ = tf.cast(tf.constant([i for i in range(0, 20)]), tf.float32)
        # test=tf.convert_to_tensor(tf.nn.softmax(age_logits)) # modify
        prob_age = tf.argmax(tf.nn.softmax(age_logits), 1)
        age_acc = tf.reduce_mean(
            tf.to_float(tf.equal(tf.to_int64(prob_age), age_labels)))

        # prob_age = tf.reduce_sum(tf.multiply(tf.nn.softmax(age_logits), age_), axis=1)
        # abs_age_error = tf.losses.absolute_difference(prob_age, age_labels)
        # test=tf.nn.softmax(gender_logits)
        prob_gender = tf.argmax(tf.nn.softmax(gender_logits), 1)
        gender_acc = tf.reduce_mean(
            tf.to_float(tf.equal(tf.to_int64(prob_gender), gender_labels)))
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        saver = tf.train.Saver()
        saver.restore(sess, model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        mean_age_acc, mean_gender_acc, mean_loss = [], [], []
        try:
            while not coord.should_stop():
                prob_gender_val, real_gender, prob_age_val, real_age, image_val, gender_acc_val, age_acc_val, cross_entropy_mean_val, file_names = sess.run(
                    [
                        prob_gender, gender_labels, prob_age, age_labels,
                        features, gender_acc, age_acc, total_loss, file_paths
                    ])
                # print(type(prob_gender_val[0]))

                if prob_gender_val[0] == 0:
                    # prob_gender_val = "M"
                    # print('gender_logits',gender_logits)
                    # print('prob_gender_val',prob_gender_val,type(prob_gender_val))
                    # print("prob_age_val, real_age, prob_gender_val, real_gender",tar_age_dir[int(prob_age_val[0])], tar_age_dir[int(real_age[0])], prob_gender_val, real_gender)
                    # print(tar_age_dir[int(prob_age_val)],',', tar_age_dir[int(real_age)],',', prob_gender_val,',', real_gender)
                    print(prob_age_val, ',', real_age, ',', prob_gender_val,
                          ',', real_gender)
                else:
                    # prob_gender_val = "F"
                    # print("prob_age_val, real_age, prob_gender_val, real_gender",tar_age_dir[int(prob_age_val[0])], tar_age_dir[int(real_age[0])], prob_gender_val, real_gender)
                    print(prob_age_val, ',', real_age, ',', prob_gender_val,
                          ',', real_gender)

                mean_age_acc.append(age_acc_val)
                mean_gender_acc.append(gender_acc_val)
                mean_loss.append(cross_entropy_mean_val)
                # print("Age_acc:%s, Gender_Acc:%s, Loss:%.2f" % (
                #     age_acc_val, gender_acc_val, cross_entropy_mean_val))
        except tf.errors.OutOfRangeError:
            print('Summary:')
        finally:

            # When done, ask the threads to stop.
            coord.request_stop()
        coord.join(threads)
        sess.close()
        return prob_age_val, real_age, prob_gender_val, real_gender, image_val, np.mean(
            mean_age_acc), np.mean(mean_gender_acc), np.mean(
                mean_loss), file_names
Example #14
def main():
    parser = argparse.ArgumentParser(description='Perform greedy layerwise training.')
    parser.add_argument('--prune_p', default=None, type=float, help='percentage of weights to prune in each layer')
    parser.add_argument('--dataset', default='cifar10', help='dataset to use')
    parser.add_argument('--net', required=True, type=str, help='network to use')
    parser.add_argument('--load_model', type=str, help='model to load')
    parser.add_argument('--layer_idx', default=1, type=int, help='layer index of flattened vector')
    parser.add_argument('--n_valid', default=1000, type=int, help='number of test samples')
    parser.add_argument('--n_train', default=None, type=int, help='number of training samples to use')
    parser.add_argument('--train_batch', default=1, type=int, help='batch size for training')
    parser.add_argument('--test_batch', default=128, type=int, help='batch size for testing')
    parser.add_argument('--test_domain', default='zono', type=str, help='domain to test with')
    parser.add_argument('--test_eps', default=None, type=float, help='epsilon to verify')
    parser.add_argument('--debug', action='store_true', help='debug mode')
    parser.add_argument('--no_milp', action='store_true', help='no MILP mode')
    parser.add_argument('--no_load', action='store_true', help='verify from scratch')
    parser.add_argument('--no_smart', action='store_true', help='bla')
    parser.add_argument('--milp_timeout', default=10, type=int, help='timeout for MILP')
    parser.add_argument('--eval_train', action='store_true', help='evaluate on training set')
    parser.add_argument('--test_idx', default=None, type=int, help='specific index to test')
    parser.add_argument('--start_idx', default=0, type=int, help='specific index to start')
    parser.add_argument('--end_idx', default=1000, type=int, help='specific index to end')
    parser.add_argument('--max_binary', default=None, type=int, help='number of neurons to encode as binary variable in MILP (per layer)')
    parser.add_argument('--num_iters', default=50, type=int, help='number of iterations to find slopes')
    parser.add_argument('--max_refine_triples', default=0, type=int, help='number of triples to refine')
    parser.add_argument('--refine_lidx', default=None, type=int, help='layer to refine')
    parser.add_argument('--save_models', action='store_true', help='whether to only store models')
    parser.add_argument('--refine_milp', default=0, type=int, help='number of neurons to refine using MILP')
    parser.add_argument('--obj_threshold', default=None, type=float, help='threshold to consider for MILP verification')
    parser.add_argument('--attack_type', default='pgd', type=str, help='attack')
    parser.add_argument('--attack_n_steps', default=10, type=int, help='number of steps for the attack')
    parser.add_argument('--attack_step_size', default=0.25, type=float, help='step size for the attack (relative to epsilon)')
    parser.add_argument('--layers', required=False, default=None, type=int, nargs='+', help='layer indices for training')
    args = parser.parse_args()

    ver_logdir = args.load_model[:-3] + '_ver'
    if not os.path.exists(ver_logdir):
        os.makedirs(ver_logdir)
    grb_modelsdir = args.load_model[:-3] + '_grb'
    if not os.path.exists(grb_modelsdir):
        os.makedirs(grb_modelsdir)

    num_train, _, test_loader, input_size, input_channel = get_loaders(args)
    net = get_network(device, args, input_size, input_channel)
    n_layers = len(net.blocks)
    
    # net.to_double()

    args.test_domains = ['box']
    with torch.no_grad():
        test(device, 0, args, net, test_loader)

    args.test_batch = 1
    num_train, _, test_loader, input_size, input_channel = get_loaders(args)

    num_relu = 0
    for lidx in range(args.layer_idx+1, n_layers):
        print(net.blocks[lidx])
        if isinstance(net.blocks[lidx], ReLU):
            num_relu += 1

    with torch.no_grad():
        tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, tot_tests = 0, 0, 0, 0, 0
        for test_idx, (inputs, targets) in enumerate(test_loader):
            if test_idx < args.start_idx or test_idx >= args.end_idx or test_idx >= args.n_valid:
                continue
            if args.test_idx is not None and test_idx != args.test_idx:
                continue
            tot_tests += 1
            test_file = os.path.join(ver_logdir, '{}.p'.format(test_idx))
            test_data = pickle.load(open(test_file, 'rb')) if (not args.no_load) and os.path.isfile(test_file) else {}
            print('Verify test_idx =', test_idx)

            for lidx in range(n_layers):
                net.blocks[lidx].bounds = None

            inputs, targets = inputs.to(device), targets.to(device)
            abs_inputs = get_inputs(args.test_domain, inputs, args.test_eps, device, dtype=dtype)
            nat_out = net(inputs)
            nat_ok = targets.eq(nat_out.max(dim=1)[1]).item()
            tot_nat_ok += float(nat_ok)
            test_data['ok'] = nat_ok
            if not nat_ok:
                report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
                continue

            with torch.enable_grad():
                pgd_loss, pgd_ok = get_adv_loss(device, args.test_eps, -1, net, None, inputs, targets, args)
            if pgd_ok:
                test_data['pgd_ok'] = 1
                tot_pgd_ok += 1
            else:
                test_data['pgd_ok'] = 0
                report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
                continue
            if 'verified' in test_data and test_data['verified']:
                tot_verified_corr += 1
                tot_attack_ok += 1
                report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
                continue

            relu_params = reset_params(args, net, dtype)

            bounds = compute_bounds(net, device, args.layer_idx, args, abs_inputs)
            if args.test_domain == 'zono_iter':
                with torch.enable_grad():
                    learn_slopes(relu_params, bounds, args, n_layers, net, inputs, targets, abs_inputs, None, None)

            with torch.enable_grad():
                abs_loss, abs_ok = get_adv_loss(device, args.test_eps, args.layer_idx, net, bounds, inputs, targets, args)

            refined_triples = []
            if args.refine_lidx is not None:
                bounds = compute_bounds(net, device, args.layer_idx+1, args, abs_inputs)
                for lidx in range(0, args.layer_idx+2):
                    net.blocks[lidx].bounds = bounds[lidx]
                print('loss before refine: ', abs_loss)
                refine_dim = bounds[args.refine_lidx+1][0].shape[2]
                pbar = tqdm(total=refine_dim*refine_dim, dynamic_ncols=True)
                for refine_i in range(refine_dim):
                    for refine_j in range(refine_dim):
                        # refine(args, bounds, net, 0, 15, abs_inputs, input_size)
                        refine(args, bounds, net, refine_i, refine_j, abs_inputs, input_size)
                        pbar.update(1)
                pbar.close()
                with torch.enable_grad():
                    abs_loss, abs_ok = get_adv_loss(device, args.test_eps, args.layer_idx, net, bounds, inputs, targets, args)
                print('loss after refine: ', abs_loss)

            if abs_ok:
                tot_attack_ok += 1
            abs_out = net(abs_inputs)
            verified, verified_corr = abs_out.verify(targets)
            test_data['verified'] = int(verified_corr.item())
            print('abs_loss: ', abs_loss.item(), '\tabs_ok: ', abs_ok.item(), '\tverified_corr: ', verified_corr.item())
            if verified_corr:
                tot_verified_corr += 1
                report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
                continue
            if args.no_milp or (not abs_ok):
                report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)
                continue

            if verify_test(args, net, num_relu, inputs, targets, abs_inputs, bounds, refined_triples, test_data, grb_modelsdir, test_idx):
                tot_verified_corr += 1
                test_data['verified'] = True
            report(ver_logdir, tot_verified_corr, tot_nat_ok, tot_attack_ok, tot_pgd_ok, test_idx, tot_tests, test_data)