                    clean_batch_x = x_train[index[i*batch_size:(i*batch_size + num_clean_samples)]]

                    # mix clean and adversarial examples in the same mini-batch
                    train_feed[xx] = np.concatenate([clean_batch_x, adv_batch])
                    train_feed[yy] = clean_batch_y

                else:
                    # otherwise train on clean examples only
                    train_feed[xx] = clean_batch_x
                    train_feed[yy] = clean_batch_y

                # run one training step and accumulate the loss
                results = sess.run(train_calc, feed_dict=train_feed)
                performance.add_loss(results[1])
                #performance.progress_bar(i+1., (len(x_train) // batch_size), metric/(i+1))

            # report clean validation accuracy after each epoch
            predictions = nntrainer.get_activations(sess, valid, 'output')
            print(metrics.accuracy(valid['targets'], predictions))

            # once the clean warm-up epochs are done, evaluate on an adversarially perturbed test set
            if print_adv_test and epoch >= num_clean_epochs:
                adv_test['inputs'] = perturb(test['inputs'], test['targets'], sess, nnmodel, train_feed, grad_tensor)
                # adv_test['inputs'] = test['inputs']
                predictions = nntrainer.get_activations(sess, adv_test, 'output')
                roc, roc_curves = metrics.roc(test['targets'], predictions)
                print('Adversarial Accuracy')
                print(metrics.accuracy(test['targets'], predictions))

            # save cross-validation metrics
            loss, mean_vals, error_vals = nntrainer.test_model(sess, valid,
                                                                    name="valid",
                                                                    batch_size=batch_size,
                                                                    verbose=verbose)
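The `perturb` helper used above to build the adversarial test set is not shown in this fragment. A minimal sketch of one common choice, a single FGSM step, follows; it assumes that `grad_tensor` is the gradient of the training loss with respect to the input placeholder (e.g. `tf.gradients(loss, x_placeholder)[0]`). The placeholder arguments, the step size `epsilon`, and the [0, 1] input range are assumptions, not details taken from the original code.

import numpy as np

def perturb_fgsm(X, y, sess, x_placeholder, y_placeholder, grad_tensor, epsilon=0.1):
    # Evaluate d(loss)/d(inputs) at the given inputs and labels.
    grad = sess.run(grad_tensor,
                    feed_dict={x_placeholder: X, y_placeholder: y})
    # Take one signed gradient step (fast gradient sign method).
    X_adv = X + epsilon * np.sign(grad)
    # Keep the perturbed inputs in the valid data range (assumed to be [0, 1]).
    return np.clip(X_adv, 0.0, 1.0)

A multi-step variant (projected gradient descent) simply repeats the signed step several times, re-clipping after each step.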
        
Code example #2
        # save cross-validation metrics
        loss, mean_vals, error_vals = nntrainer.test_model(
            sess, valid, name="valid", batch_size=batch_size, verbose=verbose)
    for eps in eps_list:
        res_dict[eps] = []
        for idx in range(num_trials):

            # get performance metrics
            noisy_train = {
                'inputs':
                train['inputs'] + np.random.uniform(
                    low=-eps, high=eps, size=train['inputs'].shape),
                'targets':
                train['targets']
            }
            predictions = nntrainer.get_activations(sess, noisy_train,
                                                    'output')
            acc = metrics.accuracy(train['targets'], predictions)
            print('Epsilon: ' + str(eps))
            print('Trial: ' + str(idx + 1))
            print(acc[0])
            res_dict[eps].append(acc)
        print('Mean for eps=' + str(eps))
        print(np.mean(res_dict[eps]))

        with open(os.path.join(results_path, model_name + '_acc.pickle'),
                  'wb') as f:
            cPickle.dump(res_dict, f, protocol=cPickle.HIGHEST_PROTOCOL)

        print(res_dict)
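The pickle written above can be loaded back later to summarize robustness as a function of the noise magnitude. A short sketch, assuming Python 3 (`pickle` rather than `cPickle`) and placeholder values for `results_path` and `model_name`; each entry of `res_dict[eps]` is one trial's accuracy as returned by `metrics.accuracy`.

import os
import pickle  # cPickle under Python 2
import numpy as np

results_path = 'results'   # assumed path, standing in for results_path above
model_name = 'model'       # assumed model name

with open(os.path.join(results_path, model_name + '_acc.pickle'), 'rb') as f:
    res_dict = pickle.load(f)

# Mean and standard deviation of accuracy across trials for each noise level.
for eps in sorted(res_dict):
    accs = np.array(res_dict[eps])
    print('eps=%g  mean acc=%.4f  std=%.4f' % (eps, accs.mean(), accs.std()))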