Example #1
    def check_error(self):
        while True:
            self.imp.fresh_randomness(self.n_samples)
            self.s = self.imp.run_all()
            error = get_confidence_interval(self.s.pas, self.s.pbs,
                                            self.confidence, self.eps_err_goal)

            if error * 4 < self.eps_err_goal and self.n_samples / 1.4 >= self.min_n_samples:
                self.n_samples = int(self.n_samples / 1.4)
                logger.debug(
                    "Error too small:%s, decreasing size of network to %s...",
                    error, self.n_samples)
            elif error > self.eps_err_goal and self.n_samples < self.max_n_samples:
                self.n_samples = self.n_samples * 2
                logger.debug(
                    "Error too large:%s, increasing size of network to %s...",
                    error, self.n_samples)
            elif math.isnan(error):
                logger.warning(
                    "Error is nan, resetting size of network to %s...",
                    self.n_samples)
                break
            else:
                break
        logger.info("Tensorflow: eps=%s+-%s", self.s.eps, error)
Example #2
    def __init__(self, confirming, confirmer, min_n_samples, max_n_samples,
                 confidence, eps_err_goal, alg: Algorithm):
        super().__init__(confirming, confirmer, alg)
        self.min_n_samples = min_n_samples
        self.n_samples = self.min_n_samples
        self.max_n_samples = max_n_samples
        self.confidence = confidence
        self.eps_err_goal = eps_err_goal
        self.alg = alg

        # set seed for randomness
        np.random.seed(0)

        # build graph
        with time_measure('build_graph'):
            self.imp = alg.get_tensorflow_implementation()
            self.imp.build_fresh_graph()
            self.imp.fresh_randomness(self.n_samples)
            with time_measure('init_optimizer'):
                logger.info("Started setting up optimizer")
                self.optimizer = self.imp.get_optimizer(n_opt_steps, min_p)
                logger.info("Finished setting up optimizer")

        # internal variables
        self.s = None
Example #3
    def build_fresh_graph(self, label, build_graph):
        """
		build a fresh compute graph (resets the current graph)
		:param label: label of the value returned by build_network
		:param build_graph: function that builds the graph and returns a value to be monitered with tensorboard
		"""

        # cleanup
        tf.reset_default_graph()

        logger.info('Started building graph')

        # build graph
        value = build_graph()

        # tensorboard
        tf.summary.scalar(label, value)

        # prepare session
        self.session = tf.Session()
        self.merged = tf.summary.merge_all()

        # prepare logging
        self.writer = tf.summary.FileWriter(self.log + '/train',
                                            self.session.graph)

        logger.info('Finished building graph')
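For illustration, here is a minimal sketch of the kind of callable that build_fresh_graph expects as its build_graph argument (the enclosing TensorFlowWrapper class appears in a later example; the function and label names below are made up). The callable must construct the graph with the TF 1.x API and return a scalar tensor, which build_fresh_graph then registers as a tensorboard summary under label.

import tensorflow as tf  # TF 1.x API, as used throughout these excerpts

def example_build_graph():
    # any TF 1.x graph construction; must return a scalar tensor,
    # which build_fresh_graph registers as a tensorboard summary
    x = tf.get_variable('x', initializer=3.0)
    return tf.square(x)

wrapper = TensorFlowWrapper('example')                  # hypothetical label
wrapper.build_fresh_graph('eps', example_build_graph)   # 'eps' is the summary label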
Example #4
    def test(self):
        # prepare logger
        log_file = logger.get_log_file('tensorflow_searcher_test')
        logger.set_logfile(log_file)
        logger.info('Starting test of TensorFlowSearcher')

        # prepare arguments
        confirming = 10
        alg = get_algorithm('aboveThreshold', 4)
        confirmer = PSIScorer(alg)
        min_n_samples = 2000
        max_n_samples = 200 * 1000
        confidence = 0.9
        eps_err_goal = 0.01
        s = TensorFlowSearcher(confirming, confirmer, min_n_samples,
                               max_n_samples, confidence, eps_err_goal, alg)

        # search
        eps = s.search(4)

        # check
        self.assertGreaterEqual(eps, 0)

        # finish logging
        logger.info('Finished test of TensorFlowSearcher')
        logger.shutdown()
Example #5
    def random_start(self, s):
        self.alg.set_random_start(self.imp)

        self.check_error()
        logger.data('n_samples', self.n_samples)
        logger.info("Result after step (random,%s):\n%s", s,
                    self.current_state())
        return self.s.eps
Example #6
    def score_internal(self, a, b, o):
        logger.debug('Scorer: Computing probabilities')
        pa = self.get_prob(a, o)
        pb = self.get_prob(b, o)

        logger.info("Scorer: Comparing probabilities pa and pb:\t%s\t%s", pa,
                    pb)

        eps = self.score_from_probability(pa, pb)
        return eps
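The score_from_probability helper called above is not shown in these excerpts. As a hedged sketch, the empirical epsilon between two output probabilities is commonly taken to be the absolute log ratio; the function below (hypothetical name empirical_eps) illustrates that calculation and may differ from the project's actual implementation.

import math

def empirical_eps(pa, pb):
    # |ln(pa / pb)|; a zero probability on only one side makes the ratio unbounded
    if pa == 0.0 and pb == 0.0:
        return 0.0
    if pa == 0.0 or pb == 0.0:
        return float('inf')
    return abs(math.log(pa) - math.log(pb))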
Example #7
    def optimize(self, s):
        if np.isnan(self.s.a).any() or np.isnan(self.s.d).any() or np.isnan(
                self.s.o).any():
            logger.warning(
                "Parameters contain 'nan', will not run gradient descent. Returning 0.0 instead..."
            )
        elif np.isnan(self.s.eps):
            logger.warning(
                "eps is 'nan', will not run gradient descent. Returning 0.0 instead..."
            )
        elif np.isinf(self.s.eps):
            logger.warning(
                "eps is already 'inf', will not run gradient descent....")
        else:
            logger.debug("Starting optimization step")
            self.imp.minimize(self.optimizer)
            logger.debug("Finished optimization step")
            self.check_error()

        logger.data('n_samples', self.n_samples)
        logger.info("Result after step (optimized,%s):\n%s", s,
                    self.current_state())

        return self.s.eps
Example #8

import os
import datetime
import tensorflow as tf
from tensorflow.contrib.opt import ScipyOptimizerInterface

from dpfinder.utils.redirect.redirect_stdout import redirect
from dpfinder.logging import logger

# provide information on logging
path = os.path.dirname(os.path.abspath(__file__))
loading_timestamp = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
logs = os.path.join(path, "./logs/tensorflow", loading_timestamp)
logger.info("Run tensorboard using $ tensorboard --logdir=%s", logs)


class TensorFlowWrapper:
    def __init__(self, label):
        # logging
        timestamp = '{:%Y-%m-%d_%H-%M-%S}'.format(datetime.datetime.now())
        log_dir = '{}_{}'.format(label, timestamp)
        self.log = os.path.join(logs, log_dir)

        # internal fields
        self.session = None
        self.merged = None
        self.writer = None

    def close(self):
        # (body truncated in this excerpt; a minimal version would release
        # the tensorboard writer and the session)
        self.writer.close()
        self.session.close()
Example #9
    def build_fresh_graph(self):
        # build graph
        logger.info("Started building graph for algorithm")
        self.tf_wrapper = TensorFlowWrapper(self.alg_name)
        self.tf_wrapper.build_fresh_graph('eps', self.build_graph_internal)
        logger.info("Finished building graph for algorithm")
Example #10
    def confirm(self, a, b, o):
        eps_confirmed = self.confirmer.score(a, b, o)
        logger.info("eps according to confirmer:%s", eps_confirmed)
        logger.data('eps-confirmed', eps_confirmed)
        return eps_confirmed
Example #11
    def search(self, max_steps):
        logger.info('Starting search for %s', self.alg.name)
        logger.data('algname', self.alg.name)
        for s in range(0, max_steps):

            logger.info('Step %s start', s)
            self.a, self.b, self.o, self.eps = self.step(s)
            logger.info('Step %s end', s)

            logger.info('eps:%s', self.eps)
            logger.data('eps-empirical', self.eps)

            if self.confirming == 10:
                self.confirm(self.a, self.b, self.o)

            if self.eps > self.best_eps:
                self.best_a = self.a
                self.best_b = self.b
                self.best_o = self.o
                self.best_eps = self.eps

        logger.info('Finished search')
        logger.info('Best eps:%s', self.best_eps)

        if self.confirming == 5:
            logger.info('Confirming best eps...')
            self.confirm(self.best_a, self.best_b, self.best_o)

        return self.best_eps