Example #1
        def body(batch_num, mean):
            # One tf.while_loop iteration: draw a batch of points, evaluate
            # func on them, and fold the batch mean into the running mean.
            if mc_sampler == tfp.mcmc.sample_halton_sequence:
                # The Halton sampler takes explicit sequence indices, so
                # slice out the indices belonging to this batch.
                start_idx = batch_num * batch_size
                end_idx = start_idx + batch_size
                indices = tf.range(start_idx, end_idx, dtype=tf.int32)
                sample = mc_sampler(space.n_obs,
                                    sequence_indices=indices,
                                    dtype=ztypes.float,
                                    randomized=False)
            else:
                sample = mc_sampler(shape=(batch_size, space.n_obs),
                                    dtype=ztypes.float)
            sample = tf.guarantee_const(sample)
            # Rescale the sample from the unit hypercube to the integration
            # limits [lower, upper].
            sample = (np.array(upper[0]) -
                      np.array(lower[0])) * sample + lower[0]
            sample = tf.transpose(a=sample)
            sample = func(sample)
            sample = tf.guarantee_const(sample)

            batch_mean = tf.reduce_mean(input_tensor=sample)
            batch_mean = tf.guarantee_const(batch_mean)
            # Streaming-mean weight: 1 / (n + 1) for the (n + 1)-th batch.
            err_weight = 1 / tf.cast(batch_num + 1, dtype=tf.float64)

            return batch_num + 1, mean + err_weight * (batch_mean - mean)
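The return statement implements the standard streaming-mean recurrence, mean_{n+1} = mean_n + (batch_mean - mean_n) / (n + 1). A minimal self-contained sketch of the same pattern, assuming plain uniform sampling (the names f, n_batches, and batch_size are illustrative, not from the source):

import tensorflow as tf

def mc_mean(f, n_batches=100, batch_size=1000):
    # Streaming Monte Carlo mean over [0, 1): each batch mean is folded
    # into the running mean with weight 1 / (n + 1).
    def body(n, mean):
        x = tf.random.uniform((batch_size,), dtype=tf.float64)
        batch_mean = tf.reduce_mean(f(x))
        return n + 1, mean + (batch_mean - mean) / tf.cast(n + 1, tf.float64)

    _, mean = tf.while_loop(lambda n, _: n < n_batches, body,
                            (0, tf.constant(0., tf.float64)))
    return mean

# Estimates the integral of x^2 over [0, 1), which is 1/3.
print(mc_mean(lambda x: x ** 2))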
Example #2
        def body(batch_num, mean):
            if mc_sampler == tfp.mcmc.sample_halton_sequence:
                start_idx = batch_num * batch_size
                end_idx = start_idx + batch_size
                indices = tf.range(start_idx, end_idx, dtype=tf.int32)
                sample = mc_sampler(space.n_obs,
                                    sequence_indices=indices,
                                    dtype=ztypes.float,
                                    randomized=False)
            else:
                sample = mc_sampler(shape=(batch_size, space.n_obs),
                                    dtype=ztypes.float)
            sample = tf.guarantee_const(sample)
            sample = (np.array(upper[0]) -
                      np.array(lower[0])) * sample + lower[0]
            sample = tf.transpose(sample)
            sample = func(sample)
            sample = tf.guarantee_const(sample)

            batch_mean = tf.reduce_mean(sample)
            batch_mean = tf.guarantee_const(batch_mean)
            # tf.to_double was removed in TF2; an explicit cast does the same.
            err_weight = 1 / tf.cast(batch_num + 1, tf.float64)
            print_op = tf.print(batch_num + 1, mean,
                                err_weight * (batch_mean - mean))
            with tf.control_dependencies([print_op]):
                return batch_num + 1, mean + err_weight * (batch_mean - mean)
Example #3
def MaybeGuaranteeConstGetter(getter, name, *args, **kwargs):
    global _CONST_GUARANTEE
    if _CONST_GUARANTEE:
        # Clear any inherited control dependencies so the GuaranteeConst op
        # is not tied to unrelated pending ops.
        with tf.control_dependencies(None):
            return tf.guarantee_const(getter(name, *args, **kwargs),
                                      name=name + '/GuaranteeConst')
    else:
        return getter(name, *args, **kwargs)
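MaybeGuaranteeConstGetter follows TF1's custom-getter protocol. A sketch of how it would typically be attached to a variable scope, assuming TF1-style graph mode (the scope and variable names are illustrative):

import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # custom getters are a TF1 feature
_CONST_GUARANTEE = True

# Every variable fetched inside the scope is wrapped in tf.guarantee_const,
# which lets the graph optimizer treat its value as a constant.
with tf.compat.v1.variable_scope('my_scope',
                                 custom_getter=MaybeGuaranteeConstGetter):
    w = tf.compat.v1.get_variable('w', shape=[3], dtype=tf.float32)
# Consumers of w now read from an op named 'my_scope/w/GuaranteeConst'.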
Example #4
    def __init__(self, name, value, dtype=ztypes.float):
        super().__init__(name=name, params={}, dtype=dtype)
        static_value = tf.get_static_value(value, partial=True)
        if static_value is None:
            raise RuntimeError(
                "Cannot convert input to static value. If you encounter this, please open a bug report"
                " on Github: https://github.com/zfit/zfit")
        self._value_np = static_value

        self._value = tf.guarantee_const(
            tf.convert_to_tensor(value, dtype=dtype))
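The notable call here is tf.get_static_value, which resolves a tensor's value at trace time when possible and returns None otherwise; hence the RuntimeError above. A quick illustration:

import tensorflow as tf

print(tf.get_static_value(tf.constant(1.5)))  # 1.5: resolvable statically

@tf.function
def f(x):
    # While tracing, x is symbolic, so no static value is available.
    print(tf.get_static_value(x))  # None
    return x

f(tf.constant(1.5))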
Example #5
    def __init__(self, name, value, dtype=ztypes.float):
        """

        Args:
            name:
            value:
            dtype:
        """
        super().__init__(name=name, params={}, dtype=dtype)
        self._value_np = tf.get_static_value(value, partial=True)
        self._value = tf.guarantee_const(
            tf.convert_to_tensor(value, dtype=dtype))
Example #6
 def __init__(self, name, K, N, L, initial_weights):
     """
     Args:
         K (int): The number of hidden neurons.
         N (int): The number of input neurons that each hidden
             neuron has.
         L (int): The synaptic depth of each input neuron's weights.
     """
     super(TPM, self).__init__(name=name)
     self.type = 'basic'
     with self.name_scope:
         self.K = tf.guarantee_const(tf.constant(K, name='K'))
         self.N = tf.guarantee_const(tf.constant(N, name='N'))
         self.L = tf.guarantee_const(tf.constant(L, name='L'))
         self.w = initial_weights
         self.sigma = tf.Variable(tf.zeros([K], dtype=tf.int32),
                                  trainable=False,
                                  name='sigma')
         self.tau = tf.Variable(0, name='tau')
         self.key = tf.Variable("", name='key')
         self.iv = tf.Variable("", name='iv')
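A hypothetical construction of this TPM, assuming the integer-weight convention suggested by the synaptic depth L (weights in [-L, L]); the sizes are illustrative:

import tensorflow as tf

K, N, L = 8, 12, 4
# maxval is exclusive, so L + 1 yields integers in [-L, L].
initial_weights = tf.Variable(
    tf.random.uniform((K, N), minval=-L, maxval=L + 1, dtype=tf.int32))
alice = TPM('Alice', K, N, L, initial_weights)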
Example #7
    def compute_key(self, key_length, iv_length):
        """Creates a key and IV based on the weights of this TPM.

        Args:
            key_length (int): Length of the key in bits.
                Must be 128, 192, or 256.
            iv_length (int): Length of the initialization vector (IV) in
                bits. Must be a multiple of 4 between 0 and 256, inclusive.
        Returns:
            The key and IV based on the TPM's weights.
        """
        main_diagonal = tf.guarantee_const(
            tf.range(tf.math.minimum(self.K, self.N)))
        iv_indices = tf.guarantee_const(
            tf.stack([main_diagonal, main_diagonal], axis=1))
        iv_weights = tf.strings.format("{}", tf.gather_nd(self.w, iv_indices))
        key_weights = tf.strings.format("{}", self.w)

        def convert_to_hex_dig(tensor, length):
            # tensor.numpy() yields the string tensor's raw bytes; hash them
            # and keep the first `length` hex characters.
            return sha512(tensor.numpy()).hexdigest()[:length]

        # TODO: figure out a way to do this without using py_function
        # py_function is currently needed since we need to get the value from
        # the tf.Tensor
        current_key = tf.py_function(
            convert_to_hex_dig, [key_weights, int(key_length / 4)],
            Tout=tf.string)
        current_iv = tf.py_function(
            convert_to_hex_dig, [iv_weights, int(iv_length / 4)],
            Tout=tf.string)
        self.key.assign(current_key)
        self.iv.assign(current_iv)
        with self.name_scope:
            tf.summary.text('key', data=current_key)
            tf.summary.text('initialization vector', data=current_iv)
        return current_key, current_iv
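The key_length / 4 truncation works because each hex character encodes 4 bits, so a 256-bit key needs 64 hex characters:

from hashlib import sha512

key = sha512(b'example weights').hexdigest()[:256 // 4]
assert len(key) == 64  # 256 bits / 4 bits per hex character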
Example #8
 def custom_getter(getter, name, *args, **kwargs):
     # Same pattern as MaybeGuaranteeConstGetter above, but always applied.
     with tf.control_dependencies(None):
         return tf.guarantee_const(getter(name, *args, **kwargs),
                                   name=name + '/GuaranteeConst')
Example #9
        def train_step():
            # Create random vector, X, with dimensions [K, N] and values
            # bound by [-1, 1]; tf.random.uniform's maxval is exclusive,
            # hence 1 + 1.
            X = tf.random.uniform(
                (K, N), minval=-1, maxval=1 + 1, dtype=tf.int32)

            tpm_update_rules = [
                'hebbian',
                'anti_hebbian',
                'random_walk'
            ]

            if update_rule == 'random-same':
                # use tf.random so that the same update rule is used for
                # each iteration across attacks

                current_update_rule = select_random_from_list(
                    tpm_update_rules,
                    op_name='iteration-ur-A-B-E'
                )
                update_rule_A = current_update_rule
                update_rule_B = current_update_rule
                update_rule_E = current_update_rule
            elif update_rule.startswith('random-different'):
                update_rule_A = select_random_from_list(
                    tpm_update_rules,
                    op_name='iteration-ur-A'
                )
                update_rule_B = select_random_from_list(
                    tpm_update_rules,
                    op_name='iteration-ur-B'
                )
                if update_rule == 'random-different-A-B-E':
                    update_rule_E = select_random_from_list(
                        tpm_update_rules,
                        op_name='iteration-ur-E'
                    )
                elif update_rule == 'random-different-A-B':
                    update_rule_E = update_rule_A
                else:
                    raise ValueError(
                        f"'{update_rule}' is an invalid update rule. "
                        "Valid update rules are: "
                        "'hebbian', "
                        "'anti_hebbian', "
                        "'random_walk', "
                        "'random-same', "
                        "'random-different-A-B', and "
                        "'random-different-A-B-E'"
                    )
            elif not isinstance(tpm_update_rules, tf.Tensor) and \
                    update_rule in tpm_update_rules:
                current_update_rule = tf.guarantee_const(update_rule)
                update_rule_A = current_update_rule
                update_rule_B = current_update_rule
                update_rule_E = current_update_rule
            else:
                raise ValueError(
                    f"'{update_rule}' is an invalid update rule. "
                    "Valid update rules are: "
                    "'hebbian', "
                    "'anti_hebbian', "
                    "'random_walk', "
                    "'random-same', "
                    "'random-different-A-B', and "
                    "'random-different-A-B-E'"
                )
            iterate(
                X,
                Alice, Bob, Eve,
                update_rule_A, update_rule_B, update_rule_E,
                nb_updates, nb_eve_updates,
                score, score_eve,
                key_length, iv_length
            )
            tf.summary.experimental.set_step(nb_updates)
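select_random_from_list is assumed above but not shown; a plausible sketch (an assumption, not taken from the source):

import tensorflow as tf

def select_random_from_list(options, op_name=None):
    # Draw a uniform random index; tf.random is used so that the choice is
    # part of the graph rather than decided at trace time.
    index = tf.random.uniform([], maxval=len(options), dtype=tf.int32,
                              name=op_name)
    return tf.gather(options, index)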
Example #10
def run(
    update_rule, K, N, L,
    attack,
    initial_weights,
    key_length=256, iv_length=128
):
    with tf.experimental.async_scope():

        tf.print(
            "\n\n\n",
            "Creating machines: K=", K, ", N=", N, ", L=", L, ", ",
            "update-rule=", update_rule, ", ",
            "attack=", attack,
            "\n",
            sep='',
            name='log-run-initialization'
        )
        Alice = TPM('Alice', K, N, L, initial_weights['Alice'])

        Bob = TPM('Bob', K, N, L, initial_weights['Bob'])

        if attack == 'probabilistic':
            from mlencrypt_research.tpms import ProbabilisticTPM
            Eve = ProbabilisticTPM('Eve', K, N, L)
        elif attack == 'geometric':
            from mlencrypt_research.tpms import GeometricTPM
            Eve = GeometricTPM('Eve', K, N, L, initial_weights['Eve'])
        elif attack == 'none':
            Eve = TPM('Eve', K, N, L, initial_weights['Eve'])
        else:
            raise ValueError(
                f"'{attack}' is an invalid attack. Valid attacks are: "
                "'probabilistic', 'geometric', and 'none'")

        # try:
        # synchronization score of Alice and Bob
        score = tf.Variable(0., trainable=False,
                            name='score-A-B', dtype=tf.float32)

        # synchronization score of Alice and Eve
        score_eve = tf.Variable(0., trainable=False,
                                name='score-A-E', dtype=tf.float32)
        # except ValueError:
        #     # tf.function-decorated function tried to create variables
        #     # on non-first call.
        #     score = 0.
        #     score_eve = 0.

        # https://www.tensorflow.org/tutorials/customization/performance#zero_iterations
        with Alice.name_scope:
            Alice.key = tf.Variable("", trainable=False, name='key')
            Alice.iv = tf.Variable("", trainable=False, name='iv')
        with Bob.name_scope:
            Bob.key = tf.Variable("", trainable=False, name='key')
            Bob.iv = tf.Variable("", trainable=False, name='iv')
        with Eve.name_scope:
            Eve.key = tf.Variable("", trainable=False, name='key')
            Eve.iv = tf.Variable("", trainable=False, name='iv')

        # try:
        nb_updates = tf.Variable(
            0, name='updates-A-B', trainable=False, dtype=tf.int64)
        nb_eve_updates = tf.Variable(
            0, name='updates-E', trainable=False, dtype=tf.int64)
        # except ValueError:
        #     # tf.function-decorated function tried to create variables
        #     # on non-first call.
        #     pass

        autograph_features = tf.autograph.experimental.Feature.all_but([
            tf.autograph.experimental.Feature.AUTO_CONTROL_DEPS,
            tf.autograph.experimental.Feature.NAME_SCOPES,
        ])

        @tf.function(
            experimental_autograph_options=autograph_features,
            experimental_relax_shapes=True,
        )
        def train_step():
            # Create random vector, X, with dimensions [K, N] and values
            # bound by [-1, 1]; tf.random.uniform's maxval is exclusive,
            # hence 1 + 1.
            X = tf.random.uniform(
                (K, N), minval=-1, maxval=1 + 1, dtype=tf.int32)

            tpm_update_rules = [
                'hebbian',
                'anti_hebbian',
                'random_walk'
            ]

            if update_rule == 'random-same':
                # use tf.random so that the same update rule is used for
                # each iteration across attacks

                current_update_rule = select_random_from_list(
                    tpm_update_rules,
                    op_name='iteration-ur-A-B-E'
                )
                update_rule_A = current_update_rule
                update_rule_B = current_update_rule
                update_rule_E = current_update_rule
            elif update_rule.startswith('random-different'):
                update_rule_A = select_random_from_list(
                    tpm_update_rules,
                    op_name='iteration-ur-A'
                )
                update_rule_B = select_random_from_list(
                    tpm_update_rules,
                    op_name='iteration-ur-B'
                )
                if update_rule == 'random-different-A-B-E':
                    update_rule_E = select_random_from_list(
                        tpm_update_rules,
                        op_name='iteration-ur-E'
                    )
                elif update_rule == 'random-different-A-B':
                    update_rule_E = update_rule_A
                else:
                    raise ValueError(
                        f"'{update_rule}' is an invalid update rule. "
                        "Valid update rules are: "
                        "'hebbian', "
                        "'anti_hebbian', "
                        "'random_walk', "
                        "'random-same', "
                        "'random-different-A-B', and "
                        "'random-different-A-B-E'"
                    )
            elif not isinstance(tpm_update_rules, tf.Tensor) and \
                    update_rule in tpm_update_rules:
                current_update_rule = tf.guarantee_const(update_rule)
                update_rule_A = current_update_rule
                update_rule_B = current_update_rule
                update_rule_E = current_update_rule
            else:
                raise ValueError(
                    f"'{update_rule}' is an invalid update rule. "
                    "Valid update rules are: "
                    "'hebbian', "
                    "'anti_hebbian', "
                    "'random_walk', "
                    "'random-same', "
                    "'random-different-A-B', and "
                    "'random-different-A-B-E'"
                )
            iterate(
                X,
                Alice, Bob, Eve,
                update_rule_A, update_rule_B, update_rule_E,
                nb_updates, nb_eve_updates,
                score, score_eve,
                key_length, iv_length
            )
            tf.summary.experimental.set_step(nb_updates)

        # In the long run, perf_counter doesn't help because different CPUs
        # have different performance. For the sake of posterity, we use the
        # number of training steps, although training time would be more useful
        # in a practical setting.
        # start_time = perf_counter()
        while not tf.reduce_all(tf.math.equal(Alice.w, Bob.w)):
            # TODO: instead of while, use for until L^4*K*N and break
            train_step()

        # end_time = perf_counter()
        # training_time = end_time - start_time
        # loss = (tf.math.sigmoid(training_time) + score_eve / 100.) / 2.
        # for reference: tf.math.log is the natural log, and
        # ln(120) ≈ 4.79, ln(43000) ≈ 10.67
        loss = tf.math.log(tf.cast(nb_updates, tf.float32)) * score_eve / 100.
        key_length = tf.guarantee_const(tf.constant(key_length))
        iv_length = tf.guarantee_const(tf.constant(iv_length))
        if getenv('MLENCRYPT_BARE', 'FALSE') == 'TRUE':
            # because score_eve hasn't been calculated yet in bare mode
            if attack == 'probabilistic':
                eve_w = Eve.mpW
            else:
                eve_w = Eve.w
            score_eve.assign(
                100. * sync_score(Alice.w, eve_w, Alice.name, Eve.name),
                name='calc-sync-A-E'
            )
        else:
            if getenv('MLENCRYPT_TB', 'FALSE') == 'TRUE':
                tf.print(
                    "\n\n",
                    "Alice's key: ", Alice.key, " iv: ", Alice.iv, "\n",
                    "Bob's key: ", Bob.key, " iv: ", Bob.iv, "\n",
                    "Eve's key: ", Eve.key, " iv: ", Eve.iv,
                    sep='',
                    name='log-run-final'
                )
        if tf.math.equal(getenv('MLENCRYPT_TB', 'FALSE'), 'TRUE', name='log-tb'):
            # create scatterplots (in scalars dashboard) of metric vs steps
            # tf.summary.scalar('training_time', training_time)
            tf.summary.scalar('eve_score', score_eve)
            tf.summary.scalar('loss', loss)

    return nb_updates, score_eve, loss
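A hypothetical driver for run(), reusing the weight convention sketched earlier (all names and sizes are illustrative):

import tensorflow as tf

K, N, L = 8, 12, 4
initial_weights = {
    name: tf.Variable(tf.random.uniform((K, N), minval=-L, maxval=L + 1,
                                        dtype=tf.int32))
    for name in ('Alice', 'Bob', 'Eve')
}
nb_updates, score_eve, loss = run(
    'hebbian', K, N, L, 'none', initial_weights)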
Example #11
def sync_score(tpm1_w, tpm2_w, tpm1_name, tpm2_name):
    """
    Args:
        TPM1: Tree Parity Machine 1. Has same parameters as TPM2.
        TPM2: Tree Parity Machine 2. Has same parameters as TPM1.
    Returns:
        The synchronization score between TPM1 and TPM2.
    """
    tpm1_id, tpm2_id = tpm1_name[0], tpm2_name[0]

    # adapted from:
    # https://github.com/tensorflow/tensorflow/blob/r2.2/tensorflow/python/keras/losses.py#L1672-L1716
    # TODO: am I using experimental_implements correctly?
    @tf.function(experimental_implements="cosine_similarity")
    def cosine_similarity(weights1, weights2):
        """Computes the cosine similarity between labels and predictions.

        Note that it is a negative quantity between 0 and 1, where 0 indicates
        orthogonality and values closer to 1 indicate greater similarity.

        `loss = sum(y_true * y_pred)`

        Args:
            y_true: Tensor of true targets.
            y_pred: Tensor of predicted targets.
            axis: Axis along which to determine similarity.

        Returns:
            Cosine similarity tensor.
        """
        weights1_float = tf.cast(weights1, tf.float32,
                                 name=f'weights-{tpm1_id}-float')
        weights2_float = tf.cast(weights2, tf.float32,
                                 name=f'weights-{tpm2_id}-float')
        weights1_norm = tf.math.l2_normalize(weights1_float)
        weights2_norm = tf.math.l2_normalize(weights2_float)

        # this doesn't work well; cos_sim is occasionally greater than 1.
        # this is also marginally slower:
        # cos_sim = tf.tensordot(weights1_norm, weights2_norm,
        #                        [[0, 1], [0, 1]])
        # return cos_sim

        # cos_sim is bound by [-1, 1] (for the most part); due to
        # floating-point error it can still occasionally exceed 1:
        cos_sim = -tf.math.reduce_sum(weights1_norm * weights2_norm)
        # we change cos_sim's range to [0, 1] according to:
        # https://arxiv.org/pdf/0711.2411.pdf#page=62
        return -cos_sim / 2. + .5
    rho = cosine_similarity(tpm1_w, tpm2_w)

    if getenv('MLENCRYPT_TB', 'FALSE') == 'TRUE':
        # the generalization error, epsilon, is the probability that a
        # repulsive step occurs if two corresponding hidden units have
        # different sigma (Ruttor, 2006).
        # epsilon = arccos(rho) / pi; the float32 -> float32 bitcast below is
        # an identity, used only to attach a name to the op.
        epsilon = tf.math.multiply(
            tf.guarantee_const(tf.constant(1. / pi), name='reciprocal-pi'),
            tf.bitcast(
                tf.math.acos(rho, name=f'angle-{tpm1_id}-{tpm2_id}'),
                tf.float32,
                name=f'angle-{tpm1_id}-{tpm2_id}'
            ),
            name=f'scale-angle-{tpm1_id}-{tpm2_id}-to-0-1'
        )

        with tf.name_scope(f'{tpm1_name}-{tpm2_name}'):
            tf.summary.scalar('sync', data=rho)
            tf.summary.scalar('generalization-error', data=epsilon)

    return rho
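As a sanity check of the mapping epsilon = arccos(rho) / pi used above: synchronized weights (rho = 1) give epsilon = 0, orthogonal weights (rho = 0) give 0.5, and anti-parallel weights (rho = -1) give 1:

import numpy as np

for rho in (1.0, 0.0, -1.0):
    print(rho, np.arccos(rho) / np.pi)  # -> 0.0, 0.5, 1.0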
Example #12
    def dummy_func(fake_x):  # needed to make this work with tf.custom_gradient
        if x is not None:
            raise DueToLazynessNotImplementedError(
                "partial not yet implemented")

        def body(batch_num, mean):
            if mc_sampler == tfp.mcmc.sample_halton_sequence:
                start_idx = batch_num * batch_size
                end_idx = start_idx + batch_size
                indices = tf.range(start_idx, end_idx, dtype=tf.int32)
                sample = mc_sampler(space.n_obs,
                                    sequence_indices=indices,
                                    dtype=ztypes.float,
                                    randomized=False)
            else:
                sample = mc_sampler(shape=(batch_size, space.n_obs),
                                    dtype=ztypes.float)
            sample = tf.guarantee_const(sample)
            sample = (np.array(upper[0]) -
                      np.array(lower[0])) * sample + lower[0]
            sample = tf.transpose(sample)
            sample = func(sample)
            sample = tf.guarantee_const(sample)

            batch_mean = tf.reduce_mean(sample)
            batch_mean = tf.guarantee_const(batch_mean)
            # tf.to_double was removed in TF2; an explicit cast does the same.
            err_weight = 1 / tf.cast(batch_num + 1, tf.float64)
            print_op = tf.print(batch_num + 1, mean,
                                err_weight * (batch_mean - mean))
            with tf.control_dependencies([print_op]):
                return batch_num + 1, mean + err_weight * (batch_mean - mean)

        cond = lambda batch_num, _: batch_num < num_batches

        initial_mean = tf.convert_to_tensor(0, dtype=ztypes.float)
        _, final_mean = tf.while_loop(cond,
                                      body, (0, initial_mean),
                                      parallel_iterations=1,
                                      swap_memory=False,
                                      back_prop=False,
                                      maximum_iterations=num_batches)

        def dummy_grad_with_var(dy, variables=None):
            raise DueToLazynessNotImplementedError("Who called me? Mayou36")
            if variables is None:
                raise DueToLazynessNotImplementedError(
                    "Is this needed? Why? It's not a NN. Please make an issue."
                )

            def dummy_grad_func(x):
                values = func(x)
                if variables:
                    gradients = tf.gradients(values, variables, grad_ys=dy)
                else:
                    gradients = None
                return gradients

            return chunked_average(func=dummy_grad_func,
                                   x=x,
                                   num_batches=num_batches,
                                   batch_size=batch_size,
                                   space=space,
                                   mc_sampler=mc_sampler)

        def dummy_grad_without_var(dy):
            return dummy_grad_with_var(dy=dy, variables=None)

        print_op = tf.print(final_mean)
        with tf.control_dependencies([print_op]):
            return tf.guarantee_const(final_mean), dummy_grad_with_var