def check_weights_test():
	x = TPM(10, 5, 5)
	y = TPM(10, 5, 5)
	x.weights = [1,1,1,1,1,1,1,1,1,1]
	y.weights = [1,1,1,1,1,1,1,1,1,1]

	# Identical weight vectors should compare as synchronized.
	assert check_weights(x, y) == 0

	# A mismatch in every position, or in a single position, should report -1.
	y.weights = [0,0,0,0,0,0,0,0,0,0]
	assert check_weights(x, y) == -1

	y.weights = [1,1,1,1,0,1,1,1,1,1]
	assert check_weights(x, y) == -1
	print "check_weights_test passed"
def nkep():
    input_size = int(raw_input("Enter the desired number of inputs for the networks\n"))
    hidden_node_num = int(
        raw_input("Enter the desired number of hidden nodes. Must be able to divide the input size.\n")
    )
    weight_range = int(
        raw_input(
            "What weight range would you like to use? Note: Enter only one number, the range will be made up of the positive and negative versions of that number.\n"
        )
    )
    cutoff = int(raw_input("Enter a cutoff for the number of correct outputs needed to be synchronized:\n"))
    x = TPM(input_size, hidden_node_num, weight_range)
    y = TPM(input_size, hidden_node_num, weight_range)

    # Keep running synchronization rounds until the two networks' weights match.
    while check_weights(x, y) < 0:
        synchronize(x, y, cutoff)

    print "Weights are now synced, printing them now\n"
    print "First TPM's weights"
    x.print_weights()
    print "\n"
    print "Second TPM's weights"
    y.print_weights()
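
# Conceptual sketch of a single synchronization round, assuming a hypothetical
# TPM interface with get_output(inputs) and update_weights(inputs); the actual
# protocol used above is whatever synchronize() implements. Both parties score
# the same random input vector and only apply a learning step when their
# outputs agree, which is what gradually drives the weights together.
import random

def sync_round_sketch(tpm_a, tpm_b):
    inputs = [random.choice([-1, 1]) for _ in range(tpm_a.input_num)]
    if tpm_a.get_output(inputs) == tpm_b.get_output(inputs):  # outputs agree
        tpm_a.update_weights(inputs)  # hypothetical Hebbian-style update
        tpm_b.update_weights(inputs)
        return True
    return False
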
Example #3
import os
import sys
import subprocess
import argparse
from shutil import copyfile

sys.path.append("../")
import TPM

# Clear the TPMs at the beginning
my_tpm = TPM.TPM("TPM.csv")
#my_tpm.clearTPM()

tpm = TPM.TPM("../TPM.csv")
#tpm.clearTPM()

NO_PROMPT = False


def prompt_key(prompt):
    if NO_PROMPT:
        print "\n" + prompt
        return
    inp = False
    while inp != "":
        try:
            inp = raw_input("\n%s -- press Enter to continue" % prompt)
        except Exception:
            pass

  ```
  toto-verify.py --layout <root.layout> --layout-keys <layout-key>
  ```

"""
import sys
import argparse
import toto.util
import toto.verifylib
import toto.log as log
from toto.models.layout import Layout

sys.path.append("../")
import TPM

tpm = TPM.TPM("../TPM2.csv")
my_tpm = TPM.TPM("../TPM.csv")

#my_tpm = TPM.TPM("TPM.csv")
#my_tpm.clearTPM()


def _die(msg, exitcode=1):
    log.failing(msg)
    sys.exit(exitcode)


def in_toto_verify(layout_path, layout_key_paths):
    """Loads layout file and layout keys from disk and performs all in-toto
    verifications."""
def print_weights_test():
	x = TPM(5, 2, 10)
	x.weights = [1,2,1,2,1]
	x.print_weights()  # Check this by seeing what it prints out
	print "print_weights_test passed? Look to check for sure."

def encrypt(TPM, TPM_other, plaintext):
	# Build the key from the synchronized weights one block at a time,
	# performing a key exchange after every block (mirror of decrypt below),
	# then XOR the key against the plaintext.
	length_remaining = len(plaintext)
	key = ""
	while length_remaining > TPM.input_num:
		key += weight_to_string(TPM)
		length_remaining -= TPM.input_num
		key_exchange_one_only(TPM, TPM_other)
	key += weight_to_string(TPM, length_remaining)
	key_exchange_one_only(TPM, TPM_other)
	return xor_strings(plaintext, key)

def decrypt(TPM, TPM_other, ciphertext):
	# Regenerate the same key stream from this side's synchronized weights,
	# then XOR it against the ciphertext to recover the plaintext.
	length_remaining = len(ciphertext)
	key = ""
	while length_remaining > TPM.input_num:
		key += weight_to_string(TPM)
		length_remaining -= TPM.input_num
		key_exchange_one_only(TPM, TPM_other)
	key += weight_to_string(TPM, length_remaining)
	key_exchange_one_only(TPM, TPM_other)
	return xor_strings(key, ciphertext)
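
# Hedged sketch of the xor_strings helper that encrypt and decrypt rely on (its
# real definition lives elsewhere in this example): a character-wise XOR of two
# equal-length strings, which is what lets the synchronized weights act as a
# one-time-pad style key. weight_to_string is assumed to turn the current TPM
# weights into key characters.
def xor_strings_sketch(s1, s2):
	return "".join(chr(ord(a) ^ ord(b)) for a, b in zip(s1, s2))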


# Create two networks and synchronize them so they end up with identical weights.
y = TPM(500, 25, 1000)
x = TPM(500, 25, 1000)
synchronize(x, y, 500)

teststring = raw_input("Enter the string to be encrypted (must be less than 500 characters):\n")
# Give each side a copy of the other network so the sender and receiver can each
# drive the key exchange locally.
x_c = TPM(1, 1, 1)
y_c = TPM(1, 1, 1)
x.fullcopy(x_c)
y.fullcopy(y_c)
key_exchange_one_only(x_c, y_c)
testenc = encrypt(y, x_c, teststring)

print "Encrypted text is: ", testenc, "\n"
raw_input("Press enter to continue.")

new_plain = decrypt(x, y_c, testenc)
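
# Not part of the original snippet: a natural follow-up is to print the decrypted
# text so it can be compared against the original input.
print "Decrypted text is: ", new_plain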
      -- vi foo.py
  ```

"""

import os
import sys
import argparse
import toto.util
import toto.runlib
import toto.log as log

sys.path.append("../")
import TPM

tpm = TPM.TPM("../TPM.csv")
#tpm = TPM.TPM("TPM.csv")


def _die(msg, exitcode=1):
    log.error(msg)
    sys.exit(exitcode)


def in_toto_run(step_name,
                key_path,
                material_list,
                product_list,
                link_cmd_args,
                record_byproducts=False):
    """Load link signing private keys from disk and runs passed command, storing
Example #8
def fetch_output(training_file, dev_set):

    # Defines

    tags_file = r'POSTagList.txt'
    delimiter = '\t'
    word_column = 1
    tag_column = 2

    # Initialization

    answers = []

    # Parsing sentences from training data and generating word-tag bigrams

    tag_frequency_count = CountFrequency.give_freq_counts(
        training_file, delimiter, tag_column)
    word_frequency_count = CountFrequency.give_freq_counts(
        training_file, delimiter, word_column)
    sentence_seq_word_list = TPM.construct_sentence_sequence(
        training_file, delimiter, word_column, 0)
    sentence_seq_tag_list = TPM.construct_sentence_sequence(
        training_file, delimiter, tag_column, 0)
    unked_sequence_word_list = Input_Generation.define_training_unk_words(
        word_frequency_count, sentence_seq_word_list)
    word_tag_pairs = EPM.get_epm_bigrams(sentence_seq_tag_list,
                                         unked_sequence_word_list)
    tag_tag_pairs = TPM.get_bigrams(sentence_seq_tag_list)
    vocabulary = set(unked_sequence_word_list)

    # Creating the master parameter list

    # master_a = Smoothing.get_backoff_smoothed_tpm(tags_file, tag_tag_pairs, tag_frequency_count)
    master_a = Smoothing.get_add_k_smoothed_tpm(tags_file, tag_tag_pairs,
                                                tag_frequency_count)
    # master_a = TPM.get_transition_probability_matrix(tags_file, tag_tag_pairs, tag_frequency_count)
    master_b = EPM.get_emission_probability_matrix(tags_file, vocabulary,
                                                   word_tag_pairs,
                                                   tag_frequency_count)
    master_pie_1 = TPM.get_initial_pi_matrix(tags_file, tag_tag_pairs,
                                             unked_sequence_word_list)
    master_pie_2 = TPM.get_end_pi_matrix(tags_file, tag_tag_pairs,
                                         tag_frequency_count)

    # Smoothing was applied to the transition probability matrix above.
    # Generate the list of sentences to be fed to the decoder.

    all_inputs = TPM.construct_sentence_sequence(dev_set, delimiter, 1, 0)

    # Find out the state_sequence_list and observation_sequence_list

    extracted_inputs = Input_Generation.extract_input_sentences_list(
        all_inputs)
    unked_extracted_inputs = Input_Generation.define_extracted_unk_words(
        unked_sequence_word_list, extracted_inputs)
    #extracted_tags = Input_Generation.extract_possible_tags_list(extracted_inputs, word_tag_pairs)

    # Run Viterbi decoding on each sentence
    for index, observation_sequence in enumerate(unked_extracted_inputs):

        if len(observation_sequence):

            # The hidden states are the I/O/B chunk tags.
            state_sequence = ['I', 'O', 'B']

            # construct matrix A
            a = LocalParamters.construct_local_transition(
                state_sequence, master_a)

            # construct matrix B
            b = LocalParamters.construct_local_emission(
                state_sequence, observation_sequence, vocabulary, master_b)

            # construct matrix pie_1
            pie_1 = LocalParamters.construct_local_pie_start(
                state_sequence, master_pie_1)

            # construct matrix pie_2
            pie_2 = LocalParamters.construct_local_pie_end(
                state_sequence, master_pie_2)

            # Run the Viterbi decoder on this observation sequence
            answer_string = ViterbiDecoding.viterbi_decode(
                observation_sequence, state_sequence, a, b, pie_1, pie_2)

            # Collect the decoded tags, separating sentences with a space
            answers.extend(answer_string)
            answers.extend(" ")

    return answers
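
# Hedged usage sketch (not in the original example; the file names below are
# hypothetical): fetch_output expects a tab-separated training file and a dev
# set, and returns the decoded tags, which can be joined back into a string.
if __name__ == '__main__':
    predicted = fetch_output(r'train.txt', r'dev.txt')
    print "".join(predicted)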