Example #1
import sys
import matplotlib.pyplot as plt
from codec import TreeBuilder, Codec


def comparaison():
    text = 'Nous sommes en guerre. Aussi, comme je vous l’ai dit jeudi, pour nous protéger et contenir la dissémination du virus, mais aussi préserver nos systèmes de soins, nous avons pris ce matin, entre Européens, une décision commune. Dès demain midi, les frontières à l’entrée de l’Union européenne et de l’espace Schengen seront fermées. Concrètement, tous les voyages entre les pays non européens et l’Union européenne seront suspendus pendant trente jours. Les Françaises et les Français qui sont actuellement à l’étranger et souhaitent rentrer pourront bien entendu rejoindre leur pays. Nous devons prendre cette décision parce que je vous demande ce soir d’importants efforts et que nous devons, dans la durée, nous protéger. Et je veux dire à tous nos compatriotes qui vivent à l’étranger que là aussi, en bon ordre, ils doivent se rapprocher des ambassades et consulats et que nous organiserons pour celles et ceux qui le souhaitent, et là où c’est nécessaire, le rapatriement.'.lower()
    # Sorry for the inline text; it's not very PEP-8 friendly
    builder = TreeBuilder(text)
    binary_tree = builder.tree()
    codec = Codec(binary_tree)

    texte_french = 'Mes chers compatriotes, alors que je vous parle, les résultats connus nous montrent que vous avez décidé de me confier la plus haute charge de l Etat. J exprime ma profonde gratitude à toutes celles et à tous ceux qui m ont accordé leur confiance et je salue tous les autres avec respect. Mes chers compatriotes, je serai le Président de tous les français'.lower()
    texte_allemand = 'Die Stadt Paris ist fur viele ein Traumziel, das sie wenigstens einmal im Leben besuchen mochten. Wer mochte nicht einmal unter dem Eiffelturm stehen und in den Pariser Himmel hoch schauen? Und dann zu Fuss und mit dem Aufzug wenigstens auf halbe Hohe gelangen und den Blick uber die ganze franzosische Hauptstadt geniessen?'.lower()
    compress_fr, compress_de = [], []
    for i in range(max(len(texte_french), len(texte_allemand))):
        texte_fr = texte_french[:i]  # build a text prefix
        texte_de = texte_allemand[:i]
        taille_fr = sys.getsizeof(texte_fr)  # system size of this text
        taille_de = sys.getsizeof(texte_de)
        compress_fr.append(taille_fr /
                           sys.getsizeof(codec.encode_bin(texte_fr)))
        compress_de.append(taille_de /
                           sys.getsizeof(codec.encode_bin(texte_de)))
    plt.close()
    plt.title("Comparaison de l'effet de la langue sur la taux de compression")
    plt.plot(compress_fr,
             label=" facteur de compression du texte en français ")
    plt.plot(compress_de, label=" facteur de compression du texte en allemand")
    plt.legend()
    plt.show()
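
Note: sys.getsizeof measures the whole CPython object, including a fixed header, so the ratios computed above only approximate the true payload compression, especially for short prefixes. A quick illustration (a sketch; exact byte counts vary by CPython version):

import sys
print(sys.getsizeof(""))    # ~49 bytes: pure object overhead of an empty str
print(sys.getsizeof("ab"))  # ~51 bytes: overhead plus one byte per ASCII char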
Example #2
def cli():
    """\
    Codec CLI for instant usage: Read and write.
    """
    argv = parse_args()
    c = Codec(**vars(argv))
    c.run()
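
parse_args() is not shown in this snippet. A minimal sketch of what it could look like, assuming hypothetical infile/outfile parameters on Codec (the real class may expose different options):

import argparse

def parse_args():
    # Hypothetical flags; Codec(**vars(argv)) means every attribute of the
    # returned Namespace must match a keyword parameter of Codec.__init__.
    parser = argparse.ArgumentParser(description="Codec CLI: read and write.")
    parser.add_argument("infile", help="path to read from")
    parser.add_argument("outfile", help="path to write to")
    return parser.parse_args()

The Codec(**vars(argv)) idiom unpacks the argparse Namespace into keyword arguments, keeping the CLI surface and the constructor signature in sync.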
Example #3
    def find_codec(self, name):
        cmdstr = "adb shell find " + self.path + " -maxdepth 1"
        result = os.popen(cmdstr)
        ret = result.read()
        lines = ret.split('\n')
        for line in lines:
            if line.find(name) != -1:
                codec = Codec(name, line)
                codec.find_widgets()
                self.codecs.append(codec)
Example #4
    def __init__(self, engine, protocol, sock=None, addr=None, observer=None, auth=None):
        self.codec = Codec(protocol, auth)
        self.engine = engine
        self.connectionState = ConnectionState.CONNECTED
        self.session = None
        self.addr = addr
        self.observer = observer
        self.msgBuffer = b''
        self.heartbeatPeriod = 30.0
        self.msgHandlers = []
        self.sock = sock
        self.heartbeatTimerRegistration = None
        self.expectedHeartbeatRegistration = None
        self.socketEvent = FileDescriptorEventRegistration(self.handle_read, sock, EventType.READ)
        self.engine.eventManager.registerHandler(self.socketEvent)
Example #5
def main():
    with open(CONFIG_FN) as f:
        conf = json.load(f)

    global sent_groups

    with open(conf["sent_groups"]) as f:
        sent_groups = json.load(f)["groups"]

    kb = load_kb(conf["kb"], 'name')
    sys_vocab, sys_word2idx = load_sys_vocab(conf["sys_vocab"])

    sys_codec = Codec(sys_vocab, sys_word2idx)

    onto, onto_idx = load_ontology(conf["ontology"])

    word2idx, embed = load_embed(**conf)

    usr_codec = Codec([], word2idx)

    trk_model, slot_len_sum = load_tracker_model(onto, embed, conf, kb)

    trk_model.eval()

    hidden = trk_model.state_tracker.init_hidden()
    kb_vec = Variable(torch.zeros(1, conf["kb_indicator_len"]))

    sentence_generator = SentenceGenerator(kb, onto, sent_groups)

    for line in iter(sys.stdin.readline, ''):
        inp = usr_codec.encode(line.strip())

        inp = Variable(torch.LongTensor([
            inp,
        ]))

        sentvecs, states_reps, states_preds, hidden, sent_grp_preds = trk_model(
            inp, None, hidden)

        criteria = to_search_criteria(states_preds, onto)
        ret, kb_vec = get_kb_result(kb, criteria, conf["kb_indicator_len"])

        # print criteria, kb_vec

        sentvecs = sentvecs.view(1, -1)
        states_reps = states_reps.view(1, -1)

        print_ret(states_preds, sent_grp_preds, onto, sentence_generator)
Example #6
    def __init__(self):
        from codec import Codec
        from window import Window
        from system import System
        from datajar import DataJar
        from filesystem import FileSystem

        try:
            manifest = json.load(codecs.open('manifest.json', 'r', 'utf-8'))
        except Exception:
            manifest = {}

        for key in assets.manifest:
            if key in manifest:
                assets.manifest[key] = manifest[key]

        self.app = QApplication(sys.argv)
        self.app.setApplicationName(assets.manifest['name'])
        self.app.setApplicationVersion(assets.manifest['version'])

        assets.sys = System()
        assets.codec = Codec()
        assets.fs = FileSystem()
        assets.dataJar = DataJar()

        translator = QTranslator()
        if translator.load("zh_CN.qm"):
            self.app.installTranslator(translator)

        self.window = Window(None, assets.manifest['path'] + 'index.html')

        sys.exit(self.app.exec_())
Example #7
def check_add_codec(ip):
    net = get_codec_by_ip(ip)
    if net:
        return net

    net = Codec(ip)
    audio_codecs.append(net)
    return net
Example #8
import sys
import matplotlib.pyplot as plt
from codec import TreeBuilder, Codec


def main():
    text = "a dead dad ceded a bad babe a beaded abaca bed"

    # analyze the frequency of occurrences in text
    # to build a binary tree
    builder = TreeBuilder(text)
    binary_tree = builder.tree()

    # pass the binary tree to an encoder/decoder
    codec = Codec(binary_tree)
    # which can encode
    encoded = codec.encode_bin(text)
    # and decode
    decoded = codec.decode_bin(encoded)
    # if this assertion fails there is a serious problem with the code
    assert text == decoded

    # print the result
    print(f"{text}\n{encoded}")
    if decoded != text:
        print("OOPS")
    text = 'ab'
    compress_encode = []
    compress_huffman = []
    compress_binary = []

    for rang in range(300):

        texte = text * rang  # build a text
        taille = sys.getsizeof(texte)  # system size of this text
        compress_encode.append(taille / sys.getsizeof(codec.encode(texte)))
        compress_huffman.append(taille /
                                sys.getsizeof(codec.encode_bin(texte)))
        compress_binary.append(
            sys.getsizeof(codec.encode(texte)) /
            sys.getsizeof(codec.encode_bin(texte)))

    # Plotting
    plt.close()
    plt.title('Compression factors')
    plt.plot(compress_encode,
             label="Compression factor of\nCodec.encode (str output)")
    plt.plot(compress_huffman,
             label="Compression factor of the\nHuffman algorithm (binary output)")
    plt.plot(compress_binary,
             label="Compression factor of an\nencoded str packed to binary")
    plt.legend()
    plt.show()
Example #9
def recall_trial(num_episodes, save_file):

    # Configuration
    layer_sizes = {"rinp": 64, "rout": 64, "rtmp": 64}
    hidden_size = 64
    rho = .99
    plastic = ["rout<rtmp"]
    # remove_pathways = ["rinp<rout", "rinp<rtmp", "rtmp<rout"]
    remove_pathways = []

    # Setup GHU
    num_symbols = 5
    chars = "abcdefghi"
    numbs = "123456789"
    symbols = chars[:num_symbols] + "0" + numbs[:num_symbols - 1]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    for p in remove_pathways:
        pathways.pop(p)
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes,
                            pathways,
                            hidden_size,
                            plastic,
                            nonlinearity='relu')
    # controller = Controller(layer_sizes, pathways, hidden_size, plastic, nonlinearity='tanh')
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # Dataset of all possible input lists
    all_inputs = [
        np.array([k1, v1, k2, v2, k, "0"])
        for (k1, k2) in it.permutations(chars[:num_symbols], 2)
        for (v1, v2) in it.permutations(numbs[:num_symbols - 1], 2)
        for k in [k1, k2]
    ]
    input_length = 6
    output_window = 2
    episode_duration = input_length + output_window
    split = int(.80 * len(all_inputs))

    # example generation
    def example(dataset):
        inputs = dataset[np.random.randint(len(dataset))]
        targets = inputs[[1 if inputs[0] == inputs[4] else 3]]
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # reward calculation based on leading LVD at individual steps
    def reward(ghu, targets, outputs):
        # all or nothing at final time-step
        r = np.zeros(len(outputs))
        outputs = np.array(
            [out for out in outputs[input_length - 1:] if out != separator])
        if len(outputs) == len(targets): r[-1] = (targets == outputs).all()
        return r

    # ################### Sanity check
    correct_choices = [
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "rtmp": "rtmp<rinp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "rtmp": "rtmp<rtmp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "rtmp": "rtmp<rinp"
        }, ["rout<rtmp"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "rtmp": "rtmp<rtmp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "rtmp": "rtmp<rinp"
        }, ["rout<rtmp"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rtmp",
            "rtmp": "rtmp<rtmp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "rtmp": "rtmp<rtmp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "rtmp": "rtmp<rtmp"
        }, []),
    ]
    # ################### Sanity check

    # Run optimization
    avg_rewards, avg_general, grad_norms = reinforce(
        ghu,
        num_epochs=500,
        episode_duration=episode_duration,
        training_example=training_example,
        testing_example=None,
        reward=reward,
        task="recall",
        learning_rate=.1,
        # line_search_iterations = 5,
        # distribution_cap = .1,
        # likelihood_cap = .7,
        distribution_variance_coefficient=.05,
        # choices=correct_choices, # perfect rewards with this
        verbose=1,
        save_file=save_file)

    # Assess generalization similarly after run
    print("Cloning GHU for generalization...")
    ghu_gen = ghu.clone()
    print("Sampling problem instances...")
    inputs, targets = zip(
        *[testing_example() for b in range(ghu_gen.batch_size)])
    print("Running on test data...")
    outputs, rewards = ghu_gen.run(episode_duration,
                                   inputs,
                                   targets,
                                   reward,
                                   verbose=1)
    R_gen = rewards.sum(axis=1)

    # Overwrite file dump with R_gen in place of avg_general
    with open(save_file, "rb") as f:
        result = pk.load(f)
    result = list(result)
    result[2] = R_gen
    result = tuple(result)
    with open(save_file, "wb") as f:
        pk.dump(result, f)
Example #10
intranoise_levels = np.arange(0.05, 0.35, 0.05)
num_cs = [4, 8, 12, 16, 20]

for inter in tqdm(internoise_levels, desc="eta_1"):
    for intra in tqdm(intranoise_levels, desc="eta_2"):
        for num_c in tqdm(num_cs, desc="num_c"):

            #print(f"\n### n:{n} num_c:{num_c} ###")

            #print("Generation of G ...")
            clusters = pd.fixed_clusters(n, num_c)
            G, GT, labels = pd.custom_cluster_matrix(n, clusters, inter, 1,
                                                     intra, 0)

            # epsilon parameter for the approx Alon et al
            c = Codec(0.285, 0.285, 1)
            c.verbose = False

            #print("Generation of compressed/reduced graph ...")
            tm = time.time()
            k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(
                G, "indeg_guided")
            red = c.reduced_matrix(G, k, epsilon, classes, reg_list)
            t_compression = time.time() - tm
            #print(f"s:{t_compression:.2f}")

            # Precomputation of the eigenvalues
            #print("Eigenvalues of G ... ")
            tm = time.time()
            G_eig, aux = metrics.eigs(G)
            t_G_eig = time.time() - tm
Example #11
def basic_plastic_trial(num_episodes, save_file):

    # Configuration
    register_names = ["rinp", "rout", "m"]
    layer_sizes = {q: 32 for q in register_names}
    hidden_size = 32
    rho = .99
    plastic = ["rinp<m"]
    remove_pathways = ["rinp<rout", "m<rinp", "m<rout", "rout<m"]

    # Setup GHU
    num_symbols = 3
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(register_names, symbols)
    for p in remove_pathways:
        pathways.pop(p)
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes,
                            pathways,
                            hidden_size,
                            plastic,
                            nonlinearity='relu')
    # controller = Controller(layer_sizes, pathways, hidden_size, plastic, nonlinearity='tanh')
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # training example generation
    episode_duration = 3

    def training_example():
        inputs = np.random.choice(symbols[1:], size=2, replace=False)
        targets = np.array(["0", "0", inputs[0]])
        return inputs, targets

    def reward(ghu, targets, outputs):
        # All or nothing
        r = np.zeros(len(outputs))
        if lvd(outputs, targets)[0] == 0: r[-1] = +1.
        return r

    # ################### Sanity check
    inputs = [["2", "1"]]
    correct_choices = [
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "m": "m<m"
        }, [1.0]),
        ({
            "rinp": "rinp<m",
            "rout": "rout<rout",
            "m": "m<m"
        }, [0.0]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "m<m"
        }, [0.0]),
    ]
    # ghu.clone().dbg_run(inputs, episode_duration, correct_choices)
    # input("???????")
    # ################### Sanity check

    # Run optimization
    avg_rewards, grad_norms = reinforce(
        ghu,
        num_epochs=500,
        episode_duration=episode_duration,
        training_example=training_example,
        reward=reward,
        task="basic_plastic",
        learning_rate=.05,
        # line_search_iterations = 5,
        # distribution_cap = .1,
        # likelihood_cap = .7,
        # distribution_variance_coefficient = 0.05,
        # choices=correct_choices, # perfect rewards with this
        verbose=1,
        save_file=save_file)
Example #12
from codec import TreeBuilder, Codec

text = "a dead dad ceded a bad babe a beaded abaca bed"

builder = TreeBuilder(text)
binary_tree = builder.tree()

# pass the binary tree to an encoder/decoder
codec = Codec(binary_tree)
# which can encode
encoded = codec.encode(text)
# and decode
decoded = codec.decode(encoded)
# if this assertion fails there is a serious problem with the code
assert text == decoded

# print the result
print(f"{text}\n{decoded}")
if decoded != text:
    print("OOPS")
Example #13
def echo_trial(episode_duration, save_file):

    # Configuration
    num_symbols = 10
    layer_sizes = {"rinp": 32, "rout": 32, "rtmp": 32}
    hidden_size = 32
    rho = .99
    plastic = []
    num_episodes = 250

    # Setup GHU
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # Generate dataset
    input_length = int(episode_duration / 2)
    all_inputs = []
    for symbol in symbols[1:]:
        for t in range(input_length):
            inputs = [separator] * input_length
            inputs[t] = symbol
            all_inputs.append(inputs)
    split = int(.80 * len(all_inputs))

    # example generation
    def example(dataset):
        # Randomly choose echo symbol (excluding 0 separator)
        inputs = dataset[np.random.randint(len(dataset))]
        targets = [inp for inp in inputs if inp != separator]
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # all or nothing reward
    def reward(ghu, targets, outputs):
        r = np.zeros(len(outputs))
        outputs = np.array(
            [out for out in outputs[input_length:] if out != separator])
        if len(outputs) == len(targets): r[-1] = (targets == outputs).all()
        return r

    # # correct choices for debugging
    # correct_choices = \
    #     [({"rinp": "rinp<rout", "rout": "rout<rinp"}, [])]*2 + \
    #     [({"rinp": "rinp<rinp", "rout": "rout<rout"}, [])]*(episode_duration-2)
    # # run it to debug:
    # inputs, targets = zip(*[training_example() for b in range(ghu.batch_size)])
    # ghu.run(episode_duration, inputs, targets, reward, choices=correct_choices, verbose=3)
    # input("????")

    # Run optimization
    avg_rewards, avg_general, grad_norms = reinforce(
        ghu,
        num_epochs=200,
        episode_duration=episode_duration,
        training_example=training_example,
        testing_example=testing_example,
        reward=reward,
        task="echo",
        learning_rate=.1,
        verbose=1,
        save_file=save_file)

    return avg_rewards, avg_general, grad_norms
Example #14
    pathways.update({k: ("m", "m") for k in ["inc-m", "dec-m"]})
    associations += [(k, str(a), str((a + x) % num_addresses))
                     for k, x in [("inc-m", 1), ("dec-m", -1)]
                     for a in range(num_addresses)]

    return pathways, associations


if __name__ == "__main__":

    layer_sizes = {"r0": 3, "r1": 3}
    pathways = {0: ("r0", "r0"), 1: ("r1", "r0"), 2: ("r1", "r1")}
    hidden_size = 5
    batch_size = 2

    codec = Codec(layer_sizes, "01")
    controller = Controller(layer_sizes, pathways, hidden_size)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=batch_size)

    ghu.associate([
        (0, "0", "0"),
        (1, "0", "0"),
        (2, "0", "0"),
        (0, "1", "1"),
        (1, "1", "1"),
        (2, "1", "1"),
    ])
Example #15
def main():
    with open(CONFIG_FN) as f:
        conf = json.load(f)

    args = parse_args()

    kb = load_kb(conf["kb"], 'name')
    sys_vocab, sys_word2idx = load_sys_vocab(conf["sys_vocab"])

    sys_codec = Codec(sys_vocab, sys_word2idx)

    onto, onto_idx = load_ontology(conf["ontology"])

    word2idx, embed = load_embed(**conf)

    usr_codec = Codec([], word2idx)

    trk_model, slot_len_sum = load_tracker_model(onto, embed, conf)
    cond_net, generator = load_generator_model(conf, args.cond_net,
                                               args.gen_net, slot_len_sum,
                                               len(sys_vocab))

    cond_net.eval()
    generator.eval()

    trk_model.eval()

    hidden = trk_model.state_tracker.init_hidden()
    kb_found = Variable(torch.zeros(1, conf["kb_indicator_len"]))

    def gen_sent(cond):
        '''generate one sentence'''
        hidden = generator.init_hidden()
        inp = Variable(torch.LongTensor([[sys_word2idx['<sos>']]]))

        sent_out = []

        for i in range(MAX_SENT_LEN):
            out, hidden = generator(inp, cond, hidden)

            topv, topi = out.data.topk(1)
            out_word = int(topi[0][0])

            if out_word == sys_word2idx['<eos>']:
                break
            inp = Variable(torch.LongTensor([[out_word]]))

            sent_out.append(out_word)

        return sys_codec.decode(sent_out)

    for line in iter(sys.stdin.readline, ''):
        inp = usr_codec.encode(line.strip())

        inp = Variable(torch.LongTensor([
            inp,
        ]))

        sentvecs, states_reps, states_preds, hidden = trk_model(
            inp, kb_found, hidden)

        # print_ret(states_preds)
        criteria = to_search_criteria(states_preds, onto)
        ret, kb_vec = get_kb_result(kb, criteria, conf["kb_indicator_len"])

        sentvecs = sentvecs.view(1, -1)
        states_reps = states_reps.view(1, -1)

        for slot in states_preds:
            states_preds[slot] = states_preds[slot].view(1, -1)

        cond = cond_net(sentvecs, states_reps, states_preds, kb_found)

        print(gen_sent(cond))
Example #16
def test():
    minibatch_size = 100
    num_words = 20
    tuplesize = 3
    num_visible = tuplesize*num_words
    num_hidden = 30

    codec = Codec(tuplesize, num_words)
    tuples = codec.tuples
    words = codec.words
    encoded = codec.tuples_to_matrix()
    (num_data, _) = encoded.shape

    print(words)
    print('data count: ', num_data)

    rbm = RBM(num_visible = num_visible,
              num_hidden = num_hidden,
              minibatch_size = minibatch_size)
    id_indices = numpy.random.randint(low=0, high=num_data, size=minibatch_size)
    input_data = T.constant(encoded[id_indices])

    #print(input_data)
    
    #print(rbm.propup(input_data).eval())

    h1samples = rbm.sample_h_given_v(input_data).eval()
    #print(h1samples)

    #print(rbm.propdown(h1samples).eval())

    v2samples = rbm.sample_v_given_h(h1samples).eval()
    #print(v2samples)

    (W,H,V) = rbm.contrastive_divergence_1(input_data)
    #print(W.eval())
    #print(H.eval())
    #print(V.eval())


    xvis = T.fmatrix('xvis')
    h1samples = rbm.sample_h_given_v(xvis)
    v2samples = rbm.sample_v_given_h(h1samples)
    sample_vhv = theano.function([xvis], v2samples)

    example_indices = numpy.random.randint(low=0, high=num_data, size=minibatch_size)
    example_input_data = encoded[example_indices]
    num_examples = min(10, minibatch_size)
    def show_examples():
        rec = sample_vhv(example_input_data)
        for example in range(num_examples):
            print('input words:',
                  [(t+1, words[idx])
                   for t in range(tuplesize)
                   for idx in range(num_words)
                   if example_input_data[example, t*num_words + idx]])
            print('reconstructed words:',
                  [(t+1, words[idx])
                   for t in range(tuplesize)
                   for idx in range(num_words)
                   if rec[example, t*num_words + idx]])

        
    vis = T.fmatrix('vis')
    train = rbm.cd1_fun(vis)

    draw = VisualizeWeights('Minibatches', rbm, tuplesize, words, num_hidden)
    for epoch in range(1000):
        show_examples()
        all_vdiffs = numpy.zeros(num_visible)
        print('epoch ', epoch)
        numpy.random.shuffle(encoded)
        for minibatch in range(num_data // minibatch_size):
            mb_start = minibatch * minibatch_size
            mb_end = mb_start + minibatch_size
            input_data_indices = numpy.arange(mb_start, mb_end)
            encoded_input = encoded[input_data_indices]
            input_data = encoded_input
            vdiffs = train(input_data)
            all_vdiffs = all_vdiffs + numpy.abs(vdiffs)
        print('reconstruction error: ', numpy.sum(all_vdiffs) * minibatch_size)
        print(numpy.ndarray.astype(rbm.weights.get_value()*100, numpy.int32))
        print(numpy.ndarray.astype(rbm.vbias.get_value()*100, numpy.int32))
        print(numpy.ndarray.astype(rbm.hbias.get_value()*100, numpy.int32))
        draw.epoch_finished(epoch)
Example #17
def trials(i, avgrew, gradnorm):
    print("***************************** Trial ", str(i + 1),
          "*******************************")

    num_addresses = 4
    register_names = ["rinp", "rout"]
    num_episodes = 5000

    layer_sizes = {q: 128 for q in register_names + ["m"]}
    hidden_size = 32
    # plastic = ["%s<m"%q for q in register_names]
    plastic = ["rinp<m"]

    symbols = [str(a) for a in range(num_addresses)]
    pathways, associations = turing_initializer(register_names, num_addresses)

    # constrain pathways inductive bias
    remove_pathways = ["rout<m", "m<rout"]
    for p in remove_pathways:
        pathways.pop(p)
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))

    codec = Codec(layer_sizes, symbols, rho=.999)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)

    # Sanity check
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           plastic=plastic,
                           batch_size=num_episodes)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # training example generation
    list_symbols = 4
    min_length = 3
    max_length = 3

    def training_example():

        list_length = np.random.randint(min_length, max_length + 1)
        inputs = np.array([separator] * (list_length))
        inputs[:] = np.random.choice(symbols[1:list_symbols],
                                     size=list_length,
                                     replace=False)
        targets = inputs[::-1]
        return inputs, targets

    # # reward calculation from LVD
    # def reward(ghu, targets, outputs):
    #     # Assess reward: negative LVD after separator filtering
    #     outputs_ = [out for out in outputs if out != separator]
    #     l, _ = lvd(outputs_, targets)
    #     return -l

    def reward(ghu, targets, outputs):
        outputs_ = outputs[len(targets) - 1:]
        #zeros = [o for o in outputs[len(targets)-1:] if o==separator]
        #totzeros = len(zeros)
        r = np.zeros(len(outputs))
        # if len(outputs_)==0:
        #     r[-1] -= 2*(len(outputs[len(targets)-1:])+1)
        # else:
        _, d = lvd(outputs_, targets)
        for i in range(1, d.shape[0]):
            r[-1] += 1. if (i < d.shape[1]
                            and d[i, i] == d[i - 1, i - 1]) else -2.
            #r[-1] -= 0.1*totzeros
        return r

    filename = "reverse" + str(i + 1) + ".png"
    avg_rewards, grad_norms = reinforce(ghu,
                                        num_epochs=1000,
                                        episode_duration=2 * max_length - 1,
                                        training_example=training_example,
                                        reward=reward,
                                        task="reverse",
                                        learning_rate=.03,
                                        verbose=2)

    gradnorm[i + 1] = grad_norms.tolist()
    avgrew[i + 1] = avg_rewards.tolist()

    pt.subplot(2, 1, 1)
    pt.plot(avg_rewards)
    pt.title("Learning curve of reverse")
    pt.ylabel("Avg Reward")
    pt.subplot(2, 1, 2)
    pt.plot(grad_norms)
    pt.xlabel("Epoch")
    pt.ylabel("||Grad||")
    pt.savefig(filename)
Example #18
    def build(self, filename):

        # check the package has .desktop files
        pkg = Package(filename)

        for b in self.cfg.get_package_blacklist():
            if fnmatch.fnmatch(pkg.name, b):
                pkg.log.write(LoggerItem.INFO, "package is blacklisted")
                return

        # set up state
        if not os.path.exists('./appstream'):
            os.makedirs('./appstream')
        if not os.path.exists('./icons'):
            os.makedirs('./icons')
        if not os.path.exists('./screenshot-cache'):
            os.makedirs('./screenshot-cache')
        if not os.path.exists('./screenshots'):
            os.makedirs('./screenshots')
        if not os.path.exists('./screenshots/source'):
            os.makedirs('./screenshots/source')
        for size in self.cfg.get_screenshot_thumbnail_sizes():
            path = './screenshots/' + str(size[0]) + 'x' + str(size[1])
            if not os.path.exists(path):
                os.makedirs(path)

        # remove tmp
        if os.path.exists('./tmp'):
            shutil.rmtree('./tmp')
        os.makedirs('./tmp')

        # decompress main file and search for desktop files
        package_decompress(pkg)
        files = []
        for f in self.cfg.get_interesting_installed_files():
            files.extend(glob.glob("./tmp" + f))
        files.sort()

        # we only need to install additional files if we're not running on
        # the builders
        for c in self.cfg.get_package_data_list():
            if fnmatch.fnmatch(pkg.name, c[0]):
                extra_files = glob.glob("./packages/%s*.rpm" % c[1])
                for f in extra_files:
                    extra_pkg = Package(f)
                    pkg.log.write(LoggerItem.INFO, "adding extra package %s" % extra_pkg.name)
                    package_decompress(extra_pkg)

        # check for duplicate apps in the package
        self.has_valid_content = False

        # check for codecs
        if pkg.name.startswith('gstreamer'):
            app = Codec(pkg, self.cfg)
            if app.parse_files(files):
                if self.add_application(app):
                    self.add_completed(pkg, app)
        else:
            # process each desktop file in the original package
            for f in files:

                pkg.log.write(LoggerItem.INFO, "reading %s" % f)
                fi = Gio.file_new_for_path(f)
                info = fi.query_info('standard::content-type', 0, None)

                # create the right object depending on the content type
                content_type = info.get_content_type()
                if content_type == 'inode/symlink':
                    continue
                if content_type == 'application/x-font-ttf':
                    app = FontFile(pkg, self.cfg)
                elif content_type == 'application/x-font-otf':
                    app = FontFile(pkg, self.cfg)
                elif content_type == 'application/x-desktop':
                    app = DesktopFile(pkg, self.cfg)
                elif content_type == 'application/xml':
                    app = InputMethodComponent(pkg, self.cfg)
                elif content_type == 'application/x-sqlite3':
                    app = InputMethodTable(pkg, self.cfg)
                else:
                    pkg.log.write(LoggerItem.INFO, "content type %s not supported" % content_type)
                    continue

                # the ID is the filename
                app_id = os.path.basename(f).decode('utf-8')
                app.set_id(app_id)

                # parse file
                if not app.parse_file(f):
                    continue

                # write the application
                if self.add_application(app):
                    self.add_completed(pkg, app)
Example #19
def main(args, arch):
    adv_models = None
    train_loader, test_loader = create_loaders(args, root='../data')
    if args.dataset == 'cifar':
        args.nc, args.h, args.w = 3, 32, 32
        args.input_size = 32
        model, l_test_classif_paths = load_all_classifiers(args, load_archs=[args.source_arch])
        model_type = args.source_arch
        if args.target_arch is not None:
            model_target, l_test_classif_paths = load_all_classifiers(args, load_archs=[args.target_arch])
            model_type = args.target_arch
            del model_target
            torch.cuda.empty_cache()
    elif args.dataset == 'mnist':
        args.input_size = 28
        if args.source_arch == 'natural':
            model, l_test_classif_paths = load_all_classifiers(args, load_archs=["natural"])
            model_type = 'natural'
        elif args.source_arch == 'ens_adv':
            adv_model_names = args.adv_models
            adv_models = [None] * len(adv_model_names)
            for i in range(len(adv_model_names)):
                type = get_model_type(adv_model_names[i])
                adv_models[i] = load_model(args, adv_model_names[i], type=type).to(args.dev)

            path = os.path.join(args.dir_test_models, "pretrained_classifiers",
                                args.dataset, "ensemble_adv_trained", args.model)
            model = load_model(args, args.model, type=args.type)
            l_test_classif_paths = [path]
            model_type = 'Ensemble Adversarial'

    model.to(args.dev)
    model.eval()
    test_classifier(args, model, args.dev, test_loader, epoch=1)
    print("Testing on %d Test Classifiers" %(len(l_test_classif_paths)))
    # attack related settings
    if args.attack_method == "zoo" or args.attack_method == "autozoom_bilin":
        if args.img_resize is None:
            args.img_resize = args.input_size
            print("Argument img_resize is not set and not using autoencoder, set to image original size:{}".format(
                args.img_resize))

    codec = None
    if args.attack_method == "zoo_ae" or args.attack_method == "autozoom_ae":
        codec = Codec(args.input_size, IN_CHANNELS[args.dataset],
                      args.compress_mode, args.resize, use_tanh=args.use_tanh)
        codec.load_codec(args.codec_path)
        codec.cuda()
        decoder = codec.decoder
        args.img_resize = decoder.input_shape[1]
        print("Loading autoencoder: {}, set the attack image size to:{}".format(args.codec_path, args.img_resize))

    # setup attack
    if args.attack_method == "zoo":
        blackbox_attack = ZOO(model, args.dataset, args)
    elif args.attack_method == "zoo_ae":
        blackbox_attack = ZOO_AE(model, args.dataset, args, decoder)
    elif args.attack_method == "autozoom_bilin":
        blackbox_attack = AutoZOOM_BiLIN(model, args.dataset, args)
    elif args.attack_method == "autozoom_ae":
        blackbox_attack = AutoZOOM_AE(model, args.dataset, args, decoder)
    target_str = "untargeted" if args.attack_type != "targeted" else "targeted_{}".format(args.target_type)
    attack_framework = AutoZoomAttackFramework(args, test_loader)
    attack_framework.attack_dataset_images(args, blackbox_attack, arch, model,
            codec, l_test_classif_paths=l_test_classif_paths,
            adv_models=adv_models)
    model.cpu()
Example #20
import time

n = 100
repetitions = 5
inter_s = [0, 0.05, 0.1, 0.2, 0.3]
intra_s = inter_s
num_cs = [2, 4, 8, 10, 16, 20, 40]
ksize = 23
imbalanced = True

inter_v = 1
intra_v = 0

refinement = 'indeg_guided'

cdc = Codec(0, 0.5, 20)
cdc.fast_search = False

s = Stats("/tmp/test.csv")

for repetition in range(repetitions):
    for inter in inter_s:
        for intra in intra_s:
            for num_c in num_cs:
                print(
                    pu.to_header(
                        f"r:{repetition+1}/{repetitions} n:{n} num_c:{num_c} inter:{inter} intra:{intra}"
                    ))
                tm = time.time()
                clusters = pd.random_clusters(n, num_c)
                G, GT, labeling = pd.custom_cluster_matrix(
Example #21
try:
    GT = data['GT']
    labeling = data['labels']
except KeyError:
    print("[x] No GT found, No labeling found")

G = data['G']

n = G.shape[0]
repetitions = 1
refinement = 'indeg_guided'
ksize = 23

s = Stats(f"/tmp/{dset_name}.csv")
c = Codec(0.1, 0.4, 40)

for r in range(repetitions):
    print(f"### r={r} ###")

    tm = time.time()
    k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(G, refinement)
    tcompression = time.time() - tm

    sze = c.decompress(G, 0, classes, k, reg_list)
    tdecompression = time.time() - tm

    fsze = c.post_decompression(sze, ksize)
    tpostdecompression = time.time() - tm

    red = c.reduced_matrix(G, k, epsilon, classes, reg_list)
Example #22
for n in tqdm([500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500, 7000, 7500, 8000, 8500, 9000, 9500, 10000], desc="Dimension G"):

        # First graph
        tm = time.time()
        clusters1 = proc_d.random_clusters(n, 8)
        G, GT, labeling = proc_d.custom_cluster_matrix(n, clusters1, 0.3, 1, 0.2, 0)
        tgen = time.time() - tm
        #print(f"TIME: tgen:{tgen:.2f}")

        # Second graph
        clusters2 = proc_d.random_clusters(n, 8)
        G2, GT2, labeling2 = proc_d.custom_cluster_matrix(n, clusters2, 0.6, 1, 0.1, 0)


        # Compression G1
        c = Codec(0.285, 0.285, 1)
        c.verbose = False
        tm = time.time()
        k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(G, 'indeg_guided')
        tcompression1 = time.time() - tm
        #print(f"TIME: tcompression1:{tcompression1:.2f}")

        # Compression G2
        c = Codec(0.285, 0.285, 1)
        c.verbose = False
        tm = time.time()
        k2, epsilon, classes2, sze_idx, reg_list2, nirr2 = c.compress(G2, 'indeg_guided')
        tcompression2 = time.time() - tm
        #print(f"TIME: tcompression2:{tcompression2:.2f}")       

        # Reduced graph
Example #23
    GT = []
    labels = []

    try:
        GT = data['GT']
        labeling = data['labels']
    except KeyError:
        print("[x] No GT found, No labeling found")

    refinement = 'indeg_guided'
    ksize = 23

    s = Stats(f"/tmp/{dset_name}.csv")

    c = Codec(0, 0.5, 20)
    c.verbose = False

    #f = []
    #g = []
    #thresholds = np.arange(0,0.7, 0.05)

    thresholds = np.arange(0, 0.5, 0.05)
    for t in thresholds:
        print(f"### t={t} ###")
        #G = (data['G']*10**4)>(t*np.max(data['G']))
        G = data['G'] > 0

        n = G.shape[0]

        k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(
Example #24
    return s


if __name__ == "__main__":
    # GHU settings
    num_symbols = 3
    layer_sizes = {"rinp": 64, "rout": 64}
    hidden_size = 16
    plastic = []

    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)

    codec = Codec(layer_sizes, symbols, rho=.9)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)

    # Sanity check
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           plastic=plastic)
    ghu.associate(associations)
    for p, s, t in associations:
        q, r = ghu.pathways[p]
        assert (codec.decode(q, tr.mv(ghu.W[p], codec.encode(r, s))) == t)

    # Initialize layers
    ghu.v[0]["rinp"] = codec.encode("rinp", "0")
Example #25
##############################################
############## Main script code ##############
##############################################

refinement = 'indeg_guided'

vals = [0.1, 0.2, 0.5, 0.7]
first = True
n = 1000

# Query graph, the bigger one
clusters = pd.random_clusters(n, 8)
inter = 0.1
intra = 0.1
oG, oGT, olabeling = pd.custom_cluster_matrix(n, clusters, inter, 1, intra, 0)
c = Codec(0.15, 0.5, 5)
k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(oG, refinement)
query = c.reduced_matrix(oG, k, epsilon, classes, reg_list)

# Database
c = Codec(0.2, 0.5, 5)
num_c = 8
for r in range(5):
    for i in vals:

        clusters = pd.random_clusters(n, num_c)
        G, GT, labeling = pd.custom_cluster_matrix(n, clusters, i, 1, i, 0)
        k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(
            G, refinement)
        red = c.reduced_matrix(G, k, epsilon, classes, reg_list)
Example #26
def test():
    minibatch_size = 100
    num_words = 40
    tuplesize = 5
    num_visible = tuplesize*num_words
    num_hidden = 140

    codec = Codec(tuplesize, num_words)
    tuples = codec.tuples
    words = codec.words
    encoded = codec.tuples_to_matrix()
    (num_data, _) = encoded.shape

    print(words)
    print('data count: ', num_data)

    rbm = RBM(num_visible = num_visible,
              num_hidden = num_hidden,
              minibatch_size = minibatch_size,
              venabledp=1.0,
              henabledp=0.7)
    id_indices = numpy.random.randint(low=0, high=num_data, size=minibatch_size)
    input_data = T.constant(encoded[id_indices])

    #print(input_data)
    
    #print(rbm.propup(input_data).eval())

    #h1samples = rbm.sample_h_given_v(input_data).eval()
    #print(h1samples)

    #print(rbm.propdown(h1samples).eval())

    #v2samples = rbm.sample_v_given_h(h1samples).eval()
    #print(v2samples)

    #(W,H,V) = rbm.contrastive_divergence_1(input_data)
    #print(W.eval())
    #print(H.eval())
    #print(V.eval())


    all_h_enabled = numpy.ones(num_hidden)
    all_v_enabled = numpy.ones(num_visible)

    xvis = T.fmatrix('xvis')
    h1samples = rbm.sample_h_given_v(xvis, all_h_enabled)
    v2samples = rbm.sample_v_given_h(h1samples, all_v_enabled)
    sample_vhv = theano.function([xvis], v2samples)

    example_indices = numpy.random.randint(low=0, high=num_data, size=minibatch_size)
    example_input_data = encoded[example_indices]
    num_examples = min(10, minibatch_size)
    def show_examples():
        rec = sample_vhv(example_input_data)
        for example in range(num_examples):
            print('input words:',
                  [(t+1, words[idx])
                   for t in range(tuplesize)
                   for idx in range(num_words)
                   if example_input_data[example, t*num_words + idx]])
            print('reconstructed words:',
                  [(t+1, words[idx])
                   for t in range(tuplesize)
                   for idx in range(num_words)
                   if rec[example, t*num_words + idx]])

    def report_hidden():
        weights = rbm.weights.get_value()
        for h in range(num_hidden):
            print('hidden ', h)
            for block in range(tuplesize):
                for word in range(num_words):
                    w = weights[block*num_words+word, h]
                    if w > 0.5:
                        print('   %2i %8s  %4.1f' % (block, words[word], w))
        
    vis = T.fmatrix('vis')
    train = rbm.cd1_fun(vis)

    draw = VisualizeWeights('Dropout (vp:%4.2f, hp:%4.2f)' % (rbm.venabledp, rbm.henabledp),
                            rbm, tuplesize, words, num_hidden,
                            num_visible)
    for epoch in range(1000):
        show_examples()
        all_vdiffs = numpy.zeros(num_visible)
        print('epoch ', epoch)
        numpy.random.shuffle(encoded)
        for minibatch in range(num_data // minibatch_size):
            mb_start = minibatch * minibatch_size
            mb_end = mb_start + minibatch_size
            input_data_indices = numpy.arange(mb_start, mb_end)
            encoded_input = encoded[input_data_indices]
            input_data = encoded_input
            (vdiffs, venabled, henabled) = train(input_data)
            all_vdiffs = all_vdiffs + numpy.abs(vdiffs)
            #print('venabled', venabled)
            #print('henabled', henabled)
        print('reconstruction error: ', numpy.sum(all_vdiffs) * minibatch_size)
        #print(numpy.ndarray.astype(rbm.weights.get_value()*100, numpy.int32))
        #print(numpy.ndarray.astype(rbm.vbias.get_value()*100, numpy.int32))
        #print(numpy.ndarray.astype(rbm.hbias.get_value()*100, numpy.int32))
        draw.epoch_finished(epoch)
        report_hidden()
Example #27
    # plastic = ["%s<m"%q for q in register_names]
    plastic = ["rinp<m"]

    symbols = [str(a) for a in range(num_addresses)]
    pathways, associations = turing_initializer(register_names, num_addresses)

    # constrain pathways inductive bias
    remove_pathways = ["rout<m", "m<rout"]
    for p in remove_pathways:
        pathways.pop(p)
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))
    # print(pathways)
    # print(associations)

    codec = Codec(layer_sizes, symbols, rho=.999)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    # controller = Controller(layer_sizes, pathways, hidden_size, input_keys=["m","rinp"])
    # controller.rnn.weight_hh_l0.data = 1*tr.eye(hidden_size) # favor repeated actions

    # Sanity check
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           plastic=plastic)
    ghu.associate(associations)
    for p, s, t in associations:
        q, r = ghu.pathways[p]
        assert (codec.decode(q, tr.mv(ghu.W[p], codec.encode(r, s))) == t)
    ghu_init = ghu
Example #28
class FIXConnectionHandler(object):
    def __init__(self, engine, protocol, sock=None, addr=None, observer=None):
        self.codec = Codec(protocol)
        self.engine = engine
        self.connectionState = ConnectionState.CONNECTED
        self.session = None
        self.addr = addr
        self.observer = observer
        self.msgBuffer = b''
        self.heartbeatPeriod = 30.0
        self.msgHandlers = []
        self.sock = sock
        self.heartbeatTimerRegistration = None
        self.expectedHeartbeatRegistration = None
        self.socketEvent = FileDescriptorEventRegistration(
            self.handle_read, sock, EventType.READ)
        self.engine.eventManager.registerHandler(self.socketEvent)

    def address(self):
        return self.addr

    def disconnect(self):
        self.handle_close()

    def _notifyMessageObservers(self, msg, direction, persistMessage=True):
        if persistMessage is True:
            self.engine.journaller.persistMsg(msg, self.session, direction)
        for handler in filter(
                lambda x: (x[1] is None or x[1] == direction) and
            (x[2] is None or x[2] == msg.msgType), self.msgHandlers):
            handler[0](self, msg)

    def addMessageHandler(self, handler, direction=None, msgType=None):
        self.msgHandlers.append((handler, direction, msgType))

    def removeMessageHandler(self, handler, direction=None, msgType=None):
        remove = filter(
            lambda x: x[0] == handler and
            (x[1] == direction or direction is None) and
            (x[2] == msgType or msgType is None), self.msgHandlers)
        for h in remove:
            self.msgHandlers.remove(h)

    def _sendHeartbeat(self):
        self.sendMsg(self.codec.protocol.messages.Messages.heartbeat())

    def _expectedHeartbeat(self, type, closure):
        logging.warning("Expected heartbeat from peer %s" %
                        (self.expectedHeartbeatRegistration, ))
        self.sendMsg(self.codec.protocol.messages.Messages.test_request())

    def registerLoggedIn(self):
        self.heartbeatTimerRegistration = TimerEventRegistration(
            lambda type, closure: self._sendHeartbeat(), self.heartbeatPeriod)
        self.engine.eventManager.registerHandler(
            self.heartbeatTimerRegistration)
        # register timeout for 10% more than we expect
        self.expectedHeartbeatRegistration = TimerEventRegistration(
            self._expectedHeartbeat, self.heartbeatPeriod * 1.10)
        self.engine.eventManager.registerHandler(
            self.expectedHeartbeatRegistration)

    def registerLoggedOut(self):
        if self.heartbeatTimerRegistration is not None:
            self.engine.eventManager.unregisterHandler(
                self.heartbeatTimerRegistration)
            self.heartbeatTimerRegistration = None
        if self.expectedHeartbeatRegistration is not None:
            self.engine.eventManager.unregisterHandler(
                self.expectedHeartbeatRegistration)
            self.expectedHeartbeatRegistration = None

    def _handleResendRequest(self, msg):
        protocol = self.codec.protocol
        responses = []

        beginSeqNo = msg[protocol.fixtags.BeginSeqNo]
        endSeqNo = msg[protocol.fixtags.EndSeqNo]
        if int(endSeqNo) == 0:
            endSeqNo = sys.maxsize
        logging.info("Received resent request from %s to %s", beginSeqNo,
                     endSeqNo)
        replayMsgs = self.engine.journaller.recoverMsgs(
            self.session, MessageDirection.OUTBOUND, beginSeqNo, endSeqNo)
        gapFillBegin = int(beginSeqNo)
        gapFillEnd = int(beginSeqNo)
        for replayMsg in replayMsgs:
            msgSeqNum = int(replayMsg[protocol.fixtags.MsgSeqNum])
            if replayMsg[protocol.fixtags.MsgType] in protocol.msgtype.sessionMessageTypes:
                gapFillEnd = msgSeqNum + 1
            else:
                if self.engine.shouldResendMessage(self.session, replayMsg):
                    if gapFillBegin < gapFillEnd:
                        # we need to send a gap fill message
                        gapFillMsg = FIXMessage(protocol.msgtype.SEQUENCERESET)
                        gapFillMsg.setField(protocol.fixtags.GapFillFlag, 'Y')
                        gapFillMsg.setField(protocol.fixtags.MsgSeqNum,
                                            gapFillBegin)
                        gapFillMsg.setField(protocol.fixtags.NewSeqNo,
                                            str(gapFillEnd))
                        responses.append(gapFillMsg)

                    # and then resend the replayMsg
                    replayMsg.removeField(protocol.fixtags.BeginString)
                    replayMsg.removeField(protocol.fixtags.BodyLength)
                    replayMsg.removeField(protocol.fixtags.SendingTime)
                    replayMsg.removeField(protocol.fixtags.SenderCompID)
                    replayMsg.removeField(protocol.fixtags.TargetCompID)
                    replayMsg.removeField(protocol.fixtags.CheckSum)
                    replayMsg.setField(protocol.fixtags.PossDupFlag, "Y")
                    responses.append(replayMsg)

                    gapFillBegin = msgSeqNum + 1
                else:
                    gapFillEnd = msgSeqNum + 1
                    responses.append(replayMsg)

        if gapFillBegin < gapFillEnd:
            # we need to send a gap fill message
            gapFillMsg = FIXMessage(protocol.msgtype.SEQUENCERESET)
            gapFillMsg.setField(protocol.fixtags.GapFillFlag, 'Y')
            gapFillMsg.setField(protocol.fixtags.MsgSeqNum, gapFillBegin)
            gapFillMsg.setField(protocol.fixtags.NewSeqNo, str(gapFillEnd))
            responses.append(gapFillMsg)

        return responses

    def handle_read(self, type, closure):
        protocol = self.codec.protocol
        try:
            msg = self.sock.recv(8192)
            if msg:
                self.msgBuffer = self.msgBuffer + msg
                (decodedMsg, parsedLength) = self.codec.decode(self.msgBuffer)
                self.msgBuffer = self.msgBuffer[parsedLength:]
                while decodedMsg is not None and self.connectionState != ConnectionState.DISCONNECTED:
                    self.processMessage(decodedMsg)
                    (decodedMsg,
                     parsedLength) = self.codec.decode(self.msgBuffer)
                    self.msgBuffer = self.msgBuffer[parsedLength:]
                if self.expectedHeartbeatRegistration is not None:
                    self.expectedHeartbeatRegistration.reset()
            else:
                logging.debug("Connection has been closed")
                self.disconnect()
        except ConnectionError as why:
            logging.debug("Connection has been closed %s" % (why, ))
            self.disconnect()

    def handleSessionMessage(self, msg):
        return -1

    def processMessage(self, decodedMsg):
        protocol = self.codec.protocol

        beginString = decodedMsg[protocol.fixtags.BeginString]
        if beginString != protocol.beginstring:
            logging.warning(
                "FIX BeginString is incorrect (expected: %s received: %s)",
                protocol.beginstring, beginString)
            self.disconnect()
            return

        msgType = decodedMsg[protocol.fixtags.MsgType]

        try:
            responses = []
            if msgType in protocol.msgtype.sessionMessageTypes:
                (recvSeqNo, responses) = self.handleSessionMessage(decodedMsg)
            else:
                recvSeqNo = decodedMsg[protocol.fixtags.MsgSeqNum]

            # validate the seq number
            (seqNoState,
             lastKnownSeqNo) = self.session.validateRecvSeqNo(recvSeqNo)

            if seqNoState is False:
                # We should send a resend request
                logging.info("Requesting resend of messages: %s to %s" %
                             (lastKnownSeqNo, 0))
                responses.append(
                    protocol.messages.Messages.resend_request(
                        lastKnownSeqNo, 0))
                # we still need to notify if we are processing Logon message
                if msgType == protocol.msgtype.LOGON:
                    self._notifyMessageObservers(decodedMsg,
                                                 MessageDirection.INBOUND,
                                                 False)
            else:
                self.session.setRecvSeqNo(recvSeqNo)
                self._notifyMessageObservers(decodedMsg,
                                             MessageDirection.INBOUND)

            for m in responses:
                self.sendMsg(m)

        except SessionWarning as sw:
            logging.warning(sw)
        except SessionError as se:
            logging.error(se)
            self.disconnect()
        except DuplicateSeqNoError:
            try:
                if decodedMsg[protocol.fixtags.PossDupFlag] == "Y":
                    logging.debug(
                        "Received duplicate message with PossDupFlag set")
            except KeyError:
                pass
            finally:
                # Runs whether or not PossDupFlag was present: the duplicate
                # could not be persisted, so the connection is dropped.
                logging.error(
                    "Failed to process message with duplicate seq no (MsgSeqNum: %s) - disconnecting"
                    % (recvSeqNo, ))
                self.disconnect()

    def handle_close(self):
        if self.connectionState != ConnectionState.DISCONNECTED:
            logging.info("Client disconnected")
            self.registerLoggedOut()
            self.sock.close()
            self.connectionState = ConnectionState.DISCONNECTED
            self.msgHandlers.clear()
            if self.observer is not None:
                self.observer.notifyDisconnect(self)
            self.engine.eventManager.unregisterHandler(self.socketEvent)

    def sendMsg(self, msg):
        if self.connectionState not in (ConnectionState.CONNECTED,
                                        ConnectionState.LOGGED_IN):
            raise FIXException(FIXException.FIXExceptionReason.NOT_CONNECTED)

        encodedMsg = self.codec.encode(msg, self.session).encode('utf-8')
        self.sock.send(encodedMsg)
        if self.heartbeatTimerRegistration is not None:
            self.heartbeatTimerRegistration.reset()

        # Re-decode the wire bytes so observers see the message exactly as
        # sent, including session fields (e.g. MsgSeqNum) added by encode().
        decodedMsg, _ = self.codec.decode(encodedMsg)

        try:
            self._notifyMessageObservers(decodedMsg, MessageDirection.OUTBOUND)
        except DuplicateSeqNoError:
            logging.error(
                "We have sent a message with a duplicate seq no, failed to persist it (MsgSeqNum: %s)"
                % (decodedMsg[self.codec.protocol.fixtags.MsgSeqNum]))
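# A minimal, self-contained illustration (a toy framing codec, not the FIX
# codec above) of the buffered decode loop in handle_read: TCP is a byte
# stream, so a single recv() can carry a partial message or several messages
# at once. decode() therefore reports how many bytes it consumed, and the
# unconsumed remainder stays in msgBuffer for the next pass.


class ToyCodec:
    """Frames messages as b'<payload>' plus a newline; returns (None, 0) on a partial frame."""

    def decode(self, buf):
        idx = buf.find(b"\n")
        if idx == -1:
            return (None, 0)           # incomplete frame: consume nothing
        return (buf[:idx], idx + 1)    # payload, plus delimiter, consumed


codec, buf = ToyCodec(), b""
for chunk in (b"he", b"llo\nwor", b"ld\n"):  # three simulated recv() results
    buf += chunk
    msg, used = codec.decode(buf)
    buf = buf[used:]
    while msg is not None:
        print(msg)                     # b'hello', then b'world'
        msg, used = codec.decode(buf)
        buf = buf[used:]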
Example #29
0
if __name__ == "__main__":
    print("*******************************************************")

    # Configuration
    num_symbols = 4
    layer_sizes = {"rinp": 64, "rout": 64, "rtmp": 64}
    hidden_size = 32
    rho = .99
    plastic = []
    num_episodes = 1000

    # Setup GHU
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    codec = Codec(layer_sizes, symbols, rho=rho)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # training example generation
    def training_example():
        ...  # body not shown; the example is cut off at this point
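    # A plausible body (an assumption, inferred by analogy with the trials in
    # the neighbouring examples, e.g. reverse_trial below): sample a random
    # input list over the non-separator symbols and derive its target, e.g.
    #
    #     inputs = np.array([np.random.choice(symbols[1:])
    #                        for _ in range(list_length)] + [separator])
    #     targets = ...  # task-dependent transform of inputs
    #     return inputs, targets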
Example #30
0
import itertools as it
import numpy as np
import pickle as pk
# (Codec, Controller, GatedHebbianUnit, turing_initializer and reinforce come
# from this project's own modules; their import paths are not shown here.)


def reverse_trial(num_episodes, save_file):

    # Configuration
    register_names = ["rinp", "rout"]
    layer_sizes = {q: 64 for q in register_names + ["m"]}
    hidden_size = 64
    rho = .99
    plastic = ["rout<m"]
    # remove_pathways = ["rinp<rout", "m<rinp", "m<rout", "rout<m"]
    remove_pathways = ["rinp<rout", "rinp<m", "m<rinp", "m<rout"]
    # remove_pathways = []
    input_keys = None

    # Setup GHU
    num_addresses = 4
    symbols = [str(a) for a in range(num_addresses)]
    pathways, associations = turing_initializer(register_names, num_addresses)
    for p in remove_pathways:
        pathways.pop(p)
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes,
                            pathways,
                            hidden_size,
                            plastic,
                            input_keys=input_keys,
                            nonlinearity='relu')
    # controller = Controller(layer_sizes, pathways, hidden_size, plastic, nonlinearity='tanh')
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # Dataset of all possible input lists
    min_length = 4
    max_length = 4
    episode_duration = 2 * max_length
    all_inputs = [
        np.array(inputs + (separator, ))
        for list_length in range(min_length, max_length + 1)
        for inputs in it.product(symbols[1:], repeat=list_length)
    ]
    split = int(.80 * len(all_inputs))

    # example generation
    def example(dataset):
        inputs = dataset[np.random.randint(len(dataset))]
        targets = inputs[:-1][::-1]
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # all or nothing reward calculation
    def reward(ghu, targets, outputs):
        r = np.zeros(len(outputs))
        outputs = outputs[len(targets) - 1:]
        outputs = np.array([out for out in outputs if out != separator])
        if len(outputs) == len(targets): r[-1] = (outputs == targets).all()
        return r
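    # Worked check (constructed, not from the source): with
    #   targets = np.array(["4", "3", "2", "1"]) and
    #   outputs = ["0", "0", "0", "0", "4", "3", "2", "1"],
    # outputs[len(targets) - 1:] is ["0", "4", "3", "2", "1"]; dropping the
    # "0" separators leaves ["4", "3", "2", "1"], which equals targets, so r
    # is all zeros except r[-1] == 1.0.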

    # ################### Sanity check
    correct_choices = [
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "inc-m"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "inc-m"
        }, ["rout<m"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "inc-m"
        }, ["rout<m"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "inc-m"
        }, ["rout<m"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "m": "dec-m"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<m",
            "m": "dec-m"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<m",
            "m": "dec-m"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<m",
            "m": "dec-m"
        }, []),
    ]
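    # (Inferred from the structure above, not stated in the source: one
    # (gate-dict, plastic-list) pair per time step, 2 * max_length = 8 steps
    # in all. The first half advances the memory address with inc-m while the
    # plastic "rout<m" updates bind each address to the symbol in rout; the
    # second half walks the address back with dec-m and reads the symbols out
    # through "rout<m", producing the reversed list.)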
    # ################### Sanity check

    # Run optimization
    avg_rewards, _, grad_norms = reinforce(
        ghu,
        num_epochs=500,
        episode_duration=episode_duration,
        training_example=training_example,
        # testing_example = testing_example,
        testing_example=None,
        reward=reward,
        task="reverse",
        learning_rate=.1,
        # line_search_iterations = 5,
        # distribution_cap = .1,
        # likelihood_cap = .7,
        distribution_variance_coefficient=0.05,
        # choices = correct_choices, # perfect reward with this
        verbose=1,
        save_file=save_file)

    # Assess generalization similarly after run
    print("Cloning GHU for generalization...")
    ghu_gen = ghu.clone()
    print("Sampling problem instances...")
    inputs, targets = zip(
        *[testing_example() for b in range(ghu_gen.batch_size)])
    print("Running on test data...")
    outputs, rewards = ghu_gen.run(episode_duration,
                                   inputs,
                                   targets,
                                   reward,
                                   verbose=1)
    R_gen = rewards.sum(axis=1)

    # Overwrite file dump with R_gen in place of avg_general
    with open(save_file, "rb") as f:
        result = pk.load(f)
    result = list(result)
    result[2] = R_gen
    result = tuple(result)
    with open(save_file, "wb") as f:
        pk.dump(result, f)
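# A hypothetical driver (the episode count and file name are illustrative,
# not from the source):
if __name__ == "__main__":
    reverse_trial(num_episodes=5000, save_file="reverse_trial_results.pkl")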
Example #31
0
import numpy
import theano
import theano.tensor as T
# (RBM1, Codec and VisualizeWeights come from this project's own modules;
# their import paths are not shown here.)


def test():
    num_words = 20
    tuplesize = 3
    num_visible = tuplesize*num_words
    num_hidden = 30
    codec = Codec(tuplesize, num_words)
    tuples = codec.tuples
    words = codec.words
    encoded = codec.tuples_to_matrix()
    (num_data, _) = encoded.shape
    print('data count: ', num_data)

    rbm = RBM1(num_visible=num_visible, num_hidden=num_hidden)
    input_data = T.constant(encoded[2])
    #print(pairs[2])
    #print(encoded[2])

    #print(input_data)
    
    #print(rbm.propup(input_data).eval())

    h1samples = rbm.sample_h_given_v(input_data).eval()
    #print(h1samples)

    #print(rbm.propdown(h1samples).eval())

    v2samples = rbm.sample_v_given_h(h1samples).eval()
    #print(v2samples)

    (W, H, V) = rbm.contrastive_divergence_1(input_data)
    #print(W.eval())

    xvis = T.fvector('xvis')
    h1samples = rbm.sample_h_given_v(xvis)
    v2samples = rbm.sample_v_given_h(h1samples)
    sample_vhv = theano.function([xvis], v2samples)

    num_examples = 20
    example_indices = numpy.random.randint(low=0, high=num_data, size=num_examples)
    def show_examples():
        for example in example_indices:
            dat = encoded[example]
            v2samples = sample_vhv(dat)
            print('input words:',
                  [(t+1, words[idx])
                   for t in range(tuplesize)
                   for idx in range(num_words)
                   if encoded[example, t*num_words + idx]])
            print('reconstructed words:',
                  [(t+1, words[idx])
                   for t in range(tuplesize)
                   for idx in range(num_words)
                   if v2samples[t*num_words + idx]])
            print('')
    def report_hidden():
        weights = rbm.weights.get_value()
        for h in range(num_hidden):
            print('hidden ', h)
            for block in range(tuplesize):
                for word in range(num_words):
                    w = weights[block*num_words+word, h]
                    if w > 0.5:
                        print('   %2i %8s  %4.1f' % (block, words[word], w))


    vis = T.fvector('vis')
    train = rbm.cd1_fun(vis)
    input_data = numpy.reshape(encoded[2],
                               num_visible)
    train(input_data)
    print(rbm.weights.get_value())

    draw = VisualizeWeights('', rbm, tuplesize, words, num_hidden)

    for epoch in range(500):
        show_examples()
        all_vdiffs = numpy.zeros(num_visible)
        print('epoch ', epoch)
        for i in range(num_data):
            input_data = numpy.reshape(encoded[i],
                                       num_visible)
            vdiffs = train(input_data)
            all_vdiffs = all_vdiffs + numpy.abs(vdiffs)
        print('reconstruction error: ', numpy.sum(all_vdiffs))
        print(T.cast(rbm.weights.get_value()*100, 'int32').eval())
        draw.epoch_finished(epoch)
        report_hidden()
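# The indexing above (t * num_words + idx) implies each row of `encoded` is
# tuplesize concatenated one-hot blocks of width num_words. A minimal sketch
# of that layout (an assumption about what Codec.tuples_to_matrix produces;
# the class itself is not shown here):

import numpy

def encode_tuple(word_ids, num_words):
    """One-hot encode a tuple of word indices into one flat binary vector."""
    vec = numpy.zeros(len(word_ids) * num_words)
    for t, idx in enumerate(word_ids):
        vec[t * num_words + idx] = 1.0
    return vec

print(encode_tuple((2, 0, 1), num_words=4))
# -> [0. 0. 1. 0. 1. 0. 0. 0. 0. 1. 0. 0.]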
Example #32
0
file_handler_except = log.FileHandler(sigs.except_path)
file_handler_except.setLevel(log.ERROR)
file_handler_except.setFormatter(formatter_except)
logger_except.addHandler(file_handler_except)
# </editor-fold>

# <editor-fold desc="globals">
BP = g.BREAK_POINT
SET = g.OP_SET

PUZZLE_DEFAULT =\
'.....627........5....54..311.....5..97.8.2.64..6.....764..29....8........526.....'
# </editor-fold>

codec = Codec()
encoded_puzzle = ''
solved_puzzle = ''


def load(cb, puz=PUZZLE_DEFAULT):
    try:
        global codec
        global encoded_puzzle
        global solved_puzzle

        if sigs.is_grid:
            puzzle_list = puz
            num_list = [
                item if len(item) == 1 else '.' for item in puzzle_list
            ]
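# A small, self-contained sketch (not part of the snippet) of the
# 81-character puzzle format used by PUZZLE_DEFAULT: row-major cells, digits
# for givens and '.' for blanks.

def print_grid(puzzle):
    assert len(puzzle) == 81, "a puzzle string is 9 x 9 = 81 characters"
    for row in range(9):
        print(' '.join(puzzle[row * 9:(row + 1) * 9]))

print_grid(PUZZLE_DEFAULT)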
Example #33
0
import itertools as it
import numpy as np
# (Codec, Controller, GatedHebbianUnit, default_initializer and reinforce
# come from this project's own modules; their import paths are not shown
# here.)


def swap_trial(distribution_variance_coefficient, save_file):

    # Configuration
    num_symbols = 10
    layer_sizes = {"rinp": 32, "rout": 32, "rtmp": 32}
    hidden_size = 32
    rho = .99
    plastic = []
    num_episodes = 100

    # Setup GHU
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # Generate dataset
    all_inputs = list(it.permutations(symbols[1:], 2))
    split = int(.80 * len(all_inputs))

    # example generation
    def example(dataset):
        # Randomly choose a pair of symbols (excluding the "0" separator)
        inputs = dataset[np.random.randint(len(dataset))]
        targets = inputs[::-1]
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # all or nothing reward
    def reward(ghu, targets, outputs):
        r = np.zeros(len(outputs))
        outputs = np.array([out for out in outputs[1:] if out != separator])
        if len(outputs) == len(targets): r[-1] = (targets == outputs).all()
        return r
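    # Worked check (constructed, not from the source): inputs ("1", "2") give
    # targets ("2", "1"); with per-step outputs ["0", "2", "1"], the first
    # step is skipped, no separators remain, and ["2", "1"] equals targets,
    # so r == [0., 0., 1.].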

    # Run optimization
    avg_rewards, avg_general, grad_norms = reinforce(
        ghu,
        num_epochs=100,
        episode_duration=3,
        training_example=training_example,
        testing_example=testing_example,
        reward=reward,
        task="swap",
        learning_rate=.1,
        distribution_variance_coefficient=distribution_variance_coefficient,
        verbose=1,
        save_file=save_file)
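# A hypothetical sweep over the variance coefficient (the values and file
# names are illustrative, not from the source):
if __name__ == "__main__":
    for dvc in (0.0, 0.05, 0.1):
        swap_trial(dvc, save_file="swap_dvc_%s.pkl" % dvc)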
Example #34
0
import itertools as it
import numpy as np
# (Codec, Controller, GatedHebbianUnit, default_initializer and reinforce
# come from this project's own modules; their import paths are not shown
# here.)


def trials(i, avgrew, avggen, gradnorm):
    print("***************************** Trial ", str(i + 1),
          " *******************************")

    # Configuration
    num_symbols = 10
    layer_sizes = {"rinp": 32, "rout": 32, "rtemp": 32}
    hidden_size = 32
    rho = .99
    plastic = []
    num_episodes = 500

    # Setup GHU
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           plastic=plastic,
                           batch_size=num_episodes)
    ghu.associate(associations)

    # Initialize layers
    separator = symbols[0]
    ghu.fill_layers(separator)

    # Generate dataset
    input_length = 5
    all_inputs = [
        np.array(inputs)
        for inputs in it.product(symbols[1:], repeat=input_length)
    ]
    split = int(.80 * len(all_inputs))

    # example generation
    def example(dataset):
        inputs = dataset[np.random.randint(len(dataset))]
        targets = np.array([max(inputs)])
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # all or nothing reward
    def reward(ghu, targets, outputs):
        r = np.zeros(len(outputs))
        r[-1] = (outputs[-1] == targets[0])
        return r

    # Run optimization
    avg_rewards, avg_general, grad_norms = reinforce(
        ghu,
        num_epochs=100,
        episode_duration=input_length,
        training_example=training_example,
        testing_example=testing_example,
        reward=reward,
        task="max",
        learning_rate=.1,
        verbose=1)

    gradnorm[i + 1] = grad_norms.tolist()
    avgrew[i + 1] = avg_rewards.tolist()
    avggen[i + 1] = avg_general.tolist()
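# A hypothetical driver (constructed, not from the source): `trials` fills
# the dictionaries it is passed, keyed by trial number i + 1.
if __name__ == "__main__":
    avgrew, avggen, gradnorm = {}, {}, {}
    for i in range(5):
        trials(i, avgrew, avggen, gradnorm)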