示例#1
0
def main():
    """Interactive state-tracking loop.

    Loads config, KB, vocabularies, ontology and embeddings, then reads
    user utterances from stdin one line at a time, runs the tracker model
    and prints the predicted dialog state / response via print_ret.
    Runs until stdin reaches EOF.
    """
    with open(CONFIG_FN) as f:
        conf = json.load(f)

    # sent_groups is consumed elsewhere in this module, hence the global.
    global sent_groups

    with open(conf["sent_groups"]) as f:
        sent_groups = json.load(f)["groups"]

    kb = load_kb(conf["kb"], 'name')
    sys_vocab, sys_word2idx = load_sys_vocab(conf["sys_vocab"])

    # Codec for system-side tokens (decoding model output to words).
    sys_codec = Codec(sys_vocab, sys_word2idx)

    onto, onto_idx = load_ontology(conf["ontology"])

    word2idx, embed = load_embed(**conf)

    # User-side codec: only needs word->index, so vocab list is empty.
    usr_codec = Codec([], word2idx)

    trk_model, slot_len_sum = load_tracker_model(onto, embed, conf, kb)

    trk_model.eval()

    hidden = trk_model.state_tracker.init_hidden()
    kb_vec = Variable(torch.zeros(1, conf["kb_indicator_len"]))

    sentence_generator = SentenceGenerator(kb, onto, sent_groups)

    for line in iter(sys.stdin.readline, ''):
        inp = usr_codec.encode(line.strip())

        inp = Variable(torch.LongTensor([
            inp,
        ]))

        # NOTE(review): the KB indicator argument is passed as None even
        # though kb_vec is maintained below — confirm whether kb_vec was
        # meant to be fed back into the tracker here.
        sentvecs, states_reps, states_preds, hidden, sent_grp_preds = trk_model(
            inp, None, hidden)

        criteria = to_search_criteria(states_preds, onto)
        ret, kb_vec = get_kb_result(kb, criteria, conf["kb_indicator_len"])

        # print criteria, kb_vec

        # Flatten to (1, -1); sentvecs/states_reps are not used after this
        # point in this function (kept for parity with the training code).
        sentvecs = sentvecs.view(1, -1)
        states_reps = states_reps.view(1, -1)

        print_ret(states_preds, sent_grp_preds, onto, sentence_generator)
示例#2
0
def comparaison():
    """Compare Huffman compression factors across languages.

    Builds a Huffman codec from a French text, then encodes growing
    prefixes of a French and a German text with the same codec and plots
    the size ratio (original bytes / encoded bytes) for each language.
    """
    text = 'Nous sommes en guerre. Aussi, comme je vous l’ai dit jeudi, pour nous protéger et contenir la dissémination du virus, mais aussi préserver nos systèmes de soins, nous avons pris ce matin, entre Européens, une décision commune. Dès demain midi, les frontières à l’entrée de l’Union européenne et de l’espace Schengen seront fermées z w . Concrètement, tous les voyages entre les pays non européens et l’Union européenne seront suspendus pendant trente jours. Les Françaises et les Français qui sont actuellement à l’étranger et souhaitent rentrer pourront bien entendu rejoindre leur pays. Nous devons prendre cette décision parce que je vous demande ce soir d’importants efforts et que nous devons, dans la durée, nous protéger. Et je veux dire à tous nos compatriotes qui vivent à l’étranger que là aussi, en bon ordre, ils doivent se rapprocher des ambassades et consulats et que nous organiserons pour celles et ceux qui le souhaitent, et là où c’est nécessaire, le rapatriement. ? ok '.lower(
    )
    # Sorry for the inline text, it is not very PEP-8 friendly.
    builder = TreeBuilder(text)
    binary_tree = builder.tree()
    codec = Codec(binary_tree)

    texte_french = 'Mes chers compatriotes, alors que je vous parle, les résultats connus nous montrent que vous avez décidé de me confier la plus haute charge de l Etat. J exprime ma profonde gratitude à toutes celles et à tous ceux qui m ont accordé leur confiance et je salue tous les autres avec respect. Mes chers compatriotes, je serai le Président de tous les français'.lower(
    )
    texte_allemand = 'Die Stadt Paris ist fur viele ein Traumziel, das sie wenigstens einmal im Leben besuchen mochten. Wer mochte nicht einmal unter dem Eiffelturm stehen und in den Pariser Himmel hoch schauen? Und dann zu Fuss und mit dem Aufzug wenigstens auf halbe Hohe gelangen und den Blick uber die ganze franzosische Hauptstadt geniessen?'.lower(
    )
    compress_fr, compress_de = [], []
    for i in range(max(len(texte_french), len(texte_allemand))):
        texte_fr = texte_french[:i]  # prefix of length i
        texte_de = texte_allemand[:i]
        taille_fr = sys.getsizeof(texte_fr)  # system size of this text
        taille_de = sys.getsizeof(texte_de)
        # Compression factor = original size / encoded size.
        compress_fr.append(taille_fr /
                           sys.getsizeof(codec.encode_bin(texte_fr)))
        compress_de.append(taille_de /
                           sys.getsizeof(codec.encode_bin(texte_de)))
    plt.close()
    plt.title("Comparaison de l'effet de la langue sur la taux de compression")
    plt.plot(compress_fr,
             label=" facteur de compression du texte en français ")
    plt.plot(compress_de, label=" facteur de compression du texte en allemand")
    plt.legend()
    plt.show()
示例#3
0
    def __init__(self):
        """Bootstrap the Qt application.

        Loads optional manifest overrides, constructs the global asset
        singletons, installs the zh_CN translator when available, opens
        the main window and blocks in the Qt event loop until exit.
        """
        from codec import Codec
        from window import Window
        from system import System
        from datajar import DataJar
        from filesystem import FileSystem

        # Manifest overrides are optional: fall back to an empty dict when
        # the file is missing or not valid JSON. (Narrowed from a bare
        # ``except:`` which also swallowed KeyboardInterrupt/SystemExit.)
        try:
            manifest = json.load(codecs.open('manifest.json', 'r', 'utf-8'))
        except (IOError, OSError, ValueError):
            manifest = {}

        # Only override keys that already exist in the default manifest.
        for key in assets.manifest:
            if key in manifest:
                assets.manifest[key] = manifest[key]

        self.app = QApplication(sys.argv)
        self.app.setApplicationName(assets.manifest['name'])
        self.app.setApplicationVersion(assets.manifest['version'])

        # Global singletons shared through the assets module.
        assets.sys = System()
        assets.codec = Codec()
        assets.fs = FileSystem()
        assets.dataJar = DataJar()

        translator = QTranslator()
        if translator.load("zh_CN.qm"):
            self.app.installTranslator(translator)

        self.window = Window(None, assets.manifest['path'] + 'index.html')

        # Blocks until the Qt event loop terminates, then exits the process.
        sys.exit(self.app.exec_())
示例#4
0
文件: cli.py 项目: fidonium/s0ngbrew
def cli():
    """\
    Codec CLI for instant usage: Read and write.
    """
    options = parse_args()
    codec = Codec(**vars(options))
    codec.run()
示例#5
0
def check_add_codec(ip):
    """Return the codec registered for `ip`, creating one if none exists."""
    existing = get_codec_by_ip(ip)
    if existing:
        return existing

    # Not registered yet: create a new codec and remember it.
    created = Codec(ip)
    audio_codecs.append(created)
    return created
示例#6
0
 def find_codec(self, name):
     """Scan self.path on the device (via adb) for entries matching `name`
     and register a Codec (with its widgets) for each match."""
     command = "adb shell find " + self.path + " -maxdepth 1"
     listing = os.popen(command).read()
     for entry in listing.split('\n'):
         if name in entry:
             matched = Codec(name, entry)
             matched.find_widgets()
             self.codecs.append(matched)
示例#7
0
def main():
    """Huffman-coding demo.

    Round-trips a sample text through the binary Huffman codec, then
    plots how three compression factors evolve as the input grows.
    """
    text = "a dead dad ceded a bad babe a beaded abaca bed"

    # Analyse character frequencies in `text`
    # to build a binary (Huffman) tree.
    builder = TreeBuilder(text)
    binary_tree = builder.tree()

    # Hand the binary tree to an encoder/decoder,
    codec = Codec(binary_tree)
    # which can encode
    encoded = codec.encode_bin(text)
    # and decode.
    decoded = codec.decode_bin(encoded)
    # If this assertion fails there is a serious problem with the code.
    assert text == decoded

    # Display the result.
    print(f"{text}\n{encoded}")
    if decoded != text:
        print("OOPS")
    text = 'ab'
    compress_encode = []
    compress_huffman = []
    compress_binary = []

    for rang in range(300):

        texte = text * rang  # text of growing length
        taille = sys.getsizeof(texte)  # system size of this text
        # Encode once per representation: the original called
        # codec.encode / codec.encode_bin twice per iteration.
        taille_str = sys.getsizeof(codec.encode(texte))
        taille_bin = sys.getsizeof(codec.encode_bin(texte))
        compress_encode.append(taille / taille_str)
        compress_huffman.append(taille / taille_bin)
        compress_binary.append(taille_str / taille_bin)

    # Plotting
    plt.close()
    plt.title('Facteurs de compression')
    plt.plot(compress_encode,
             label="Facteur de compression par \n  Codec.encode sortie str")
    plt.plot(
        compress_huffman,
        label=
        "Facteur de compression de \n l'algorithme de Huffman (Avec Binaire)")
    plt.plot(compress_binary,
             label="Facteur de compression \n d'une str encoded  en binaire ")
    plt.legend()
    plt.show()
示例#8
0
 def __init__(self, engine, protocol, sock=None, addr=None, observer=None, auth=None):
     """Wrap an established socket in a protocol connection.

     engine   -- owning engine; its eventManager gets the read registration
     protocol -- wire protocol, handed to the Codec together with `auth`
     sock     -- connected socket (registered for READ events below)
     addr     -- remote address, stored for bookkeeping
     observer -- optional observer object stored on the connection
     auth     -- credentials forwarded to the Codec
     """
     self.codec = Codec(protocol, auth)
     self.engine = engine
     self.connectionState = ConnectionState.CONNECTED
     self.session = None
     self.addr = addr
     self.observer = observer
     # Accumulates raw bytes until full messages can be decoded.
     self.msgBuffer = b''
     # Heartbeat interval in seconds.
     self.heartbeatPeriod = 30.0
     self.msgHandlers = []
     self.sock = sock
     # Timer registrations are created lazily elsewhere.
     self.heartbeatTimerRegistration = None
     self.expectedHeartbeatRegistration = None
     # Dispatch handle_read whenever the socket becomes readable.
     self.socketEvent = FileDescriptorEventRegistration(self.handle_read, sock, EventType.READ)
     self.engine.eventManager.registerHandler(self.socketEvent)
示例#9
0
文件: swap.py 项目: garrettkatz/ghu
if __name__ == "__main__":
    print("*******************************************************")

    # Configuration
    num_symbols = 4
    layer_sizes = {"rinp": 64, "rout": 64, "rtmp": 64}
    hidden_size = 32
    rho = .99
    plastic = []
    num_episodes = 1000

    # Setup GHU
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    codec = Codec(layer_sizes, symbols, rho=rho)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # training example generation
    def training_example():
示例#10
0
from codec import TreeBuilder, Codec

text = "a dead dad ceded a bad babe a beaded abaca bed"

# Build a Huffman tree from the character frequencies of `text`.
builder = TreeBuilder(text)
binary_tree = builder.tree()

# Hand the binary tree to an encoder/decoder
codec = Codec(binary_tree)
# which can encode
encoded = codec.encode(text)
# and decode.
decoded = codec.decode(encoded)
# If decoding does not round-trip, the check below prints OOPS.

# Display the result.
print(f"{text}\n{decoded}")
if decoded != text:
    print("OOPS")
示例#11
0
文件: ghu.py 项目: garrettkatz/ghu
    pathways.update({k: ("m", "m") for k in ["inc-m", "dec-m"]})
    associations += [(k, str(a), str((a + x) % num_addresses))
                     for k, x in [("inc-m", 1), ("dec-m", -1)]
                     for a in range(num_addresses)]

    return pathways, associations


if __name__ == "__main__":

    # Tiny smoke-test configuration: two 3-unit registers and three
    # pathways (r0->r0, r0->r1, r1->r1) over a binary symbol set.
    layer_sizes = {"r0": 3, "r1": 3}
    pathways = {0: ("r0", "r0"), 1: ("r1", "r0"), 2: ("r1", "r1")}
    hidden_size = 5
    batch_size = 2

    codec = Codec(layer_sizes, "01")
    controller = Controller(layer_sizes, pathways, hidden_size)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=batch_size)

    # Identity associations: every pathway maps "0"->"0" and "1"->"1".
    ghu.associate([
        (0, "0", "0"),
        (1, "0", "0"),
        (2, "0", "0"),
        (0, "1", "1"),
        (1, "1", "1"),
        (2, "1", "1"),
    ])
示例#12
0
def main():
    """Interactive dialog loop.

    Reads user utterances from stdin, tracks dialog state with the
    tracker model, queries the KB, and generates a system response with
    the conditioned generator. Runs until stdin reaches EOF.
    """
    with open(CONFIG_FN) as f:
        conf = json.load(f)

    args = parse_args()

    kb = load_kb(conf["kb"], 'name')
    sys_vocab, sys_word2idx = load_sys_vocab(conf["sys_vocab"])

    # Codec for system-side tokens (decodes generator output to words).
    sys_codec = Codec(sys_vocab, sys_word2idx)

    onto, onto_idx = load_ontology(conf["ontology"])

    word2idx, embed = load_embed(**conf)

    # User-side codec: only needs word->index, so vocab list is empty.
    usr_codec = Codec([], word2idx)

    trk_model, slot_len_sum = load_tracker_model(onto, embed, conf)
    cond_net, generator = load_generator_model(conf, args.cond_net,
                                               args.gen_net, slot_len_sum,
                                               len(sys_vocab))

    cond_net.eval()
    generator.eval()

    trk_model.eval()

    hidden = trk_model.state_tracker.init_hidden()
    kb_found = Variable(torch.zeros(1, conf["kb_indicator_len"]))

    def gen_sent(cond):
        '''Greedily decode one system sentence conditioned on `cond`.'''
        hidden = generator.init_hidden()
        inp = Variable(torch.LongTensor([[sys_word2idx['<sos>']]]))

        sent_out = []

        for i in range(MAX_SENT_LEN):
            out, hidden = generator(inp, cond, hidden)

            # Greedy decoding: always take the highest-scoring word.
            topv, topi = out.data.topk(1)
            out_word = int(topi[0][0])

            if out_word == sys_word2idx['<eos>']:
                break
            inp = Variable(torch.LongTensor([[out_word]]))

            sent_out.append(out_word)

        return sys_codec.decode(sent_out)

    for line in iter(sys.stdin.readline, ''):
        inp = usr_codec.encode(line.strip())

        inp = Variable(torch.LongTensor([
            inp,
        ]))

        sentvecs, states_reps, states_preds, hidden = trk_model(
            inp, kb_found, hidden)

        criteria = to_search_criteria(states_preds, onto)
        ret, kb_vec = get_kb_result(kb, criteria, conf["kb_indicator_len"])
        # NOTE(review): kb_vec is never written back into kb_found, so the
        # tracker and cond_net always see the initial all-zero KB
        # indicator — confirm whether `kb_found = kb_vec` was intended.

        sentvecs = sentvecs.view(1, -1)
        states_reps = states_reps.view(1, -1)

        for slot in states_preds:
            states_preds[slot] = states_preds[slot].view(1, -1)

        cond = cond_net(sentvecs, states_reps, states_preds, kb_found)

        # Parenthesized so this works under both Python 2 and Python 3
        # (original used the Python 2 print statement).
        print(gen_sent(cond))
示例#13
0
    GT = []
    labels = []

    try:
        GT = data['GT']
        labeling = data['labels']
    except:
        print("[x] No GT found, No labeling found")

    refinement = 'indeg_guided'
    ksize = 23

    s = Stats(f"/tmp/{dset_name}.csv")

    c = Codec(0, 0.5, 20)
    c.verbose = False

    #f = []
    #g = []
    #thresholds = np.arange(0,0.7, 0.05)

    thresholds = np.arange(0, 0.5, 0.05)
    for t in thresholds:
        print(f"### t={t} ###")
        #G = (data['G']*10**4)>(t*np.max(data['G']))
        G = data['G'] > 0

        n = G.shape[0]

        k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(
示例#14
0
##############################################
############## Main script code ##############
##############################################

refinement = 'indeg_guided'

vals = [0.1, 0.2, 0.5, 0.7]
first = True
n = 1000

# Query graph, the bigger one: 8 random clusters with low inter/intra
# noise, compressed and reduced into the `query` summary matrix.
clusters = pd.random_clusters(n, 8)
inter = 0.1
intra = 0.1
oG, oGT, olabeling = pd.custom_cluster_matrix(n, clusters, inter, 1, intra, 0)
c = Codec(0.15, 0.5, 5)
k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(oG, refinement)
query = c.reduced_matrix(oG, k, epsilon, classes, reg_list)

# Database: a fresh codec used to build the comparison graphs below.
c = Codec(0.2, 0.5, 5)
num_c = 8
for r in range(5):
    for i in vals:

        clusters = pd.random_clusters(n, num_c)
        G, GT, labeling = pd.custom_cluster_matrix(n, clusters, i, 1, i, 0)
        k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(
            G, refinement)
        red = c.reduced_matrix(G, k, epsilon, classes, reg_list)
示例#15
0
def basic_plastic_trial(num_episodes, save_file):
    """Train a GHU with a single plastic pathway ("rinp<m") via REINFORCE.

    num_episodes -- batch size (episodes per epoch) for the GHU
    save_file    -- path forwarded to `reinforce` for result dumps
    Returns nothing; learning curves are persisted by `reinforce`.
    """

    # Configuration
    register_names = ["rinp", "rout", "m"]
    layer_sizes = {q: 32 for q in register_names}
    hidden_size = 32
    rho = .99
    plastic = ["rinp<m"]
    # Pathways deleted below to constrain the architecture.
    remove_pathways = ["rinp<rout", "m<rinp", "m<rout", "rout<m"]

    # Setup GHU
    num_symbols = 3
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(register_names, symbols)
    for p in remove_pathways:
        pathways.pop(p)
    # Drop associations that reference the removed pathways.
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes,
                            pathways,
                            hidden_size,
                            plastic,
                            nonlinearity='relu')
    # controller = Controller(layer_sizes, pathways, hidden_size, plastic, nonlinearity='tanh')
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # training example generation
    episode_duration = 3

    def training_example():
        # Two distinct non-separator symbols; target echoes the first one
        # on the final step.
        inputs = np.random.choice(symbols[1:], size=2, replace=False)
        targets = np.array(["0", "0", inputs[0]])
        return inputs, targets

    def reward(ghu, targets, outputs):
        # All or nothing
        r = np.zeros(len(outputs))
        if lvd(outputs, targets)[0] == 0: r[-1] = +1.
        return r

    # ################### Sanity check
    inputs = [["2", "1"]]
    correct_choices = [
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "m": "m<m"
        }, [1.0]),
        ({
            "rinp": "rinp<m",
            "rout": "rout<rout",
            "m": "m<m"
        }, [0.0]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "m<m"
        }, [0.0]),
    ]
    # ghu.clone().dbg_run(inputs, episode_duration, correct_choices)
    # input("???????")
    # ################### Sanity check

    # Run optimization
    avg_rewards, grad_norms = reinforce(
        ghu,
        num_epochs=500,
        episode_duration=episode_duration,
        training_example=training_example,
        reward=reward,
        task="basic_plastic",
        learning_rate=.05,
        # line_search_iterations = 5,
        # distribution_cap = .1,
        # likelihood_cap = .7,
        # distribution_variance_coefficient = 0.05,
        # choices=correct_choices, # perfect rewards with this
        verbose=1,
        save_file=save_file)
示例#16
0
def echo_trial(episode_duration, save_file):
    """Train a GHU on the echo task and return its learning curves.

    episode_duration -- steps per episode; the first half is input
    save_file        -- path forwarded to `reinforce` for result dumps
    Returns (avg_rewards, avg_general, grad_norms) from `reinforce`.
    """

    # Configuration
    num_symbols = 10
    layer_sizes = {"rinp": 32, "rout": 32, "rtmp": 32}
    hidden_size = 32
    rho = .99
    plastic = []
    num_episodes = 250

    # Setup GHU
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # Generate dataset: every input is all-separator except one symbol
    # placed at one position within the input half of the episode.
    input_length = int(episode_duration / 2)
    all_inputs = []
    for symbol in symbols[1:]:
        for t in range(input_length):
            inputs = [separator] * input_length
            inputs[t] = symbol
            all_inputs.append(inputs)
    split = int(.80 * len(all_inputs))  # 80/20 train/test split

    # example generation
    def example(dataset):
        # Randomly choose echo symbol (excluding 0 separator)
        inputs = dataset[np.random.randint(len(dataset))]
        targets = [inp for inp in inputs if inp != separator]
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # all or nothing reward
    def reward(ghu, targets, outputs):
        r = np.zeros(len(outputs))
        outputs = np.array(
            [out for out in outputs[input_length:] if out != separator])
        if len(outputs) == len(targets): r[-1] = (targets == outputs).all()
        return r

    # # correct choices for debugging
    # correct_choices = \
    #     [({"rinp": "rinp<rout", "rout": "rout<rinp"}, [])]*2 + \
    #     [({"rinp": "rinp<rinp", "rout": "rout<rout"}, [])]*(episode_duration-2)
    # # run it to debug:
    # inputs, targets = zip(*[training_example() for b in range(ghu.batch_size)])
    # ghu.run(episode_duration, inputs, targets, reward, choices=correct_choices, verbose=3)
    # input("????")

    # Run optimization
    avg_rewards, avg_general, grad_norms = reinforce(
        ghu,
        num_epochs=200,
        episode_duration=episode_duration,
        training_example=training_example,
        testing_example=testing_example,
        reward=reward,
        task="echo",
        learning_rate=.1,
        verbose=1,
        save_file=save_file)

    return avg_rewards, avg_general, grad_norms
示例#17
0
def reverse_trial(num_episodes, save_file):
    """Train a GHU to reverse a symbol list, then measure generalization.

    num_episodes -- batch size (episodes per epoch) for the GHU
    save_file    -- result-dump path; after training, slot 2 of the dump
                    is overwritten with per-episode test rewards (R_gen)
    """

    # Configuration
    register_names = ["rinp", "rout"]
    layer_sizes = {q: 64 for q in register_names + ["m"]}
    hidden_size = 64
    rho = .99
    plastic = ["rout<m"]
    # remove_pathways = ["rinp<rout", "m<rinp", "m<rout", "rout<m"]
    remove_pathways = ["rinp<rout", "rinp<m", "m<rinp", "m<rout"]
    # remove_pathways = []
    input_keys = None

    # Setup GHU
    num_addresses = 4
    symbols = [str(a) for a in range(num_addresses)]
    pathways, associations = turing_initializer(register_names, num_addresses)
    for p in remove_pathways:
        pathways.pop(p)
    # Drop associations that reference the removed pathways.
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes,
                            pathways,
                            hidden_size,
                            plastic,
                            input_keys=input_keys,
                            nonlinearity='relu')
    # controller = Controller(layer_sizes, pathways, hidden_size, plastic, nonlinearity='tanh')
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # Dataset of all possible input lists (each terminated by separator)
    min_length = 4
    max_length = 4
    episode_duration = 2 * max_length
    all_inputs = [
        np.array(inputs + (separator, ))
        for list_length in range(min_length, max_length + 1)
        for inputs in it.product(symbols[1:], repeat=list_length)
    ]
    split = int(.80 * len(all_inputs))  # 80/20 train/test split

    # example generation
    def example(dataset):
        inputs = dataset[np.random.randint(len(dataset))]
        # Target is the input list, minus the separator, reversed.
        targets = inputs[:-1][::-1]
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # all or nothing reward calculation
    def reward(ghu, targets, outputs):
        r = np.zeros(len(outputs))
        outputs = outputs[len(targets) - 1:]
        outputs = np.array([out for out in outputs if out != separator])
        if len(outputs) == len(targets): r[-1] = (outputs == targets).all()
        return r

    # ################### Sanity check
    correct_choices = [
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "inc-m"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "inc-m"
        }, ["rout<m"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "inc-m"
        }, ["rout<m"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "m": "inc-m"
        }, ["rout<m"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "m": "dec-m"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<m",
            "m": "dec-m"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<m",
            "m": "dec-m"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<m",
            "m": "dec-m"
        }, []),
    ]
    # ################### Sanity check

    # Run optimization
    avg_rewards, _, grad_norms = reinforce(
        ghu,
        num_epochs=500,
        episode_duration=episode_duration,
        training_example=training_example,
        # testing_example = testing_example,
        testing_example=None,
        reward=reward,
        task="reverse",
        learning_rate=.1,
        # line_search_iterations = 5,
        # distribution_cap = .1,
        # likelihood_cap = .7,
        distribution_variance_coefficient=0.05,
        # choices = correct_choices, # perfect reward with this
        verbose=1,
        save_file=save_file)

    # Assess generalization similarly after run
    print("Cloning GHU for generalization...")
    ghu_gen = ghu.clone()
    print("Sampling problem instances...")
    inputs, targets = zip(
        *[testing_example() for b in range(ghu_gen.batch_size)])
    print("Running on test data...")
    outputs, rewards = ghu_gen.run(episode_duration,
                                   inputs,
                                   targets,
                                   reward,
                                   verbose=1)
    R_gen = rewards.sum(axis=1)

    # Overwrite file dump with R_gen in place of avg_general
    with open(save_file, "rb") as f:
        result = pk.load(f)
    result = list(result)
    result[2] = R_gen
    result = tuple(result)
    with open(save_file, "wb") as f:
        pk.dump(result, f)
示例#18
0
intranoise_levels = np.arange(0.05, 0.35, 0.05)
num_cs = [4, 8, 12, 16, 20]

for inter in tqdm(internoise_levels, desc="eta_1"):
    for intra in tqdm(intranoise_levels, desc="eta_2"):
        for num_c in tqdm(num_cs, desc="num_c"):

            #print(f"\n### n:{n} num_c:{num_c} ###")

            #print("Generation of G ...")
            clusters = pd.fixed_clusters(n, num_c)
            G, GT, labels = pd.custom_cluster_matrix(n, clusters, inter, 1,
                                                     intra, 0)

            # epsilon parameter for the approx Alon et al
            c = Codec(0.285, 0.285, 1)
            c.verbose = False

            #print("Generation of compressed/reduced graph ...")
            tm = time.time()
            k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(
                G, "indeg_guided")
            red = c.reduced_matrix(G, k, epsilon, classes, reg_list)
            t_compression = time.time() - tm
            #print(f"s:{t_compression:.2f}")

            # Precomputation of the eigenvalues
            #print("Eigenvalues of G ... ")
            tm = time.time()
            G_eig, aux = metrics.eigs(G)
            t_G_eig = time.time() - tm
示例#19
0
# Ground truth and labels are optional in the dataset; proceed without
# them when absent. (Narrowed from a bare ``except:`` — missing keys
# raise KeyError; a bare except also swallowed KeyboardInterrupt.)
try:
    GT = data['GT']
    labeling = data['labels']
except KeyError:
    print("[x] No GT found, No labeling found")

G = data['G']

n = G.shape[0]
repetitions = 1
refinement = 'indeg_guided'
ksize = 23

# Per-dataset CSV stats sink and the compression codec.
s = Stats(f"/tmp/{dset_name}.csv")
c = Codec(0.1, 0.4, 40)

for r in range(repetitions):
    print(f"### r={r} ###")

    tm = time.time()
    k, epsilon, classes, sze_idx, reg_list, nirr = c.compress(G, refinement)
    tcompression = time.time() - tm

    sze = c.decompress(G, 0, classes, k, reg_list)
    tdecompression = time.time() - tm

    fsze = c.post_decompression(sze, ksize)
    tpostdecompression = time.time() - tm

    red = c.reduced_matrix(G, k, epsilon, classes, reg_list)
示例#20
0
文件: max.py 项目: garrettkatz/ghu
def trials(i, avgrew, avggen, gradnorm):
    """Run one training trial of the max task and record its curves.

    i        -- zero-based trial index (stored under key i+1)
    avgrew   -- dict-like sink for average-reward curves
    avggen   -- dict-like sink for generalization curves
    gradnorm -- dict-like sink for gradient-norm curves
    """
    print("***************************** Trial ", str(i + 1),
          " *******************************")

    # Configuration
    num_symbols = 10
    layer_sizes = {"rinp": 32, "rout": 32, "rtemp": 32}
    hidden_size = 32
    rho = .99
    plastic = []
    num_episodes = 500

    # Setup GHU
    symbols = [str(a) for a in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           plastic=plastic,
                           batch_size=num_episodes)
    ghu.associate(associations)

    # Initialize layers
    separator = symbols[0]
    ghu.fill_layers(separator)

    # Generate dataset: all length-5 sequences of non-separator symbols.
    input_length = 5
    all_inputs = [
        np.array(inputs)
        for inputs in it.product(symbols[1:], repeat=input_length)
    ]
    split = int(.80 * len(all_inputs))  # 80/20 train/test split

    # example generation
    def example(dataset):
        inputs = dataset[np.random.randint(len(dataset))]
        # Target is the (string-wise) maximum input symbol.
        targets = np.array([max(inputs)])
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # all or nothing reward
    def reward(ghu, targets, outputs):
        r = np.zeros(len(outputs))
        r[-1] = (outputs[-1] == targets[0])
        return r

    # Optimization settings
    avg_rewards, avg_general, grad_norms = reinforce(
        ghu,
        num_epochs=100,
        episode_duration=input_length,
        training_example=training_example,
        testing_example=testing_example,
        reward=reward,
        task="max",
        learning_rate=.1,
        verbose=1)

    # Record this trial's curves under its 1-based index.
    gradnorm[i + 1] = grad_norms.tolist()
    avgrew[i + 1] = avg_rewards.tolist()
    avggen[i + 1] = avg_general.tolist()
示例#21
0
# Route ERROR-level records to the dedicated exception log file.
file_handler_except = log.FileHandler(sigs.except_path)
file_handler_except.setLevel(log.ERROR)
file_handler_except.setFormatter(formatter_except)
logger_except.addHandler(file_handler_except)
# </editor-fold>

# <editor-fold desc="globals">
BP = g.BREAK_POINT
SET = g.OP_SET

# 81-character puzzle string; '.' marks an empty cell.
PUZZLE_DEFAULT =\
'.....627........5....54..311.....5..97.8.2.64..6.....764..29....8........526.....'
# </editor-fold>

# Module-level codec and puzzle state, reassigned by load()/solve paths.
codec = Codec()
encoded_puzzle = ''
solved_puzzle = ''


def load(cb, puz=PUZZLE_DEFAULT):
    try:
        global codec
        global encoded_puzzle
        global solved_puzzle

        if sigs.is_grid:
            puzzle_list = puz
            num_list = [
                item if len(item) == 1 else '.' for item in puzzle_list
            ]
示例#22
0
def trials(i, avgrew, gradnorm):
    """Run one training trial of the reverse task and plot its curves.

    i        -- zero-based trial index (stored under key i+1; also names
                the saved figure "reverse<i+1>.png")
    avgrew   -- dict-like sink for average-reward curves
    gradnorm -- dict-like sink for gradient-norm curves
    """
    print("***************************** Trial ", str(i + 1),
          "*******************************")

    num_addresses = 4
    register_names = ["rinp", "rout"]
    num_episodes = 5000

    layer_sizes = {q: 128 for q in register_names + ["m"]}
    hidden_size = 32
    # plastic = ["%s<m"%q for q in register_names]
    plastic = ["rinp<m"]

    symbols = [str(a) for a in range(num_addresses)]
    pathways, associations = turing_initializer(register_names, num_addresses)

    # constrain pathways inductive bias
    remove_pathways = ["rout<m", "m<rout"]
    for p in remove_pathways:
        pathways.pop(p)
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))

    codec = Codec(layer_sizes, symbols, rho=.999)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)

    # Sanity check
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           plastic=plastic,
                           batch_size=num_episodes)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # training example generation
    list_symbols = 4
    min_length = 3
    max_length = 3

    def training_example():

        # Distinct random symbols; target is the reversed sequence.
        list_length = np.random.randint(min_length, max_length + 1)
        inputs = np.array([separator] * (list_length))
        inputs[:] = np.random.choice(symbols[1:list_symbols],
                                     size=list_length,
                                     replace=False)
        targets = inputs[::-1]
        return inputs, targets

    # # reward calculation from LVD
    # def reward(ghu, targets, outputs):
    #     # Assess reward: negative LVD after separator filtering
    #     outputs_ = [out for out in outputs if out != separator]
    #     l, _ = lvd(outputs_, targets)
    #     return -l

    def reward(ghu, targets, outputs):
        # Partial credit along the LVD diagonal: +1 per matching step,
        # -2 per mismatch, all credited to the final time step.
        outputs_ = outputs[len(targets) - 1:]
        #zeros = [o for o in outputs[len(targets)-1:] if o==separator]
        #totzeros = len(zeros)
        r = np.zeros(len(outputs))
        # if len(outputs_)==0:
        #     r[-1] -= 2*(len(outputs[len(targets)-1:])+1)
        # else:
        _, d = lvd(outputs_, targets)
        for i in range(1, d.shape[0]):
            r[-1] += 1. if (i < d.shape[1]
                            and d[i, i] == d[i - 1, i - 1]) else -2.
            #r[-1] -= 0.1*totzeros
        return r

    filename = "reverse" + str(i + 1) + ".png"
    avg_rewards, grad_norms = reinforce(ghu,
                                        num_epochs=1000,
                                        episode_duration=2 * max_length - 1,
                                        training_example=training_example,
                                        reward=reward,
                                        task="reverse",
                                        learning_rate=.03,
                                        verbose=2)

    gradnorm[i + 1] = grad_norms.tolist()
    avgrew[i + 1] = avg_rewards.tolist()

    # Two stacked panels: reward curve on top, gradient norms below.
    pt.subplot(2, 1, 1)
    pt.plot(avg_rewards)
    pt.title("Learning curve of reverse")
    pt.ylabel("Avg Reward")
    pt.subplot(2, 1, 2)
    pt.plot(grad_norms)
    pt.xlabel("Epoch")
    pt.ylabel("||Grad||")
    pt.savefig(filename)
示例#23
0
def recall_trial(num_episodes, save_file):
    """Train a GHU on a key-value recall task and pickle results to save_file.

    Each input list is [k1, v1, k2, v2, k, "0"]: two key->value bindings
    followed by a query key k (one of k1/k2).  The target is the single
    value bound to the queried key.  After REINFORCE training, the learned
    controller is cloned and evaluated on held-out examples, and those
    generalization rewards overwrite index 2 of the pickled result tuple.
    """

    # Configuration
    layer_sizes = {"rinp": 64, "rout": 64, "rtmp": 64}
    hidden_size = 64
    rho = .99
    # Only the rout<rtmp pathway is Hebbian-plastic.
    plastic = ["rout<rtmp"]
    # remove_pathways = ["rinp<rout", "rinp<rtmp", "rtmp<rout"]
    remove_pathways = []

    # Setup GHU
    num_symbols = 5
    chars = "abcdefghi"
    numbs = "123456789"
    # Alphabet: key letters, the "0" separator, then value digits.
    symbols = chars[:num_symbols] + "0" + numbs[:num_symbols - 1]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    # Drop any explicitly removed pathways and their associations.
    for p in remove_pathways:
        pathways.pop(p)
    associations = list(
        filter(lambda x: x[0] not in remove_pathways, associations))
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes,
                            pathways,
                            hidden_size,
                            plastic,
                            nonlinearity='relu')
    # controller = Controller(layer_sizes, pathways, hidden_size, plastic, nonlinearity='tanh')
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # Initialize layers
    separator = "0"
    ghu.fill_layers(separator)

    # Dataset of all possible input lists:
    # (k1,v1) and (k2,v2) are distinct bindings; k queries one of them.
    all_inputs = [
        np.array([k1, v1, k2, v2, k, "0"])
        for (k1, k2) in it.permutations(chars[:num_symbols], 2)
        for (v1, v2) in it.permutations(numbs[:num_symbols - 1], 2)
        for k in [k1, k2]
    ]
    input_length = 6
    output_window = 2
    episode_duration = input_length + output_window
    # 80/20 train/test split over the enumerated inputs.
    split = int(.80 * len(all_inputs))

    # example generation
    def example(dataset):
        # Draw one input uniformly at random from the given subset.
        inputs = dataset[np.random.randint(len(dataset))]
        # Value at index 1 if the query matches the first key, else index 3;
        # the list index keeps targets a length-1 array.
        targets = inputs[[1 if inputs[0] == inputs[4] else 3]]
        return inputs, targets

    def training_example():
        return example(all_inputs[:split])

    def testing_example():
        return example(all_inputs[split:])

    # reward calculation based on leading LVD at individual steps
    def reward(ghu, targets, outputs):
        # all or nothing at final time-step
        r = np.zeros(len(outputs))
        # Keep only non-separator outputs emitted after the input was read.
        outputs = np.array(
            [out for out in outputs[input_length - 1:] if out != separator])
        if len(outputs) == len(targets): r[-1] = (targets == outputs).all()
        return r

    # ################### Sanity check
    # Hand-crafted per-time-step gate choices (pathway gates, learning
    # gates); passing these via the commented `choices=` kwarg below is
    # expected to achieve perfect rewards.
    correct_choices = [
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "rtmp": "rtmp<rinp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "rtmp": "rtmp<rtmp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "rtmp": "rtmp<rinp"
        }, ["rout<rtmp"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "rtmp": "rtmp<rtmp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rout",
            "rtmp": "rtmp<rinp"
        }, ["rout<rtmp"]),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rtmp",
            "rtmp": "rtmp<rtmp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "rtmp": "rtmp<rtmp"
        }, []),
        ({
            "rinp": "rinp<rinp",
            "rout": "rout<rinp",
            "rtmp": "rtmp<rtmp"
        }, []),
    ]
    # ################### Sanity check

    # Run optimization
    avg_rewards, avg_general, grad_norms = reinforce(
        ghu,
        num_epochs=500,
        episode_duration=episode_duration,
        training_example=training_example,
        testing_example=None,
        reward=reward,
        task="recall",
        learning_rate=.1,
        # line_search_iterations = 5,
        # distribution_cap = .1,
        # likelihood_cap = .7,
        distribution_variance_coefficient=.05,
        # choices=correct_choices, # perfect rewards with this
        verbose=1,
        save_file=save_file)

    # Assess generalization similarly after run
    print("Cloning GHU for generalization...")
    ghu_gen = ghu.clone()
    print("Sampling problem instances...")
    # One held-out example per batch element.
    inputs, targets = zip(
        *[testing_example() for b in range(ghu_gen.batch_size)])
    print("Running on test data...")
    outputs, rewards = ghu_gen.run(episode_duration,
                                   inputs,
                                   targets,
                                   reward,
                                   verbose=1)
    # Total reward per episode on the held-out set.
    R_gen = rewards.sum(axis=1)

    # Overwrite file dump with R_gen in place of avg_general
    with open(save_file, "rb") as f:
        result = pk.load(f)
    result = list(result)
    result[2] = R_gen
    result = tuple(result)
    with open(save_file, "wb") as f:
        pk.dump(result, f)
# ----- Example #24 -----
def swap_trial(distribution_variance_coefficient, save_file):
    """Train a GHU to swap a pair of symbols, dumping results to save_file.

    An input is an ordered pair of distinct non-separator digits; the
    target is the same pair in reverse order.  Reward is all-or-nothing
    at the final time-step.  Results are written by reinforce itself.
    """

    # Trial configuration.
    num_symbols = 10
    layer_sizes = {"rinp": 32, "rout": 32, "rtmp": 32}
    hidden_size = 32
    rho = .99
    plastic = []
    num_episodes = 100

    # Build the GHU over the digit alphabet with all-to-all pathways.
    symbols = [str(digit) for digit in range(num_symbols)]
    pathways, associations = default_initializer(  # all to all
        layer_sizes.keys(), symbols)
    codec = Codec(layer_sizes, symbols, rho=rho, ortho=True)
    controller = Controller(layer_sizes, pathways, hidden_size, plastic)
    ghu = GatedHebbianUnit(layer_sizes,
                           pathways,
                           controller,
                           codec,
                           batch_size=num_episodes,
                           plastic=plastic)
    ghu.associate(associations)

    # "0" is the blank/separator symbol; pre-fill every layer with it.
    separator = "0"
    ghu.fill_layers(separator)

    # All ordered pairs of distinct non-separator symbols, 80/20 split.
    all_inputs = list(it.permutations(symbols[1:], 2))
    split = int(.80 * len(all_inputs))

    def draw_example(dataset):
        # Uniformly draw a pair; the target is the pair swapped.
        pair = dataset[np.random.randint(len(dataset))]
        return pair, pair[::-1]

    def training_example():
        return draw_example(all_inputs[:split])

    def testing_example():
        return draw_example(all_inputs[split:])

    def reward(ghu, targets, outputs):
        # All-or-nothing at the last step: 1 iff the non-separator outputs
        # after the first step exactly match the target pair.
        r = np.zeros(len(outputs))
        produced = np.array([out for out in outputs[1:] if out != separator])
        if len(produced) == len(targets):
            r[-1] = (targets == produced).all()
        return r

    # Run optimization.
    avg_rewards, avg_general, grad_norms = reinforce(
        ghu,
        num_epochs=100,
        episode_duration=3,
        training_example=training_example,
        testing_example=testing_example,
        reward=reward,
        task="swap",
        learning_rate=.1,
        distribution_variance_coefficient=distribution_variance_coefficient,
        verbose=1,
        save_file=save_file)
def main(args, arch):
    """Run a black-box (ZOO/AutoZOOM-family) attack against a source model.

    Loads the source (and optionally target) classifiers for the chosen
    dataset, optionally loads an autoencoder codec for the *_ae attack
    variants, builds the requested attack object, and runs it over the
    test loader against the held-out test classifiers.

    Args:
        args: parsed argument namespace (attribute access throughout).
        arch: architecture identifier forwarded to the attack framework.

    Raises:
        ValueError: on an unsupported dataset / source_arch combination
            (previously this fell through to a NameError on `model`).
    """
    adv_models = None
    train_loader, test_loader = create_loaders(args, root='../data')

    # Dataset-specific model loading.
    if args.dataset == 'cifar':
        args.nc, args.h, args.w = 3, 32, 32
        args.input_size = 32
        model, l_test_classif_paths = load_all_classifiers(args, load_archs=[args.source_arch])
        model_type = args.source_arch
        if args.target_arch is not None:
            model_target, l_test_classif_paths = load_all_classifiers(args, load_archs=[args.target_arch])
            model_type = args.target_arch
            # Only the transfer-test paths are needed; free the target net.
            del model_target
            torch.cuda.empty_cache()
    elif args.dataset == 'mnist':
        args.input_size = 28
        if args.source_arch == 'natural':
            model, l_test_classif_paths = load_all_classifiers(args, load_archs=["natural"])
            model_type = 'natural'
        elif args.source_arch == 'ens_adv':
            # Load every ensemble-adversarial helper model onto the device.
            adv_model_names = args.adv_models
            adv_models = [None] * len(adv_model_names)
            for i in range(len(adv_model_names)):
                type = get_model_type(adv_model_names[i])
                adv_models[i] = load_model(args, adv_model_names[i], type=type).to(args.dev)

            path = os.path.join(args.dir_test_models, "pretrained_classifiers",
                                args.dataset, "ensemble_adv_trained", args.model)
            model = load_model(args, args.model, type=args.type)
            l_test_classif_paths = [path]
            model_type = 'Ensemble Adversarial'
        else:
            # Fail fast instead of hitting a NameError on `model` below.
            raise ValueError("Unsupported source_arch for mnist: %s" % args.source_arch)
    else:
        raise ValueError("Unsupported dataset: %s" % args.dataset)

    model.to(args.dev)
    model.eval()
    # Sanity-check source-model accuracy before attacking.
    test_classifier(args, model, args.dev, test_loader, epoch=1)
    print("Testing on %d Test Classifiers" %(len(l_test_classif_paths)))

    # attack related settings
    if args.attack_method == "zoo" or args.attack_method == "autozoom_bilin":
        if args.img_resize is None:
            args.img_resize = args.input_size
            print("Argument img_resize is not set and not using autoencoder, set to image original size:{}".format(
                args.img_resize))

    codec = None
    if args.attack_method == "zoo_ae" or args.attack_method == "autozoom_ae":
        # NOTE(review): `args.resize` here vs `args.img_resize` elsewhere —
        # confirm which attribute the Codec constructor actually expects.
        codec = Codec(args.input_size, IN_CHANNELS[args.dataset],
                      args.compress_mode, args.resize, use_tanh=args.use_tanh)
        # Fix: `codec_path` was an undefined name; the path lives on args
        # (see the format string below, which already reads args.codec_path).
        codec.load_codec(args, args.codec_path)
        codec.cuda()
        decoder = codec.decoder
        args.img_resize = decoder.input_shape[1]
        print("Loading autoencoder: {}, set the attack image size to:{}".format(args.codec_path, args.img_resize))

    # setup attack
    if args.attack_method == "zoo":
        blackbox_attack = ZOO(model, args.dataset, args)
    elif args.attack_method == "zoo_ae":
        blackbox_attack = ZOO_AE(model, args.dataset, args, decoder)
    elif args.attack_method == "autozoom_bilin":
        blackbox_attack = AutoZOOM_BiLIN(model, args.dataset, args)
    elif args.attack_method == "autozoom_ae":
        # Fix: args is a namespace, not a dict — use attribute access
        # (matches every other args.dataset use in this function).
        blackbox_attack = AutoZOOM_AE(model, args.dataset, args, decoder)
    target_str = "untargeted" if  args.attack_type!="targeted" else "targeted_{}".format(args.target_type)
    attack_framework = AutoZoomAttackFramework(args, test_loader)
    attack_framework.attack_dataset_images(args, blackbox_attack, arch, model,
            codec, l_test_classif_paths=l_test_classif_paths,
            adv_models=adv_models)
    model.cpu()