Example #1
 def __init__(self):
     """
     Instantiate the plugin and all its modules.
     """
     self._core = Core(self)
     self._interface = Interface(self)
     self._network = Network(self)
Example #2
def test_ntm_layer():
    state_size = 10
    memory_shape = (100, 5)
    batch_size = 7
    controller_num_layers = 1
    controller_hidden_size = 32
    input_size = 16
    T = 9
    controller_network = MLP((input_size, memory_shape[1]),
                             controller_num_layers, controller_hidden_size,
                             state_size)

    x = tf.placeholder(tf.float32, [batch_size, T, input_size])
    ntm_cell = NTMCell(controller_network, memory_shape, batch_size)
    ntm = Network(ntm_cell, x)
    output, final_state = ntm.output()
    assert output.get_shape()[0] == batch_size
    assert output.get_shape()[1] == T
    assert output.get_shape()[2] == state_size

    x_ = np.random.randn(batch_size, T, input_size)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    y_ = sess.run(output, feed_dict={x: x_})
    assert not np.isnan(y_).any()
    assert not np.isinf(y_).any()
Example #3
def test_ntm_gradients():
    state_size = 1
    memory_shape = (5, 1)
    batch_size = 10
    controller_num_layers = 1
    controller_hidden_size = 10
    input_size = 1
    n_batches = 20
    T = 2
    controller_network = MLP((input_size, memory_shape[1]),
                             controller_num_layers, controller_hidden_size,
                             state_size)


    x = tf.placeholder(tf.float32, [batch_size, T, input_size])
    x_ = np.random.randn(batch_size*n_batches, T, input_size)
    y_ = 2*x_ + 1.
    addr = ShortcircuitAddressing(memory_shape, batch_size)
    rh = ReadHead(state_size, memory_shape, addresser=addr, batch_size=batch_size,
                  hidden_size=2)
    #ntm_cell = NTMCell(controller_network, memory_shape, batch_size,
    #                   read_head=rh)
    ntm_cell = NTMCell(controller_network, memory_shape, batch_size)
    ntm = Network(ntm_cell, x)
    loss = lambda a, b: tf.nn.l2_loss(a - b)
    optimizer = tf.train.GradientDescentOptimizer(1e-4)
    ntm.compile(loss, optimizer)
    ntm.train(x_, y_, batch_size=batch_size, n_epochs=2)
Example #4
    def _evaluate_test(self):
        self.keep_prob = 1.0
        self.is_training = False
        self.net = Network(self.is_training)
        self.ten_accuracy = []
        self.epoch_accuracy = []
        self.test_writer = tf.summary.FileWriter(self.test_logs_path, graph=self.net.graph)

        # TODO: reset graph and update model
        with tf.Session(graph=self.net.graph) as sess:
            self._restore_checkpoint_or_init(sess)
            step_num = 1
            max_steps = FLAGS.epoch * 100
            while step_num <= max_steps:
                # Run one test batch per step and record its accuracy.
                gs, acc = self._test_step(sess)
                self._add_accuracy(step_num, gs, acc)
                step_num += 1

            self.keep_prob = 0.75
            self.is_training = True
            self.net = Network(self.is_training)
            self.net.saver.restore(sess, self.chkpt_file)
Example #5
def task_2():
    network = Network(1, [10], [3])
    population = network.populations[0]
    population.connect_randomly(0.3)
    cluster_number = 3
    output_populations = [
        population.neurons[i * 2:(i + 1) * 2]
        for i in range(0, cluster_number)
    ]
    print(output_populations)
    input_population = population.neurons[6:10]
    for neuron in population.neurons:
        neuron.set_current(0, TOTAL_TIME, 0)
    population.synapse.target_neurons = output_populations[0]
    population.learn_method = "rstdp"
    encode_task2(input_population)
    size_activity_neuron = [[] for i in range(0, cluster_number)]
    time = np.arange(0, TOTAL_TIME)
    for t in range(TOTAL_TIME):
        network.update_voltage(t)
        for index, output_neurons in enumerate(output_populations):
            activity = 0
            for neuron in output_neurons:
                activity += neuron.voltage[t]
            size_activity_neuron[index].append(activity / len(output_neurons))
    fig, ax = plt.subplots(1, 1, figsize=(50, 100))
    for i in range(cluster_number):
        ax.scatter(time, size_activity_neuron[i])
    plt.show()
Example #6
 def __init__(self, data):
     self._data = data
     self.network = Network()
     self.solution = None
     self.requirements = None
     self.id = self._data["id"]
     self.hash = hash(self.get_id())
     self.int_id = 0
Example #7
 def __init__(self, population_count, exc_neurons_count, inh_neurons_count):
     self.dt = DT
     self.current_time = 0
     self.total_time = TOTAL_TIME
     self.population_count = population_count
     self.exc_neurons_count = exc_neurons_count
     self.inh_neurons_count = inh_neurons_count
     self.network = Network(self.population_count, self.exc_neurons_count, self.inh_neurons_count)
Example #8
 def __init__(self):
     Network.__init__(self)
     self.n_conv_blocks      = None
     self.kernel_size        = None
     self.n_channels_first   = None
     self.dilation_rates     = None
     self.growth_block_end   = None
     self.strides_block_end  = None
     self.max_pooling        = None
Example #9
def main():
    net = Network()
    train(
        net=net,
        criterion=nn.CrossEntropyLoss(),
        optimizer=optim.RMSprop(net.parameters(), lr=0.001),
        num_epochs=NUM_EPOCHS
    )
    validate(net)
Example #10
def ten_arr_test(file_name):
    weights, topology = get_genotype_data_from_file(file_name + ".csv")
    net = Network(topology)
    net.set_weights(weights)

    while True:
        ins, out = get_test_ten_arr()
        print("Answer: " + ", ".join("{:.2f}".format(x)
                                     for x in net.process_input(ins)))
        print(net)
Example #11
def blockchain_factory():
    """
    Create a new blockchain with its network.
    """
    network = Network()
    bc = Blockchain(network)

    network.set_blockchain(bc)

    return bc, network
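A minimal, runnable sketch of the wiring blockchain_factory() performs, using hypothetical stubs for Network and Blockchain (the real classes are not shown in this snippet):

class Network:
    def __init__(self):
        self.blockchain = None

    def set_blockchain(self, bc):
        self.blockchain = bc

class Blockchain:
    def __init__(self, network):
        self.network = network

# Same wiring as blockchain_factory(): each object ends up holding the other.
network = Network()
bc = Blockchain(network)
network.set_blockchain(bc)
assert bc.network is network and network.blockchain is bc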
Example #12
 def __init__(self):
     Network.__init__(self)
     self.n_conv_blocks      = None
     self.kernel_size        = None
     self.n_channels_first   = None
     self.dilation_rates     = None
     self.growth_block_end   = None
     self.strides_block_end  = None
     self.max_pooling        = None
     self.n_lstmneurons      = None
     self.n_lstmlayers       = None
     self.bidirectional      = None
Example #13
def load_state(T, chkpt_path, device, network_temp=2):
    net = Network(T, temp=network_temp).to(device)
    optimizer = Adam(net.parameters(), lr=1e-4, weight_decay=1e-4)

    if chkpt_path is not None and os.path.exists(chkpt_path):
        checkpoint = torch.load(chkpt_path, map_location=torch.device(device))
        net.load_state_dict(checkpoint[MODEL_KEY])
        optimizer.load_state_dict(checkpoint[OPTIMIZER_KEY])
        games_trained = checkpoint[GAMES_TRAINED_KEY]
        replay_mem = checkpoint[REPLAY_MEM_KEY]
    else:
        games_trained = 0
        replay_mem = ReplayMemory()

    return net, optimizer, games_trained, replay_mem
Example #14
def task_2():
    network = Network(1, [12], [0])
    population = network.populations[0]
    population.add_layer(10)
    population.add_layer(2)
    population.connect_layer_fully()
    for neuron in population.neurons:
        neuron.set_current(0, TOTAL_TIME, 0)
    # encode(population, [1, 0, 1, 1, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1, 1, 0])
    encode(population, [1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 1, 1, 1, 1, 0])
    weights = dict()
    pre_synaptic_neurons = population.neurons[:10]
    post_synaptic_neurons = population.neurons[10:]
    for pre_synaptic_neuron in pre_synaptic_neurons:
        for post_synaptic_neuron in post_synaptic_neurons:
            if pre_synaptic_neuron not in weights.keys():
                weights[pre_synaptic_neuron] = dict()
            weights[pre_synaptic_neuron][post_synaptic_neuron] = list()
    for t in range(TOTAL_TIME):
        network.update_voltage(t)
        for pre_synaptic_neuron in pre_synaptic_neurons:
            for post_synaptic_neuron in post_synaptic_neurons:
                weights[pre_synaptic_neuron][post_synaptic_neuron].append(
                    population.synapse.adjacency[pre_synaptic_neuron]
                    [post_synaptic_neuron]['weight'])
    for neuron in population.neurons:
        if neuron in population.synapse.adjacency.keys():
            for post_synaptic_neuron in population.synapse.adjacency[
                    neuron].keys():
                print(population.synapse.adjacency[neuron]
                      [post_synaptic_neuron]['weight'],
                      end=" ")
            print()
    # fig, ax = plt.subplots(1, 1, figsize=(50, 30))
    # time = np.arange(0, TOTAL_TIME)
    # ax.plot(time, population.neurons[0].voltage)
    # ax.plot(time, population.neurons[10].voltage)
    # plt.show()
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(50, 100))
    time = np.arange(0, TOTAL_TIME)
    for pre_synaptic_neuron in pre_synaptic_neurons:
        for post_synaptic_neuron in post_synaptic_neurons:
            ax1.plot(time, weights[pre_synaptic_neuron][post_synaptic_neuron])
    # ax2.plot(time, population.neurons[0].voltage)
    ax2.plot(time, population.neurons[11].voltage)
    ax2.plot(time, population.neurons[10].voltage)
    plt.show()
Example #15
class Simulation:
    def __init__(self, population_count, exc_neurons_count, inh_neurons_count):
        self.dt = DT
        self.current_time = 0
        self.total_time = TOTAL_TIME
        self.population_count = population_count
        self.exc_neurons_count = exc_neurons_count
        self.inh_neurons_count = inh_neurons_count
        self.network = Network(self.population_count, self.exc_neurons_count, self.inh_neurons_count)

    def simulate(self):
        for population in self.network.populations:
            population.fully_connected_one_way()

        for t in range(1, self.total_time):
            self.network.update_voltage(t)
Example #16
 def __init__(self, network = None, wxId = wx.ID_ANY, populateDisplay = True):
     if network is None:
         title = Network().name()
     else:
         title = network.name()
     wx.Frame.__init__(self, None, wxId, title, size = (800, 600), style = wx.DEFAULT_FRAME_STYLE | wx.FULL_REPAINT_ON_RESIZE)
     
     self.Bind(wx.EVT_ACTIVATE, self.onActivate)
     self.Bind(wx.EVT_UPDATE_UI, self.onUpdateUI)
     self.Bind(wx.EVT_CLOSE, self.onClose)
     
     self.splitter = wx.SplitterWindow(self, wx.ID_ANY, style = wx.SP_LIVE_UPDATE)
     self.splitter.SetMinimumPaneSize(20)
     
     self._modified = False
     
     self.display = display.display.Display(self.splitter)
     self.display.autoVisualize = populateDisplay
     self.display.setNetwork(network)
     dispatcher.connect(self.networkDidChange, ('set', 'network'), self.display)
     dispatcher.connect(self.networkDidChangeSavePath, ('set', 'savePath'), network)
     dispatcher.connect(self.displayDidChange, dispatcher.Any, self.display)
     
     self._scriptLocals = self.scriptLocals()
     self._console = wx.py.shell.Shell(self.splitter, wx.ID_ANY, locals = self._scriptLocals, introText = gettext('Welcome to Neuroptikon.'))
     self._console.autoCompleteIncludeSingle = False
     self._console.autoCompleteIncludeDouble = False
     
     self.splitter.SplitHorizontally(self.display, self._console)
     self.splitter.SetSashGravity(1.0)
     
     sizer = wx.BoxSizer(wx.VERTICAL)
     sizer.Add(self.splitter, 1, wx.EXPAND)
     self.SetAutoLayout(True)
     self.SetSizer(sizer)
     
     self.finder = None
     
     self._progressNestingLevel = 0
     self._progressDialog = None
     self._progressDisplayTime = None
     self._progressMessage = None
     self._progressFractionComplete = None
     self._progressShouldContinue = True
     self._progressLastUpdate = datetime.datetime.now()
     self._progressUpdateDelta = datetime.timedelta(0, 0, 100000)    # Don't update the GUI more than 10 times a second.
     
     self.layoutClasses = {}
     
     self.SetMenuBar(self.menuBar())
     self.SetToolBar(self.toolBar())
     
     dispatcher.connect(self.onDisplayChangedHighlightOnlyWithinSelection, ('set', 'highlightOnlyWithinSelection'), self.display)
     
     if platform.system() == 'Darwin':
         # Have new windows cascade so they don't sit right on top of each other.
         carbon.RepositionWindow(self.MacGetTopLevelWindowRef(), 0, 4)   # 4 = kWindowCascadeOnMainScreen
     
     self.Show(1)
     self.splitter.SetSashPosition(-100)
Example #17
    def _evaluate_train(self):
        self.keep_prob = 0.75
        self.is_training = True
        self.net = Network(self.is_training)
        self.ten_accuracy = []
        self.epoch_accuracy = []
        self.train_writer = tf.summary.FileWriter(self.train_logs_path, graph=self.net.graph)
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.4
        with tf.Session(config=config, graph=self.net.graph) as sess:
            with tf.device("/device:GPU:0"):
                self._restore_checkpoint_or_init(sess)

                step_num = 1
                max_steps = FLAGS.epoch * 100
                while step_num <= max_steps:
                    if step_num % 10 == 0:
                        gs, acc = self._train_step(sess,
                                                   tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                                                   tf.RunMetadata())
                        self._add_accuracy(step_num, gs, acc)
                        save_path = self.net.saver.save(sess, self.chkpt_file)
                        print("Model saved in file: %s" % save_path)
                        if step_num % 100 == 0:
                            self._evaluate_test()
                    else:
                        gs, acc = self._train_step(sess)
                        print("acc is ",acc)
                        self._add_accuracy(step_num, gs, acc)
                    step_num += 1
Example #18
    def _evaluate_train(self):
        self.keep_prob = 0.75
        self.is_training = True
        self.net = Network(self.is_training)
        self.ten_accuracy = []
        self.epoch_accuracy = []
        self.train_writer = tf.summary.FileWriter(self.train_logs_path,
                                                  graph=self.net.graph)

        with tf.Session(graph=self.net.graph) as sess:
            self._restore_checkpoint_or_init(sess)

            step_num = 1
            max_steps = FLAGS.epoch * 100
            while step_num <= max_steps:
                if step_num % 10 == 0:
                    gs, acc = self._train_step(
                        sess,
                        tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
                        tf.RunMetadata())
                    self._add_accuracy(step_num, gs, acc)
                    save_path = self.net.saver.save(sess, self.chkpt_file)
                    print("Model saved in file: %s" % save_path)
                    if step_num % 100 == 0:
                        self._evaluate_test()
                else:
                    gs, acc = self._train_step(sess)
                    self._add_accuracy(step_num, gs, acc)
                step_num += 1
Example #19
    def _evaluate_train(self):
        self.keep_prob = 0.75
        self.is_training = True
        self.net = Network(self.is_training)
        self.train_writer = tf.summary.FileWriter(self.train_logs_path,
                                                  graph=self.net.graph)

        with tf.device("/gpu:0"):
            config = tf.ConfigProto()
            config.gpu_options.per_process_gpu_memory_fraction = 0.8
            # config.gpu_options.allow_growth = True

            with tf.Session(graph=self.net.graph, config=config) as sess:

                self._restore_checkpoint_or_init(sess)

                step_num = 1
                max_steps = FLAGS.max_steps
                while step_num <= max_steps:
                    if step_num % 1000 == 0:
                        gs = self._train_step(
                            sess,
                            tf.RunOptions(
                                trace_level=tf.RunOptions.FULL_TRACE),
                            tf.RunMetadata())
                        save_path = self.net.saver.save(
                            sess,
                            self.logs_dir + "/model.ckpt",
                            global_step=self.net.global_step)
                        print("Model saved in file: %s" % save_path)

                    else:
                        gs = self._train_step(sess)
                    step_num += 1
Example #20
def test_rnn():
    input_size = 1
    output_size = 1
    T = 10
    n_batches = 5
    batch_size = 5
    cell = RNNLayer(input_size, output_size, batch_size)
    x = tf.placeholder(tf.float32, [batch_size, T, input_size])
    y = tf.placeholder(tf.float32, [batch_size, T, output_size])
    network = Network(cell, x)
    x_ = np.random.randn(batch_size, T, input_size)
    y_ = 2*x_ + 1
    optimizer = tf.train.GradientDescentOptimizer(1e-4)
    loss = lambda a, b: tf.reduce_mean(tf.pow(a - b, 2))
    #loss = tf.reduce_mean(tf.pow(network.output()[0] - y, 2))
    network.compile(loss, optimizer)
    losses = network.train(x_, y_, batch_size=batch_size, verbose=False)
Example #21
 def start_evolution(self, nr_of_genotypes):
     self.genotypes = []
     temp_network = Network(self.topology)
     for i in range(nr_of_genotypes):
         new_genotype = Genotype([0] * temp_network.weight_count)
         new_genotype.set_rand_params(-5, 5)
         self.genotypes.append(new_genotype)
     self.start_evaluation()
Example #22
    def build(configuration):
        parameters = ParametersFactory.build(configuration.layers)
        forward_propagator = ForwardPropagator(configuration.layers)
        trainer = TrainerFactory.build(forward_propagator, configuration.input,
                                       configuration.output,
                                       configuration.iterations,
                                       configuration.learning_rate)

        return Network(forward_propagator, parameters, trainer)
Example #23
 def __init__(self, stop_event, config: Config):
     threading.Thread.__init__(self)
     self.config = config
     self._stop_event = stop_event
     self._scheduler = sched.scheduler(time.time, time.sleep)
     self._network = Network(config)
     self._scheduler.enter(self.get_scheduler_time(),
                           self.get_scheduler_priority(), self.update)
     self._scheduler.run()
Example #24
    def generate(self):
        """Generates a neural network with the given structure specified.

        :return: New neural network with specfied structure and random weights
        """
        self.networks_created += 1
        return Network(self.num_layers, self.num_inputs, self.num_neurons,
                       self.num_outputs, self.activation,
                       self.networks_created - 1)
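A hedged, self-contained sketch of the generate() pattern above; _Factory and the Network stub are hypothetical stand-ins, since the owning class is not shown in this snippet:

class Network:
    # Stub that only records the id argument derived from the counter.
    def __init__(self, num_layers, num_inputs, num_neurons, num_outputs,
                 activation, net_id):
        self.net_id = net_id

class _Factory:
    def __init__(self):
        self.num_layers, self.num_inputs = 3, 4
        self.num_neurons, self.num_outputs = 8, 2
        self.activation = "sigmoid"
        self.networks_created = 0

    def generate(self):
        # Same pattern as above: bump the counter, pass it back as the id.
        self.networks_created += 1
        return Network(self.num_layers, self.num_inputs, self.num_neurons,
                       self.num_outputs, self.activation,
                       self.networks_created - 1)

net = _Factory().generate()
assert net.net_id == 0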
Example #25
class Agent:
    def __init__(self, genotype, topology):
        self.genotype = genotype
        self.genotype.eval = []
        self.output = []
        self.network = Network(topology)
        self.network.set_weights(genotype.params)

    def __lt__(self, other):
        return self.genotype.fitness < other.genotype.fitness

    def evaluate(self, inputs, correct):
        network_answer = self.network.process_input(inputs)
        self.output = network_answer
        evaluation = Agent.eval_diff(network_answer, correct)
        self.genotype.eval.append(evaluation)
        return evaluation

    @staticmethod
    def eval_diff(answer, correct):
        diff_sum = 0
        for i in range(len(answer)):
            diff_sum = diff_sum + (answer[i] - correct[i])**2
        return diff_sum

    @staticmethod
    def eval_nr_incorrect(answer, correct):
        incorrect = 0
        for i in range(len(answer)):
            diff = correct[i] - answer[i]
            if abs(diff) > 0.01:
                incorrect = incorrect + 1
        return incorrect

    @staticmethod
    def eval_progress(answer, correct):
        sum_percent = 0
        for i in range(len(answer)):
            if correct[i] == 1:
                sum_percent = sum_percent + answer[i]
            else:
                sum_percent = sum_percent + (1 - answer[i])
        return sum_percent / len(answer)
Example #26
        def test_correct_shape(T):
            encoder = StateEncoder(T)
            prev_state = encoder.get_empty_state()
            curr_state = encoder.encode_state(chess.Board(), prev_state)
            curr_state = curr_state.unsqueeze(0)

            net = Network(M * T + L)
            out = net(curr_state)

            self.assertEqual(out[0].shape, torch.Size([1]))
            self.assertEqual(out[1].shape, torch.Size((1, 8, 8, 73)))
Example #27
def build_network(*args, input_dim=3, p0_z=None, z_dim=128, beta=None, skip_connection=True, variational=False,
                  use_kl=False, geo_initial=True):
    net = Network(*args, input_dim=input_dim, p0_z=p0_z, z_dim=z_dim, beta=beta, skip_connection=skip_connection,
                  variational=variational, use_kl=use_kl)
    if geo_initial:
        print("Perform geometric initialization!\n")
        for k, v in net.named_parameters():
            # Leave encoder parameters at their defaults; geometrically
            # initialize everything else.
            if 'encoder' in k:
                continue
            if 'weight' in k:
                std = np.sqrt(2) / np.sqrt(v.shape[0])
                nn.init.normal_(v, 0.0, std)
            if 'bias' in k:
                nn.init.constant_(v, 0)
            if 'l_out.weight' in k:
                std = np.sqrt(np.pi) / np.sqrt(v.shape[1])
                nn.init.constant_(v, std)
            if 'l_out.bias' in k:
                nn.init.constant_(v, -0.5)
    return net
Example #28
 def testRoundForNode_part1(self):
     adjacencyMatrix = [[0, 1, 1, 1],
                        [1, 0, 1, 1],
                        [1, 1, 0, 1],
                        [1, 1, 1, 0]]
     
     # create the network
     network = Network(adjacencyMatrix)
     
     alg = LogStar(network)
     nodes = network.ndList
     
     # 1 - check that the initialization succeeded
     
     network.algorithm = alg
     
     for node in nodes:
         alg.initiateNode(node)
         
     for node in nodes:
         self.assertEqual(node.memory['state'], 'competitor')
         self.assertEqual(node.memory['activity'], 'initiate loop')
Example #29
def main():
    n = Network()
    l1 = Layer(4)
    l2 = Layer(1)
    c = Connection(l1, l2)
    n.addHiddenLayer(l1)
    n.addHiddenLayer(l2)
    n.addConnection(c)
Example #30
def load_pretrained_weights(args):
    model = Network(args.classes)
    model_path = args.model_path.replace('RealWorld', args.domain)
    pre = torch.load(model_path)
    new_pre = OrderedDict()
    for p in pre:
        if ('classifier' in p):
            # print('----', p)
            continue
        else:
            new_pre[p] = pre[p]
    model.load_state_dict(new_pre, strict=False)

    for name, p in model.named_parameters():
        if ('classifier' in name):
            continue
        else:
            p.requires_grad = False

    torch.nn.init.xavier_uniform_(model.classifier.fc8.weight)
    del new_pre

    return model
Example #31
def task_1():
    network = Network(1, [12], [0])
    population = network.populations[0]
    population.add_layer(10)
    population.add_layer(2)
    population.connect_layer_fully()
    population.synapse.target_neurons = [population.neurons[10]]
    population.learn_method = "rstdp"
    for neuron in population.neurons:
        neuron.set_current(0, TOTAL_TIME, 0)
    encode(population, [1, 0, 1, 1, 1, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 1, 1, 1, 0])
    pre_synaptic_neurons, post_synaptic_neurons, weights = initialize_weight(
        population)

    for t in range(TOTAL_TIME):
        network.update_voltage(t)
        update_weight(pre_synaptic_neurons, post_synaptic_neurons, weights,
                      population)
        if t == TOTAL_TIME // 2:
            population.synapse.target_neurons = [population.neurons[11]]
    show_weight_plot(population, pre_synaptic_neurons, post_synaptic_neurons,
                     weights)
Example #32
def solve(train, test):
    net = Network(LAYERS, ACTIVATION, ACTIVATION_PRIME)

    net.gradient_descent(train, EPOCHS, BATCH_SIZE, L_RATE, EPOCH_FEEDBACK,
                         test)

    print("FINAL COST: {0}".format(net.evaluate(test)))

    print_result("files/result.txt", str(net.evaluate(test)))
Example #33
def task_3():
    network = Network(1, [12], [1])
    population = network.populations[0]
    population.add_layer(10)
    population.add_layer(3)
    population.connect_layer_fully()
    neuron_10 = population.neurons[10]
    neuron_11 = population.neurons[11]
    neuron_12 = population.neurons[12]
    population.synapse.connect(neuron_12, neuron_10)
    population.synapse.connect(neuron_12, neuron_11)
    for neuron in population.neurons:
        neuron.set_current(0, TOTAL_TIME, 0)
    encode(population, [1, 0, 1, 1, 1, 0, 0, 0, 0, 0])
    for t in range(TOTAL_TIME):
        network.update_voltage(t)
    for neuron in population.neurons:
        if neuron in population.synapse.adjacency.keys():
            for post_synaptic_neuron in population.synapse.adjacency[
                    neuron].keys():
                print(
                    population.synapse.adjacency[neuron][post_synaptic_neuron],
                    end=" ")
            print()
Example #34
File: test.py Project: johntsr/cMix
    def setUpClass(cls):
        # create a cMix mixnet of 3 mix-nodes and 10 users
        cls.b = 5
        cls.nodes = 3
        cls.usersNum = 10

        cls.network = Network()
        cls.network.setNetworkHandler(NetworkHandler(cls.b))
        for _ in range(0, cls.nodes):
            cls.network.addMixNode(MixNode(cls.b))

        cls.network.init()

        cls.users = [User("user") for _ in range(0, cls.usersNum)]
        for user in cls.users:
            cls.network.addUser(user)
Example #35
    def openNetwork(self, filePath):
        """
        Open the previously saved network at the indicated path.
        
        Any displays will be restored or a default one will be opened.
        """

        from neuroptikon_frame import NeuroptikonFrame

        # TODO: raise an existing window if the network is already open

        try:
            xmlTree = ElementTree.parse(filePath)

            # Instantiate the network
            networkElement = xmlTree.find("Network")
            network = Network._fromXMLElement(networkElement)
            if network is None:
                raise ValueError, gettext("Could not load the network")
            network.setSavePath(filePath)
            network.setModified(False)
            self._networks.add(network)

            # Instantiate any displays
            for frameElement in xmlTree.findall("DisplayWindow"):
                frame = NeuroptikonFrame._fromXMLElement(frameElement, network=network)
                if frame is None:
                    raise ValueError, gettext("Could not create one of the displays")
                frame.Show(True)
                frame.Raise()
                self._frames.append(frame)

            # Create a default display if none were specified in the file.
            if len(network.displays) == 0:
                self.displayNetwork(network).zoomToFit()
        except:
            raise
Example #36
from algorithm.MIS_for_planar_graphs import PlanarMIS
from network.network import Network
from graph_generators.n_unit_disk_graph import getUnitDiskGraph

from graph_tool.all import *

adjacencyMatrix = getUnitDiskGraph(10)

# create the network
network = Network(adjacencyMatrix)

alg = PlanarMIS()

network.algorithm = alg
alg.network = network

nodes = network.ndList

alg.divideIntoSubgraphs()

# drawing section
g = Graph(directed=False)

# add nodes - the indexing will line up
for node in nodes:
    g.add_vertex()
Example #37
import importlib
agentModule = importlib.import_module(agentModuleName)
cascadeModule = importlib.import_module(cascadeModuleName)

# initialize the network and cascade (diffusion) process model
cascade = cascadeModule.CascadeModel(Config)

avetime = 0
aveutility = 0
numSamples = 10

for s in range(numSamples):
    random.seed(s)

    nw = Network(Config, cascade, random)
    
    # initialize agent
    agent = agentModule.MyAgent(0, Config)
    
    # run simulations
    budget = int(ConfigSectionMap(Config, "AgentParameters")["budget"])
    
    start = time.time()
    for t in range(0, timeSteps):
        selected = EnforceSelectionFeasibility(int(math.floor(budget)),
                                               agent.selectNodes(copy.deepcopy(nw), t))
        adopters = nw.update(selected)

        # subtract selection from remaining budget
        budget -= len(selected)
        budget *= 1 + interest
Example #38
from algorithm.log_star_MIS import LogStar
from network.network import Network
from graph_tool.all import *
from graph_generators.n_unit_disk_graph import getUnitDiskGraph

n = 30
adjacencyMatrix = getUnitDiskGraph(n)

# create the network
network = Network(adjacencyMatrix)

alg = LogStar()

network.algorithm = alg
alg.network = network

nodes = network.ndList

for node in nodes:
    alg.initiateNode(node)

network.executeAlgorithm()

# drawing section
g = Graph(directed=False)

# add nodes - the indexing will line up
for node in nodes:
    g.add_vertex()

# add edges - here we walk the incidence matrix