Example #1
class Webserver(object):

    def __init__(self):
        cherrypy.engine.subscribe('stop', self.stop)
        self.net = Network(WEBSERVER_IP, WEBSERVER_PORT)
        # runs at startup


    def stop(self):
        # runs at shutdown
        pass

    @cherrypy.expose
    def index(self):
        # exposed method -- handles requests to /
        return base.render()

    def doit(self, action, value):
        # drive the LEDs
        data = dict()
        data["key"] = action
        data["value"] = value
        self.net.send(data, SERVER_IP, SERVER_PORT)

    @cherrypy.expose
    def set(self, value):
        self.doit("set", value)
        return self.index()

    @cherrypy.expose
    def get(self, value):
        self.doit("get", value)
        tmp = self.net.recv()
        # should be recv
        return tmp
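For orientation, a minimal sketch of how a class like this is typically served with CherryPy (the WEBSERVER_IP/WEBSERVER_PORT constants and the Network helper are assumed to be defined elsewhere in the project):

if __name__ == '__main__':
    # Hypothetical entry point: bind CherryPy to the configured address and
    # expose the Webserver class at the site root.
    cherrypy.config.update({'server.socket_host': WEBSERVER_IP,
                            'server.socket_port': int(WEBSERVER_PORT)})
    cherrypy.quickstart(Webserver(), '/')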
Example #2
def create_system(options, full_system, system, piobus=None, dma_ports=[]):

    system.ruby = RubySystem()
    ruby = system.ruby

    # Create the network object
    (network, IntLinkClass, ExtLinkClass, RouterClass, InterfaceClass) = Network.create_network(options, ruby)
    ruby.network = network

    protocol = buildEnv["PROTOCOL"]
    exec "import %s" % protocol
    try:
        (cpu_sequencers, dir_cntrls, topology) = eval(
            "%s.create_system(options, full_system, system, dma_ports,\
                                    ruby)"
            % protocol
        )
    except:
        print "Error: could not create sytem for ruby protocol %s" % protocol
        raise

    # Create the network topology
    topology.makeTopology(options, network, IntLinkClass, ExtLinkClass, RouterClass)

    # Initialize network based on topology
    Network.init_network(options, network, InterfaceClass)

    # Create a port proxy for connecting the system port. This is
    # independent of the protocol and kept in the protocol-agnostic
    # part (i.e. here).
    sys_port_proxy = RubyPortProxy(ruby_system=ruby)
    if piobus is not None:
        sys_port_proxy.pio_master_port = piobus.slave

    # Give the system port proxy a SimObject parent without creating a
    # full-fledged controller
    system.sys_port_proxy = sys_port_proxy

    # Connect the system port for loading of binaries etc
    system.system_port = system.sys_port_proxy.slave

    setup_memory_controllers(system, ruby, dir_cntrls, options)

    # Connect the cpu sequencers and the piobus
    if piobus != None:
        for cpu_seq in cpu_sequencers:
            cpu_seq.pio_master_port = piobus.slave
            cpu_seq.mem_master_port = piobus.slave

            if buildEnv["TARGET_ISA"] == "x86":
                cpu_seq.pio_slave_port = piobus.master

    ruby.number_of_virtual_networks = ruby.network.number_of_virtual_networks
    ruby._cpu_ports = cpu_sequencers
    ruby.num_of_sequencers = len(cpu_sequencers)

    # Create a backing copy of physical memory in case required
    if options.access_backing_store:
        ruby.access_backing_store = True
        ruby.phys_mem = SimpleMemory(range=system.mem_ranges[0], in_addr_map=False)
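In gem5 configuration scripts this helper is normally reached through configs/ruby/Ruby.py; a rough sketch of the calling side (option handling heavily abbreviated, and only meaningful inside gem5's Python environment):

# Sketch of a caller; `parser` and `system` are assumed to be the usual
# objects built by a gem5 config script such as se.py.
import Ruby
Ruby.define_options(parser)
(options, args) = parser.parse_args()
system.mem_mode = 'timing'   # Ruby expects a timing memory mode
Ruby.create_system(options, full_system=False, system=system)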
Example #3
 def set_response(self):
     if self.from_iso.is_network_request():
         Network.set_response(self)
     elif self.from_iso.is_transaction_request():
         self.set_transaction_response()
     elif self.from_iso.is_reversal_request():
         self.set_reversal_response()
Example #4
    def breed(self, mother, father):
        """Make two children as parts of their parents.

        Args:
            mother (dict): Network parameters
            father (dict): Network parameters

        Returns:
            (list): Two network objects

        """
        children = []
        for _ in range(2):

            child = {}

            child['activation'] = random.choice([mother.network['activation'], father.network['activation']])
            child['optimizer'] = random.choice([mother.network['optimizer'], father.network['optimizer']])
            child['epochs'] = random.choice([mother.network['epochs'], father.network['epochs']])
            layer_parent = random.choice([mother, father])
            child['nb_layers'] = layer_parent.network['nb_layers']
            child['nb_neurons'] = []
            child['nb_neurons'].extend(layer_parent.network['nb_neurons'])


            # Now create a network object.
            network = Network(self.nn_param_choices)
            network.create_set(child)

            if self.mutate_chance > random.random():
                network = self.mutate(network)

            children.append(network)

        return children
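A hypothetical call site for this crossover step (the Optimizer wrapper and the parent objects are assumptions inferred from the self.* attributes and the .network dict accesses above):

# Hypothetical usage; `Optimizer` and `population` are placeholders.
optimizer = Optimizer(nn_param_choices, mutate_chance=0.2)
mother, father = population[0], population[1]   # two evaluated Network objects
children = optimizer.breed(mother, father)      # list of two new Network objects
population.extend(children)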
Example #5
def train(job_id, border, n_hidden_layer, eta):
    print "Job ID: %d" % job_id
    metric_recorder = MetricRecorder(config_dir_path='.', job_id=job_id)
    C = {
        'X_dirpath' : '../../../data/train/*',
        'y_dirpath' : '../../../data/train_cleaned/',
        'mini_batch_size' : 500,
        'batchsize' : 500000,
        'limit' : 30,
        'epochs' : 100,
        'patience' : 20000,
        'patience_increase' : 2,
        'improvement_threshold' : 0.995,
        'validation_frequency' : 5000,
        'lmbda' : 0.0,
        'training_size' : None,
        'validation_size' : None,
        'algorithm' : 'RMSProp'
    }

    training_data = BatchProcessor(
        X_dirpath='../../../data/train/*',
        y_dirpath='../../../data/train_cleaned/',
        batchsize=C['batchsize'],
        border=border,
        limit=C['limit'],
        dtype=theano.config.floatX)

    validation_data = BatchProcessor(
        X_dirpath='../../../data/valid/*',
        y_dirpath='../../../data/train_cleaned/',
        batchsize=C['batchsize'],
        border=border,
        limit=C['limit'],
        dtype=theano.config.floatX)

    C['training_size'] = len(training_data)
    C['validation_size'] = len(validation_data)
    print "Training size: %d" % C['training_size']
    print "Validation size: %d" % C['validation_size']

    metric_recorder.add_experiment_metainfo(constants=C)
    metric_recorder.start()

    n_in = (2*border+1)**2
    net = Network([FullyConnectedLayer(n_in=n_in, n_out=n_hidden_layer),
                   FullyConnectedLayer(n_in=n_hidden_layer, n_out=1)],
                  C['mini_batch_size'])

    result = net.train(tdata=training_data, epochs=C['epochs'],
                     mbs=C['mini_batch_size'], eta=eta,
                     vdata=validation_data, lmbda=C['lmbda'],
                     momentum=None, patience_increase=C['patience_increase'],
                     improvement_threshold=C['improvement_threshold'],
                     validation_frequency=C['validation_frequency'],
                     metric_recorder=metric_recorder)

    print 'Time = %f' % metric_recorder.stop()
    print 'Result = %f' % result
    return float(result)
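An illustrative direct invocation of this job (parameter values are made up; in the original project the job is presumably launched by a hyper-parameter search driver that supplies job_id, border, n_hidden_layer and eta):

# Illustrative call: border=2 yields (2*2+1)**2 = 25 input pixels per patch.
if __name__ == '__main__':
    score = train(job_id=1, border=2, n_hidden_layer=50, eta=0.05)
    print('Validation result: %f' % score)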
Example #6
def trainnetwork():

    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

    network = Network([1200, 30, 10])
    network.SGD(training_data, 30, 10, .5, test_data=test_data)
    network.save("network.txt")
Example #7
def get_sample_from_network_file(network_file):
    """Given a network_file path, it creates the sample value as its id

    1: 23.6438
    2: 23.4968
    ...

    >>> print get_sample_from_network_file('/Users/smcho/code/PyCharmProjects/contextAggregator/test_files/data/10_100_10_10/tree/tree10_10_2_0.txt')
    0: 0
    1: 1
    2: 2
    3: 3
    4: 4
    5: 5
    6: 6
    7: 7
    8: 8
    9: 9
    """
    if not os.path.exists(network_file):
        raise RuntimeError("No file %s exists" % network_file)
    n = Network(network_file)
    ids = n.get_host_ids()
    result = []
    for id in ids[0:-1]:
        result.append("%d: %d\n" % (id, id))
    result.append("%d: %d" % (ids[-1], ids[-1]))
    return "".join(result)
Example #8
def generate_network_list(nn_param_choices):
    """Generate a list of all possible networks.

    Args:
        nn_param_choices (dict): The parameter choices

    Returns:
        networks (list): A list of network objects

    """
    networks = []

    # This is silly.
    for nbn in nn_param_choices['nb_neurons']:
        for nbl in nn_param_choices['nb_layers']:
            for a in nn_param_choices['activation']:
                for o in nn_param_choices['optimizer']:

                    # Set the parameters.
                    network = {
                        'nb_neurons': nbn,
                        'nb_layers': nbl,
                        'activation': a,
                        'optimizer': o,
                    }

                    # Instantiate a network object with set parameters.
                    network_obj = Network()
                    network_obj.create_set(network)

                    networks.append(network_obj)

    return networks
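An example of the parameter-choice dict this exhaustive enumerator expects (the four keys come from the loops above; the concrete values are illustrative):

# Illustrative parameter grid; keys mirror the loops in generate_network_list().
nn_param_choices = {
    'nb_neurons': [64, 128, 256],
    'nb_layers': [1, 2, 3],
    'activation': ['relu', 'tanh'],
    'optimizer': ['adam', 'sgd'],
}
networks = generate_network_list(nn_param_choices)
print(len(networks))   # 3 * 3 * 2 * 2 = 36 candidate networks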
Example #9
def train_mnist_worker(params):
    net_id = params.get('net-id', 'nn')
    layers = [784]
    layers.extend([int(i) for i in params.get('layers', [15])])
    layers.append(10)
    net_params                    = {}
    net_params['epochs']          = int(params.get('epochs', 1))
    net_params['mini_batch_size'] = int(params.get('mini-batch-size', 4))
    net_params['eta']             = float(params.get('eta', 0.1))
    net_params['lmbda']           = float(params.get('lmbda', 0.0001))
    net_params['layers']          = layers

    redis.set(redis_key('params', net_id), json.dumps(net_params))
    redis.set(redis_key('status', net_id), 'train_mnist: started')

    net = Network(layers)
    training_data, validation_data, test_data = load_data_wrapper()
    redis.set(redis_key('status', net_id), 'train_mnist: training with mnist data')
    net.SGD(training_data, net_params['epochs'],
                           net_params['mini_batch_size'],
                           net_params['eta'],
                           net_params['lmbda'])

    redis.set(redis_key('data', net_id), net.tostring())
    redis.set(redis_key('status', net_id), 'train_mnist: trained')
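The params mapping this worker consumes could look like the following (keys mirror the params.get() calls above; values are illustrative and arrive as strings, matching the int()/float() conversions):

# Illustrative request payload for train_mnist_worker().
params = {
    'net-id': 'nn-demo',
    'layers': ['30'],          # hidden layers only; 784 and 10 are added around them
    'epochs': '3',
    'mini-batch-size': '10',
    'eta': '0.5',
    'lmbda': '0.0001',
}
train_mnist_worker(params)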
Example #10
    def __init__(self, scen_inst) :
        Network.__init__(self, scen_inst)

        ''' For each node in the scenario '''
        for node in self._scen_inst.get_nodes() :
            ''' Add a wireless interface to the node (eth0) '''
            node.get('interfaces')['eth0'] = Model(type='wireless', range=20, ssid='wlan0')
Example #11
class Aging(object):
    '''
    classdocs
    '''

    def __init__(self, run_num, cyclic_data):
        '''
        Aging class manages network through cycles.
        '''
        # The network of this aging instance.
        self.network = Network(START_NUM_AGENT, cyclic_data)
        # The instance that stores the cycle values for a network for all cycles.
        # The array that keeps the number of agents to be added to the network for each cycle. 
        # The number of agents is determined randomly by poisson distribution.
        self.agent_entry_array = random.poisson(LAMBDA_POISSON, NUMBER_OF_CYCLES + 1)
        self.run_cycles(run_num, cyclic_data)
        
    def run_cycles(self, run_num, cyclic_data):
        for cycle in range(1, NUMBER_OF_CYCLES + 1):
            cyclic_data.set_cycle(cycle)
            self.network.plot_map(run_num, cycle - 1)
            print "============== cycle =", cycle,", run =", str(run_num),"=================\r\r"
            self.network.calculate_network()
#            self.network.manage_exit()
            number_entering_agents = self.agent_entry_array[cycle]
            self.network.manage_breakthrough(number_entering_agents)
#            print "POISSON = ", number_entering_agents
#            self.network.manage_entry(number_entering_agents, cycle)
            self.network.reset()
        self.network.plot_map(run_num, cycle)
Example #12
 def test_train(self):
     border = 2
     mbs = 500
     n_in = (2*border+1)**2
     tdata = BatchProcessor(
       X_dirpath=config.data_dir_path + 'train/*',
       y_dirpath=config.data_dir_path + 'train_cleaned/',
       batchsize=5000, border=border,
       limit=1, dtype=theano.config.floatX,
       random=True, random_mode='fully',
       rnd=rnd)
     vdata = BatchProcessor(
       X_dirpath=config.data_dir_path + 'train/*',
       y_dirpath=config.data_dir_path + 'train_cleaned/',
       batchsize=5000, border=border,
       limit=1, dtype=theano.config.floatX,
       random=False, rnd=rnd)
     net = Network([
             FullyConnectedLayer(n_in=25, n_out=19, rnd=rnd),
             FullyConnectedLayer(n_in=19, n_out=1, rnd=rnd),
           ], mbs)
     cost = net.train(tdata=tdata, epochs=1, mbs=mbs, eta=0.1,
                      eta_min=0.01, vdata=vdata, lmbda=0.0,
                      momentum=0.95, patience_increase=2,
                      improvement_threshold=0.995,
                      validation_frequency=1, algorithm='rmsprop',
                      early_stoping=False)
     self.assertTrue(float(cost) < 1.0)
Example #13
class NetworkServer(util.DaemonThread):

    def __init__(self, config):
        util.DaemonThread.__init__(self)
        self.debug = False
        self.config = config
        self.pipe = util.QueuePipe()
        self.network = Network(self.pipe, config)
        self.lock = threading.RLock()
        # each GUI is a client of the daemon
        self.clients = []
        self.request_id = 0
        self.requests = {}

    def add_client(self, client):
        for key in ['status', 'banner', 'updated', 'servers', 'interfaces']:
            value = self.network.get_status_value(key)
            client.response_queue.put({'method':'network.status', 'params':[key, value]})
        with self.lock:
            self.clients.append(client)
            print_error("new client:", len(self.clients))

    def remove_client(self, client):
        with self.lock:
            self.clients.remove(client)
            print_error("client quit:", len(self.clients))

    def send_request(self, client, request):
        with self.lock:
            self.request_id += 1
            self.requests[self.request_id] = (request['id'], client)
            request['id'] = self.request_id
        if self.debug:
            print_error("-->", request)
        self.pipe.send(request)

    def run(self):
        self.network.start()
        while self.is_running():
            try:
                response = self.pipe.get()
            except util.timeout:
                continue
            if self.debug:
                print_error("<--", response)
            response_id = response.get('id')
            if response_id:
                with self.lock:
                    client_id, client = self.requests.pop(response_id)
                response['id'] = client_id
                client.response_queue.put(response)
            else:
                # notification
                m = response.get('method')
                v = response.get('params')
                for client in self.clients:
                    if m == 'network.status' or v in client.subscriptions.get(m, []):
                        client.response_queue.put(response)
        self.network.stop()
        print_error("server exiting")
Example #14
def run_simulation(network_dir, condition, test_sub_name, disconnection_rate=0.0, drop_rate=0.0, threshold=sys.maxint):
    """
    Network directory should contain network and sample files
    """
    test_name = os.path.basename(network_dir)
    print "%s - %s - %s disconnect(%4.2f) drop(%4.2f) threshold(%d)" % (network_dir, test_name, test_sub_name, disconnection_rate, drop_rate, threshold)
    network_file_path = os.path.join(network_dir, test_name + ".txt")
    assert os.path.exists(network_file_path), "No network file %s exists " % network_file_path
    sample_file_path = os.path.join(network_dir, test_name + ".sample.txt")
    assert os.path.exists(sample_file_path), "No sample file %s exists" % sample_file_path

    network = Network(network_file_path)
    host_ids = network.get_host_ids() # [h0, h1, h2]
    hosts = []
    for h in host_ids:
        hosts.append(Host(h))
    neighbors = network.get_network() # {0:[1], 1:[0,2], 2:[1]}

    test_directory, sample = make_ready_for_test(network_dir=network_dir, test_name=test_name, condition=condition, test_sub_name=test_sub_name)

    if test_sub_name.startswith("single"):
        propagation_mode = ContextAggregator.SINGLE_ONLY_MODE
    else:
        propagation_mode = ContextAggregator.AGGREGATION_MODE

    config = {"hosts":hosts, "neighbors":neighbors,
              "test_directory":test_directory, "sample":sample,
              "disconnection_rate":disconnection_rate, "drop_rate":drop_rate,
              ContextAggregator.PM:propagation_mode,
              "threshold":threshold}
    simulation = AggregationSimulator.run(config=config)
    return simulation
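An illustrative invocation (the directory layout follows the tree10_* paths mentioned elsewhere on this page; all values here are examples, not the project's real configuration):

# Illustrative only: one aggregation run over a stored network directory.
simulation = run_simulation(
    network_dir="test_files/data/10_100_10_10/tree",   # hypothetical path
    condition="normal",
    test_sub_name="aggregate",       # anything not starting with "single"
    disconnection_rate=0.0,
    drop_rate=0.0)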
Example #15
def make_ready_for_test(network_dir, test_name, condition, test_sub_name):
    """

    >>> test_files_directory = get_test_files_directory()
    >>> result = make_ready_for_test(test_files_directory, "normal", "test1","aggregate")
    >>> len(result) == 2
    True
    """
    sample_name = get_sample_name(test_name)
    sample_file_path = os.path.join(network_dir, sample_name)
    # There should be sample files
    assert os.path.exists(sample_file_path), "No sample file at %s" % sample_file_path

    net_file_path = os.path.join(network_dir, test_name + ".txt")
    dot_file_path = net_file_path + ".dot"

    if os.path.exists(net_file_path):
        if not os.path.exists(dot_file_path):
            n = Network(net_file_path)
            dumb = n.dot_gen(dot_file_path)

    # get the target root file
    test_report_directory = network_dir + os.sep + condition
    test_report_sub_directory = test_report_directory + os.sep + test_sub_name
    if os.path.exists(test_report_sub_directory):
        shutil.rmtree(test_report_sub_directory)
    os.makedirs(test_report_sub_directory)

    sample = Sample()
    sample.read(sample_file_path)

    return test_report_sub_directory, sample
Example #16
def define_options(parser):
    # By default, ruby uses the simple timing cpu
    parser.set_defaults(cpu_type="TimingSimpleCPU")

    parser.add_option("--ruby-clock", action="store", type="string",
                      default='2GHz',
                      help="Clock for blocks running at Ruby system's speed")

    parser.add_option("--access-backing-store", action="store_true", default=False,
                      help="Should ruby maintain a second copy of memory")

    # Options related to cache structure
    parser.add_option("--ports", action="store", type="int", default=4,
                      help="used of transitions per cycle which is a proxy \
                            for the number of ports.")

    # network options are in network/Network.py

    # ruby mapping options
    parser.add_option("--numa-high-bit", type="int", default=0,
                      help="high order address bit to use for numa mapping. " \
                           "0 = highest bit, not specified = lowest bit")

    parser.add_option("--recycle-latency", type="int", default=10,
                      help="Recycle latency for ruby controller input buffers")

    protocol = buildEnv['PROTOCOL']
    exec "import %s" % protocol
    eval("%s.define_options(parser)" % protocol)
    Network.define_options(parser)
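Inside gem5's Python environment this hooks into a standard optparse parser; a rough sketch of the pairing (abbreviated, and dependent on gem5's buildEnv being available):

# Sketch: attach Ruby's command-line options to an optparse parser.
import optparse
parser = optparse.OptionParser()
define_options(parser)
(options, args) = parser.parse_args()
print(options.ruby_clock, options.ports)   # dests derived from the flag names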
Example #17
    def breed(self, mother, father):
        """Make two children as parts of their parents.

        Args:
            mother (dict): Network parameters
            father (dict): Network parameters

        Returns:
            (list): Two network objects

        """
        children = []
        for _ in range(2):

            child = {}

            # Loop through the parameters and pick params for the kid.
            for param in self.nn_param_choices:
                child[param] = random.choice(
                    [mother.network[param], father.network[param]]
                )

            # Now create a network object.
            network = Network(self.nn_param_choices)
            network.create_set(child)

            # Randomly mutate some of the children.
            if self.mutate_chance > random.random():
                network = self.mutate(network)

            children.append(network)

        return children
Example #18
    def test_secret(self, config):
        network = Network(config['server'], config['port'], config['secret'], config['verify_ssl'])

        response = network.get_ip('test_domain')
        res = response != 'connection_error' and response != 'wrong_secret'
        if not res:
            print "Could not verify secret."
        return res
Example #19
    def test_send_message_correct_call_when_message_is_too_long(self):
        message_content = ""
        for i in range(256):
            message_content += 'm'
        assert len(message_content) == 256

        with pytest.raises(ValueError):
            Network.send_message(self.socket_mock, message_content)
Example #20
def lambdas_to_plan(game, d):
    result = Network(game)
    for slot, term in d.items():
        term = parse_lambda(term)
        term = binarize_term(term)
        term = unfold_numbers(term)
        result.add_goal(Goal(term, slot))
    return result
Example #21
    def test_send_text_correct_call(self, send_message_function_mock):
        message = 'a'
        for i in range(253):
            message += 'm'

        Network.send_text(self.socket_mock, message)

        send_message_function_mock.assert_has_calls([call(self.socket_mock, 'L' + message),
                                                     ])
Example #22
def create_brain(network_params, fitness, evaluations):
    network = Network(NeuralBee.network_input_size(), network_params.hidden_neurons, network_params.outputs)
    
    def get_network_with(args):
        network.set_params(args)
        return network
    
    best_params = genetic(lambda x: fitness(get_network_with(x)), network.number_of_params())
    return get_network_with(best_params)
Example #23
    def run(self, dryrun):
        ensure_sudo()

        icv = InteractiveConfigValidation()
        icv.run()
        config = icv.get()

        network = Network(config['server'], config['port'], config['secret'], config['verify_ssl'])
        resp = network.get_ips()

        if resp == 'connection_error':
            print 'connection error'
            sys.exit(2)

        domains = [ d.split(' ') for d in resp.split('\n')]

        updated_hosts = []
        PD_BEGIN = '### PRIVATE DOMAINS BEGIN ###'
        PD_END =   '### PRIVATE DOMAINS END ###'

        if isfile('/etc/hosts'):
            PD_NOTSTARTED = 1
            PD_STARTED = 2
            PD_ENDED = 3
            state = PD_NOTSTARTED
            with open('/etc/hosts') as f:
                for line in f:
                    line = line[:-1] # remove \n
                    if state == PD_NOTSTARTED:
                        assert line not in [PD_END]
                        if line == PD_BEGIN:
                            state = PD_STARTED
                        else:
                            updated_hosts.append(line)
                    elif state == PD_STARTED:
                        assert line not in [PD_BEGIN]
                        if line == PD_END:
                            state = PD_ENDED
                    elif state == PD_ENDED:
                        assert line not in [PD_BEGIN, PD_END]
                        updated_hosts.append(line)
        else:
            # hosts file did not exist; create a new one
            pass
        updated_hosts.append(PD_BEGIN)
        for domain, ip in domains:
            updated_hosts.append('%s\t%s' % (ip, domain))
        updated_hosts.append(PD_END)
        # adding a newline at the end of file
        updated_hosts.append('')

        updated_hosts_string = '\n'.join(updated_hosts)
        if dryrun:
            print updated_hosts_string
        else:
            with open('/etc/hosts', "w+") as f:
                f.write(updated_hosts_string)
Example #24
 def test_dont_lose_nodes(self):
     n1 = Node(1)
     n2 = Node(2)
     n1.add_arc(Arc(n2))
     nw = Network([n1])
     self.assertEqual(nw.find_node(2), n2)
     n3 = Node(3)
     n2.add_arc(Arc(n3))
     self.assertEqual(nw.find_node(3), n3)
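One minimal sketch of Node, Arc and Network classes that would satisfy this test; this is an inference from the assertions above, not the project's actual implementation:

# Sketch inferred from the test: find_node() must discover nodes that are only
# reachable through arcs added after the Network was constructed.
class Arc(object):
    def __init__(self, target):
        self.target = target

class Node(object):
    def __init__(self, node_id):
        self.node_id = node_id
        self.arcs = []

    def add_arc(self, arc):
        self.arcs.append(arc)

class Network(object):
    def __init__(self, roots):
        self.roots = roots

    def find_node(self, node_id):
        # depth-first walk from the roots, following arcs as they exist now
        stack, seen = list(self.roots), set()
        while stack:
            node = stack.pop()
            if node.node_id == node_id:
                return node
            if id(node) not in seen:
                seen.add(id(node))
                stack.extend(arc.target for arc in node.arcs)
        return None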
Example #25
class WorkclipMini(object):
  #===============================
  def __init__(self):
    self.clipboard = anyos.Clipboard()
    self.hotkey = anyos.Hotkey()
    louie.connect(self.on_hotkey, self.hotkey.event)
    self.network = Network('192.168.0.255', 45644)
    louie.connect(self.on_command, self.network.command)
    
    self.hotkey.add_bind('CLIPBOARD', '<control><alt>C')
    self.hotkey.add_bind('URL', '<control><alt>B')    
    
    chrome = os.path.join(os.environ.get('LOCALAPPDATA') or '', 'Google\\Chrome\\Application\\chrome.exe')
    extra_browsers = [chrome, 'chrome.exe']
    for browser in extra_browsers:
      if webbrowser._iscommand(browser):
        webbrowser.register(browser, None, webbrowser.BackgroundBrowser(browser), -1)
    
  #===============================
  def run(self):
    thread = threading.Thread(target=self.loop)
    thread.start()
    self.hotkey.loop()
  
  #===============================
  def on_command(self, command, **kwargs):
    command = command.split(' ', 1)
    
    if command[0] == 'CLIPBOARD':
      self.clipboard.text = command[1]
    elif command[0] == 'URL':
      webbrowser.open_new_tab(command[1])
      
  #===============================
  def on_hotkey(self, name, **kwargs):
    self.clipboard.copy_selected_text()
    text = self.clipboard.text
    
    if name == 'CLIPBOARD':
      for address in self.network.addresses:
        self.network.send(address, name, text)
    elif name == 'URL':
      urls = re.findall(r'(([a-zA-Z]+://)?(www.)?[^ ]+\.[^ \n\r]{2,})', text, re.IGNORECASE)
      if not urls:
        urls = [[text,],]
        
      for url in urls:
        for address in self.network.addresses:
          self.network.send(address, name, url[0])

  #===============================
  def loop(self):    
    reactor.listenUDP(45644, self.network)
    refresh = task.LoopingCall(self.network.refresh_list)
    refresh.start(30) # Refresh list at rate defined in config.py
    reactor.run(False)
Example #26
    def create_network(self, color):
        """
        Creates a partition rebalance network for flows of a given color.

        The network looks like this:


        +-+
        |1|                                         prev_avb
        +++_____________________________
         |               |              \                 za
         v               v              v
        +-+             +-+            +-+
        |1|             |2|            |3|               avb
        +-+             +-+            +-+
                 x               x
        +-+             +-+            +-+
        |1|             |2|            |3|               rvb
        +-+             +-+            +-+
         |       x       |       x      |                 zr
         v               v              v
        +-+             +-+            +-+
        |1|             |2|            |3|          prev_rvb
        +-+             +-+            +-+

        prev_avb[i]: is the previous (or existing) active vbuckets of this color on node i
        avb[i]: will be the active vbuckets of this color on node i after the move
        rvb[i]: replica vbuckets of this color on node i
        prev_rvb[i]: the previous replica vbuckets of this color on node i
        za[i,j]: the number of active vbuckets of this color that move from node i to node j
                    as part of the solution
        zr[i,]: the number of replica vbuckets of this color that move from node i to node j

        prev_avb and prev_rvb are known and we're solving for avb, rvb, za and zr.

        No self-replication is captured by the fact that there are no links between nodes
        avb[i] and rvb[i].

        Costs of the za arcs are: za[i,j] = /  0    if i == j
                                            \  1    otherwise

        And similarly for the zr arcs. This captures the idea that we charge 1 to move one
        active vbucket to another node and 0 to leave it where it is.

        :param color:
        :param current_flow:
        :return:
        """
        if self.replica_count != 1:
            raise ValueError("can't create network with replica_count != 1")
        if not self._replica_networks:
            self.generate_replica_networks()
        network = Network()
        network.set_default_node_comparator(node_compare)
        self.add_to_network(color, network)
        return network
Example #27
def start_network_thread(nxt_handler):
    running = True
    while running:
        try:
            network = Network(nxt_handler)
            running = network.run()
        except Exception as ex:
            logger.error(traceback.format_exc())
            logger.error("Restarting Network")
            time.sleep(1)
Example #28
    def run(self):
        # parameters
        Sim.scheduler.reset()

        if "a" in self.debug:
            Sim.set_debug('AppHandler')
        if "t" in self.debug:
            Sim.set_debug('TCP')

        # setup network
        networkPlotter = Plotter('out/2-flows-simple')
        net = Network(config='networks/one-hop.txt',plotter=networkPlotter)
        net.loss(self.loss)

        # setup routes
        n1 = net.get_node('n1')
        n2 = net.get_node('n2')
        n1.add_forwarding_entry(address=n2.get_address('n1'),link=n1.links[0])
        n2.add_forwarding_entry(address=n1.get_address('n2'),link=n2.links[0])

        # setup transport
        t1 = Transport(n1)
        t2 = Transport(n2)

        # setup connection
        c1 = TCP(t1,n1.get_address('n2'),1,n2.get_address('n1'),1,AppHandler(inputfile=self.inputfile,identifier="c1"),window=self.window,type=self.type,window_size_plot=True,sequence_plot=True)
        c2 = TCP(t2,n2.get_address('n1'),1,n1.get_address('n2'),1,AppHandler(inputfile=self.inputfile,plot=True,identifier="c2"),window=self.window,type=self.type,receiver_flow_plot=True)
        
        c3 = TCP(t1,n1.get_address('n2'),2,n2.get_address('n1'),2,AppHandler(inputfile=self.inputfile,identifier="c3"),window=self.window,type=self.type,window_size_plot=True,sequence_plot=True)
        c4 = TCP(t2,n2.get_address('n1'),2,n1.get_address('n2'),2,AppHandler(inputfile=self.inputfile,plot=True,identifier="c4"),window=self.window,type=self.type,receiver_flow_plot=True)

        global tcps
        tcps = [c1, c2, c3, c4]

        global original_size
        f = open(self.inputfile, "rb")
        try:
            data = f.read(1000)
            while data != "":
                original_size += len(data)
                Sim.scheduler.add(delay=0, event=data, handler=c1.send)
                Sim.scheduler.add(delay=0, event=data, handler=c3.send)
                data = f.read(1000)
        finally:
            f.close()

        # run the simulation

        global decisecondEvent
        decisecondEvent = Sim.scheduler.add(delay=0.1, event=Sim, handler=self.decisecond)

        Sim.scheduler.run()

        networkPlotter.plot(self.sequencefile)
        plotter.plot(self.sequencefile)
Example #29
 def _get_network_node(self, network_path):
     assert self._lock.locked()
     name = "local"
     if network_path != "00":
         name = "n" % network_path
     if self.has_child(name):
         return self.get_child(name)
     network_node = Network()
     network_node.configure({"parent": self, "name": name, "network_path": network_path})
     network_node.start()
     return network_node
Example #30
 def test_to_string(self):
     net = Network([
       AutoencoderLayer(n_in=25, n_hidden=22, rnd=rnd),
       AutoencoderLayer(n_in=22, n_hidden=19,
                        corruption_level=0.1, p_dropout=0.1, rnd=rnd),
       FullyConnectedLayer(n_in=19, n_out=1, rnd=rnd),
     ], 200)
     layers = net.get_layer_string()
     dropouts = net.get_layer_dropout_string()
     test_s = "Ae[sgm](25, 22)-dAe[sgm, 0.100](22, 19)-FC(19, 1)"
     self.assertEqual(layers, test_s)
Example #31
def create_system(options, full_system, system, piobus = None, dma_ports = [],
                  bootmem=None):

    system.ruby = RubySystem()
    ruby = system.ruby

    # Generate pseudo filesystem
    FileSystemConfig.config_filesystem(system, options)

    # Create the network object
    (network, IntLinkClass, ExtLinkClass, RouterClass, InterfaceClass) = \
        Network.create_network(options, ruby)
    ruby.network = network

    protocol = buildEnv['PROTOCOL']
    #print("zzzzzzzzzzzzzzzzzzzzzzzzzzzzz"+protocol)
    exec("from . import %s" % protocol)
    try:
        (cpu_sequencers, dir_cntrls, topology) = \
             eval("%s.create_system(options, full_system, system, dma_ports,\
                                    bootmem, ruby)"
                  % protocol)
    except:
        print("Error: could not create sytem for ruby protocol %s" % protocol)
        raise

    # Create the network topology
    topology.makeTopology(options, network, IntLinkClass, ExtLinkClass,
            RouterClass)

    # Register the topology elements with faux filesystem (SE mode only)
    if not full_system:
        topology.registerTopology(options)


    # Initialize network based on topology
    Network.init_network(options, network, InterfaceClass)

    # Create a port proxy for connecting the system port. This is
    # independent of the protocol and kept in the protocol-agnostic
    # part (i.e. here).
    sys_port_proxy = RubyPortProxy(ruby_system = ruby)
    if piobus is not None:
        sys_port_proxy.pio_master_port = piobus.slave

    # Give the system port proxy a SimObject parent without creating a
    # full-fledged controller
    system.sys_port_proxy = sys_port_proxy

    # Connect the system port for loading of binaries etc
    system.system_port = system.sys_port_proxy.slave

    setup_memory_controllers(system, ruby, dir_cntrls, options)

    # Connect the cpu sequencers and the piobus
    if piobus != None:
        for cpu_seq in cpu_sequencers:
            cpu_seq.pio_master_port = piobus.slave
            cpu_seq.mem_master_port = piobus.slave

            if buildEnv['TARGET_ISA'] == "x86":
                cpu_seq.pio_slave_port = piobus.master

    ruby.number_of_virtual_networks = ruby.network.number_of_virtual_networks
    ruby._cpu_ports = cpu_sequencers
    ruby.num_of_sequencers = len(cpu_sequencers)

    # Create a backing copy of physical memory in case required
    if options.access_backing_store:
        ruby.access_backing_store = True
        ruby.phys_mem = SimpleMemory(range=system.mem_ranges[0],
                                     in_addr_map=False)
Example #32
    def __init__(self, config, *args, **kwargs):
        Gtk.ApplicationWindow.__init__(self, *args, **kwargs)
        self.set_default_size(950, 700)
        self.connect("delete-event", self.on_delete_event)

        # Get the settings from the config file
        self.config = config

        # Set up a list of buffer objects, holding data for every buffer
        self.buffers = BufferList()
        self.buffers.connect("bufferSwitched", self.on_buffer_switched)
        self.buffers.connect_after(
            "bufferSwitched", self.after_buffer_switched)

        # Set up GTK box
        box_horizontal = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL,
                                 spacing=0)
        self.add(box_horizontal)

        # Set up a headerbar
        self.headerbar = Gtk.HeaderBar()
        self.headerbar.set_has_subtitle(True)
        self.headerbar.set_title("Gtk-WeeChat")
        self.headerbar.set_subtitle("Not connected.")
        self.headerbar.set_show_close_button(True)
        self.set_titlebar(self.headerbar)

        # Add widget showing list of buffers
        box_horizontal.pack_start(
            self.buffers.treescrolledwindow, False, False, 0)
        sep = Gtk.Separator()
        box_horizontal.pack_start(sep, False, False, 0)

        # Add stack of buffers
        box_horizontal.pack_start(self.buffers.stack, True, True, 0)

        # Set up a menu
        menubutton = Gtk.MenuButton()
        icon = Gio.ThemedIcon(name="open-menu-symbolic")
        image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
        menubutton.get_child().destroy()
        menubutton.add(image)
        menubutton.show_all()
        self.headerbar.pack_end(menubutton)
        menu = Gtk.Menu()
        menu.set_halign(Gtk.Align(3))
        menuitem_darkmode = Gtk.CheckMenuItem(label="Dark")
        menuitem_darkmode.connect("toggled", self.on_darkmode_toggled)
        menuitem_darkmode.show()
        menu.append(menuitem_darkmode)
        menu_sep = Gtk.SeparatorMenuItem()
        menu_sep.show()
        menu.append(menu_sep)
        self.menuitem_connect = Gtk.MenuItem(label="Connect")
        self.menuitem_connect.connect("activate", self.on_connect_clicked)
        self.menuitem_connect.show()
        menu.append(self.menuitem_connect)
        self.menuitem_disconnect = Gtk.MenuItem(label="Disconnect")
        self.menuitem_disconnect.connect(
            "activate", self.on_disconnect_clicked)
        self.menuitem_disconnect.set_sensitive(False)
        self.menuitem_disconnect.show()
        menu.append(self.menuitem_disconnect)
        menuitem_quit = Gtk.MenuItem(label="Quit")
        menuitem_quit.set_action_name("app.quit")
        menuitem_quit.show()
        menu.append(menuitem_quit)
        menubutton.set_popup(menu)

        # Make everything visible (All is hidden by default in GTK 3)
        self.show_all()

        # Set up the network module
        self.net = Network(self.config)
        self.net.connect("messageFromWeechat", self._network_weechat_msg)
        self.net.connect("connectionChanged", self._connection_changed)

        # Connect to connection settings signals
        CONNECTION_SETTINGS.connect("connect", self.on_settings_connect)

        # Set up actions
        action = Gio.SimpleAction.new("buffer_next", None)
        action.connect("activate", self.buffers.on_buffer_next)
        self.add_action(action)
        action = Gio.SimpleAction.new("buffer_prev", None)
        action.connect("activate", self.buffers.on_buffer_prev)
        self.add_action(action)
        action = Gio.SimpleAction.new("copy_to_clipboard", None)
        action.connect("activate", self.buffers.on_copy_to_clipboard)
        self.add_action(action)
        action = Gio.SimpleAction.new("buffer_expand", None)
        action.connect("activate", self.on_buffer_expand)
        self.add_action(action)
        action = Gio.SimpleAction.new("buffer_collapse", None)
        action.connect("activate", self.on_buffer_collapse)
        self.add_action(action)

        # Autoconnect if necessary
        if self.net.check_settings() is True and \
                            self.config["relay"]["autoconnect"] == "on":
            if self.net.connect_weechat() is False:
                print("Failed to connect.")
            else:
                self.menuitem_connect.set_sensitive(False)
                self.menuitem_disconnect.set_sensitive(True)
        else:
            CONNECTION_SETTINGS.display()

        # Enable darkmode if enabled before
        self.dark_fallback_provider = Gtk.CssProvider()
        self.dark_fallback_provider.load_from_path(
            "{}/dark_fallback.css".format(CONFIG_DIR))
        if STATE.get_dark():
            menuitem_darkmode.set_active(True)

        # Sync our local hotlist with the weechat server
        GLib.timeout_add_seconds(60, self.request_hotlist)
Example #33
from network import Network

test_img_dir = "dataset/test"
test_segmented_img_dir = "segmented_img/test"
if not os.path.exists(test_segmented_img_dir):
    os.makedirs(test_segmented_img_dir)
model_dir = "model"
height = 136
width = 296
threshold = 0.5
batch_size = 64
shuffle_batch = True
use_cuda = torch.cuda.is_available()

# load iris model
iris_model = Network()
iris_model.load_state_dict(torch.load(os.path.join(model_dir, "iris.pth")))
iris_model.eval() # test mode

# load sclera model
sclera_model = Network()
sclera_model.load_state_dict(torch.load(os.path.join(model_dir, "sclera.pth")))
sclera_model.eval() # test mode

def get_dataloader(X):
    dataloader = DataLoader(dataset=TestDataLoader(X),
                            batch_size=batch_size,
                            shuffle=shuffle_batch)
    return dataloader
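A hedged sketch of how the two loaded models and get_dataloader() might be combined for inference (the per-pixel probability output and the batch format produced by TestDataLoader are assumptions):

# Illustrative inference loop; assumes the Network produces a probability map
# per pixel and that X is a batch of preprocessed eye images.
def segment(model, X):
    loader = get_dataloader(X)
    if use_cuda:
        model = model.cuda()
    masks = []
    with torch.no_grad():
        for batch in loader:
            if use_cuda:
                batch = batch.cuda()
            probs = model(batch)
            masks.append((probs > threshold).cpu())
    return torch.cat(masks)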

def load_dataset():
Example #34
class Daemon(DaemonThread):
    def __init__(self, config, fd):
        DaemonThread.__init__(self)
        self.config = config
        if config.get('offline'):
            self.network = None
            self.fx = None
        else:
            self.network = Network(config)
            self.network.start()
            self.fx = FxThread(config, self.network)
            self.network.add_jobs([self.fx])

        self.gui = None
        self.wallets = {}
        # Setup JSONRPC server
        path = config.get_wallet_path()
        default_wallet = self.load_wallet(path)
        self.cmd_runner = Commands(self.config, default_wallet, self.network)
        self.init_server(config, fd)

    def init_server(self, config, fd):
        host = config.get('rpchost', '127.0.0.1')
        port = config.get('rpcport', 0)
        try:
            server = SimpleJSONRPCServer((host, port),
                                         logRequests=False,
                                         requestHandler=RequestHandler)
        except:
            self.print_error('Warning: cannot initialize RPC server on host',
                             host)
            self.server = None
            os.close(fd)
            return
        os.write(fd, repr((server.socket.getsockname(), time.time())))
        os.close(fd)
        server.timeout = 0.1
        for cmdname in known_commands:
            server.register_function(getattr(self.cmd_runner, cmdname),
                                     cmdname)
        server.register_function(self.run_cmdline, 'run_cmdline')
        server.register_function(self.ping, 'ping')
        server.register_function(self.run_daemon, 'daemon')
        server.register_function(self.run_gui, 'gui')
        self.server = server

    def ping(self):
        return True

    def run_daemon(self, config):
        sub = config.get('subcommand')
        assert sub in [None, 'start', 'stop', 'status']
        if sub in [None, 'start']:
            response = "Daemon already running"
        elif sub == 'status':
            if self.network:
                p = self.network.get_parameters()
                response = {
                    'path': self.network.config.path,
                    'server': p[0],
                    'blockchain_height': self.network.get_local_height(),
                    'server_height': self.network.get_server_height(),
                    'spv_nodes': len(self.network.get_interfaces()),
                    'connected': self.network.is_connected(),
                    'auto_connect': p[4],
                    'version': ELECTRUM_VERSION,
                    'wallets':
                    {k: w.is_up_to_date()
                     for k, w in self.wallets.items()},
                }
            else:
                response = "Daemon offline"
        elif sub == 'stop':
            self.stop()
            response = "Daemon stopped"
        return response

    def run_gui(self, config_options):
        config = SimpleConfig(config_options)
        if self.gui:
            if hasattr(self.gui, 'new_window'):
                path = config.get_wallet_path()
                self.gui.new_window(path, config.get('url'))
                response = "ok"
            else:
                response = "error: current GUI does not support multiple windows"
        else:
            response = "Error: Electrum is running in daemon mode. Please stop the daemon first."
        return response

    def load_wallet(self, path):
        # wizard will be launched if we return
        if path in self.wallets:
            wallet = self.wallets[path]
            return wallet
        storage = WalletStorage(path)
        if not storage.file_exists:
            return
        if storage.requires_split():
            return
        if storage.requires_upgrade():
            self.print_error('upgrading wallet format')
            storage.upgrade()
        if storage.get_action():
            return
        wallet = Wallet(storage)
        wallet.start_threads(self.network)
        self.wallets[path] = wallet
        return wallet

    def add_wallet(self, wallet):
        path = wallet.storage.path
        self.wallets[path] = wallet

    def stop_wallet(self, path):
        wallet = self.wallets.pop(path)
        wallet.stop_threads()

    def run_cmdline(self, config_options):
        config = SimpleConfig(config_options)
        cmdname = config.get('cmd')
        cmd = known_commands[cmdname]
        path = config.get_wallet_path()
        wallet = self.load_wallet(path) if cmd.requires_wallet else None
        # arguments passed to function
        args = map(lambda x: config.get(x), cmd.params)
        # decode json arguments
        args = map(json_decode, args)
        # options
        args += map(lambda x: config.get(x), cmd.options)
        cmd_runner = Commands(config,
                              wallet,
                              self.network,
                              password=config_options.get('password'),
                              new_password=config_options.get('new_password'))
        func = getattr(cmd_runner, cmd.name)
        result = func(*args)
        return result

    def run(self):
        while self.is_running():
            self.server.handle_request() if self.server else time.sleep(0.1)
        for k, wallet in self.wallets.items():
            wallet.stop_threads()
        if self.network:
            self.print_error("shutting down network")
            self.network.stop()
            self.network.join()
        self.on_stop()

    def stop(self):
        self.print_error("stopping, removing lockfile")
        remove_lockfile(get_lockfile(self.config))
        DaemonThread.stop(self)

    def init_gui(self, config, plugins):
        gui_name = config.get('gui', 'qt')
        if gui_name in ['lite', 'classic']:
            gui_name = 'qt'
        gui = __import__('electrum_rubycoin_gui.' + gui_name,
                         fromlist=['electrum_rubycoin_gui'])
        self.gui = gui.ElectrumGui(config, self, plugins)
        self.gui.main()
Example #35
fig, axs = plt.subplots(3, 4, figsize=(12, 8))
fig.tight_layout(pad=5.0)
for j, size in enumerate(sizes):
    data = pd.read_csv(os.path.join(PATH, f"{datasets[0]}.train.{size}.csv"))
    data_test = pd.read_csv(os.path.join(PATH, f"{datasets[0]}.test.{size}.csv"))
    mesh = (
        np.mgrid[
            min(data.x) : max(data.x) : (max(data.x) - min(data.x)) / N,
            min(data.y) : max(data.y) : (max(data.y) - min(data.y)) / N,
        ]
        .reshape(2, -1)
        .T
    )
    cls = preprocessing.OneHotEncoder().fit_transform(data[["cls"]]).todense()
    MLP = Network(
        [2, 4, 8, cls.shape[1]], n_epochs=100, batch_size=10, activation_type="sigmoid"
    )
    MLP.train(data[["x", "y"]].to_numpy(), cls)
    pred_mesh = np.argmax(MLP.fit(mesh), axis=1)
    cls_test = preprocessing.OneHotEncoder().fit_transform(data_test[["cls"]]).todense()
    y_pred = np.argmax(MLP.fit(data_test[["x", "y"]].to_numpy()), axis=1)
    axs[0][j].scatter(mesh[:, 0], mesh[:, 1], s=0.5, c=pred_mesh)
    axs[0][j].scatter(data.x, data.y, c=data.cls, cmap="ocean", s=2)
    print(
        f"Accuracy of {datasets[0]} with {size} obs: ",
        sum((y_pred + 1) == data_test["cls"]) / len(y_pred),
    )
    axs[0][j].set_title(
        f'Accuracy: {round(sum((y_pred + 1) == data_test["cls"]) / len(y_pred)*100,1)}%, \nsize: {size}, \nnetwork: {[2, 4, 8, cls.shape[1]]}'
    )
for j, size in enumerate(sizes):
Example #36
    # Open data file
    datafile = open('wine.data', 'r')
    datalines = datafile.readlines()
    shuffle(datalines)    

    # Transform string lists to inputs understandable by the network
    raw_dataset = getDataSet(datalines, 3)
    norm_data = [normalize_dataset(raw_dataset[0]), raw_dataset[1]]

    # Separate training set from testing set
    limit_index = int(len(datalines) * training_fraction)
    training_set = [norm_data[0][0:limit_index], norm_data[1][0:limit_index]]
    testing_set = [norm_data[0][limit_index:], norm_data[1][limit_index:]]

    # Create the network
    n = Network(13, [7,5,5,3])
    nEpochs = int(sys.argv[1])

    # Make the graph
    errores = []
    outputs_length = len(raw_dataset[1][0])
    precitions = []
    recalls = []
    for i in range(outputs_length):
        precitions.append([])
        recalls.append([])

    for i in range(nEpochs):
        error = n.epoch(training_set[0], training_set[1])
        errores.append(error)
Example #37
def run():

    # create a blockchain
    chain = Chain()

    # create a network of nodes
    network = Network(NUM_NODES)
    network.gen_random_graph(MIN_DELAY, MAX_DELAY)

    # initialize set of nodes
    nodes = []
    for i in range(NUM_NODES):
        nodes.append(Node(chain, 1, network))

    # initialize node rates (computing power) and whether they are malicious
    rates = list([1 for i in range(NUM_NODES)])
    rates[0] = 10
    for i in range(NUM_NODES):
        nodes[i].rate = rates[i]

    # actions
    rand_nodes_order = list(range(NUM_NODES))
    prev_height = 1
    x = []
    y = []
    z = []
    while chain.time <= MAX_TIME:
        if (nodes[0].get_ds_block() != chain.blocks[0]):
            nodes[0].isBad = True

        # print(chain.time)
        # random.shuffle(rand_nodes_order)
        num_nodes_at_this_time = random.randint(NUM_NODES, NUM_NODES)

        for i in range(num_nodes_at_this_time):
            cur_node = nodes[rand_nodes_order[i]]
            # cur_node.update_visible_blocks()
            cur_node.make_blocks()
            # cur_node.commit_blocks()

        for i in range(NUM_NODES):
            nodes[i].update_visible_blocks()

        chain.inc_time()

        heads = []
        for node in nodes:
            hd = node.get_chain_heads()
            heads.extend([nn.ID for nn in hd])
        # print(Counter(heads).most_common(NUM_NODES))
        conv_num = converge_number(heads)
        if (chain.time > 6 and conv_num == NUM_NODES):
            return chain.time - 6
        # y.append(conv_num * 1.0 / NUM_NODES)
        # x.append(chain.time)
        # z.append(len([l for l in Counter(heads).most_common(NUM_NODES) if l[1] == conv_num]) / 1)

    while chain.time <= MAX_TIME + CATCH_UP_TIME:
        if (chain.time > 6 and conv_num == NUM_NODES):
            return chain.time - 6
        # print(chain.time)
        for i in range(NUM_NODES):
            nodes[i].update_visible_blocks()
        chain.inc_time()

        heads = []
        for node in nodes:
            hd = node.get_chain_heads()
            heads.extend([nn.ID for nn in hd])
        # print(Counter(heads).most_common(NUM_NODES))
        conv_num = converge_number(heads)
Example #38
    def __init__(self,
                 config,
                 paths,
                 dataset,
                 name='gan_compression',
                 evaluate=False):
        # Build the computational graph

        print('Building computational graph ...')
        self.G_global_step = tf.Variable(0, trainable=False)
        self.D_global_step = tf.Variable(0, trainable=False)
        self.handle = tf.placeholder(tf.string, shape=[])
        self.training_phase = tf.placeholder(tf.bool)

        # >>> Data handling
        self.path_placeholder = tf.placeholder(paths.dtype, paths.shape)
        self.test_path_placeholder = tf.placeholder(paths.dtype)

        train_dataset = Data.load_dataset(self.path_placeholder,
                                          config.batch_size,
                                          augment=False,
                                          training_dataset=dataset)
        test_dataset = Data.load_dataset(self.test_path_placeholder,
                                         config.batch_size,
                                         augment=False,
                                         training_dataset=dataset,
                                         test=True)

        self.iterator = tf.data.Iterator.from_string_handle(
            self.handle, train_dataset.output_types,
            train_dataset.output_shapes)

        self.train_iterator = train_dataset.make_initializable_iterator()
        self.test_iterator = test_dataset.make_initializable_iterator()

        self.example = self.iterator.get_next()

        if config.multiscale:
            self.example_downscaled2 = tf.layers.average_pooling2d(
                self.example, pool_size=3, strides=1, padding='same')
            self.example_downscaled4 = tf.layers.average_pooling2d(
                self.example_downscaled2,
                pool_size=3,
                strides=1,
                padding='same')

        # Global generator: Encode -> quantize -> reconstruct
        # =======================================================================================================>>>
        with tf.variable_scope('generator'):
            self.feature_map = Network.encoder(self.example, config,
                                               self.training_phase,
                                               config.channel_bottleneck)
            self.w_hat = Network.quantizer(self.feature_map, config)

            if config.sample_noise is True:
                print('Sampling noise...')
                # noise_prior = tf.contrib.distributions.Uniform(-1., 1.)
                # self.noise_sample = noise_prior.sample([tf.shape(self.example)[0], config.noise_dim])
                noise_prior = tf.contrib.distributions.MultivariateNormalDiag(
                    loc=tf.zeros([config.noise_dim]),
                    scale_diag=tf.ones([config.noise_dim]))
                v = noise_prior.sample(tf.shape(self.example)[0])
                Gv = Network.dcgan_generator(v,
                                             config,
                                             self.training_phase,
                                             C=config.channel_bottleneck,
                                             upsample_dim=config.upsample_dim)
                self.z = tf.concat([self.w_hat, Gv], axis=-1)
            else:
                self.z = self.w_hat

            self.reconstruction = Network.decoder(self.z,
                                                  config,
                                                  self.training_phase,
                                                  C=config.channel_bottleneck)

        print('Real image shape:', self.example.get_shape().as_list())
        print('Reconstruction shape:',
              self.reconstruction.get_shape().as_list())

        # Pass generated, real images to discriminator
        # =======================================================================================================>>>
        if config.multiscale:
            D_x, D_x2, D_x4, *Dk_x = Network.multiscale_discriminator(
                self.example,
                self.example_downscaled2,
                self.example_downscaled4,
                self.reconstruction,
                config,
                self.training_phase,
                use_sigmoid=config.use_vanilla_GAN,
                mode='real')
            D_Gz, D_Gz2, D_Gz4, *Dk_Gz = Network.multiscale_discriminator(
                self.example,
                self.example_downscaled2,
                self.example_downscaled4,
                self.reconstruction,
                config,
                self.training_phase,
                use_sigmoid=config.use_vanilla_GAN,
                mode='reconstructed',
                reuse=True)
        else:
            D_x = Network.discriminator(self.example,
                                        config,
                                        self.training_phase,
                                        use_sigmoid=config.use_vanilla_GAN)
            D_Gz = Network.discriminator(self.reconstruction,
                                         config,
                                         self.training_phase,
                                         use_sigmoid=config.use_vanilla_GAN,
                                         reuse=True)

        # Loss terms
        # =======================================================================================================>>>
        if config.use_vanilla_GAN is True:
            # Minimize JS divergence
            D_loss_real = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=D_x, labels=tf.ones_like(D_x)))
            D_loss_gen = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=D_Gz, labels=tf.zeros_like(D_Gz)))
            self.D_loss = D_loss_real + D_loss_gen
            # G_loss = max log D(G(z))
            self.G_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=D_Gz, labels=tf.ones_like(D_Gz)))
        else:
            # Minimize $\chi^2$ divergence
            self.D_loss = tf.reduce_mean(tf.square(D_x - 1.)) + tf.reduce_mean(
                tf.square(D_Gz))
            self.G_loss = tf.reduce_mean(tf.square(D_Gz - 1.))

            if config.multiscale:
                self.D_loss += tf.reduce_mean(
                    tf.square(D_x2 - 1.)) + tf.reduce_mean(
                        tf.square(D_x4 - 1.))
                self.D_loss += tf.reduce_mean(
                    tf.square(D_Gz2)) + tf.reduce_mean(tf.square(D_Gz4))

        distortion_penalty = config.lambda_X * tf.losses.mean_squared_error(
            self.example, self.reconstruction)
        self.G_loss += distortion_penalty

        if config.use_feature_matching_loss:  # feature extractor for generator
            D_x_layers = [j for i in Dk_x for j in i]
            D_Gz_layers = [j for i in Dk_Gz for j in i]
            feature_matching_loss = tf.reduce_sum([
                tf.reduce_mean(tf.abs(Dkx - Dkz))
                for Dkx, Dkz in zip(D_x_layers, D_Gz_layers)
            ])
            self.G_loss += config.feature_matching_weight * feature_matching_loss

        # Optimization
        # =======================================================================================================>>>
        G_opt = tf.train.AdamOptimizer(learning_rate=config.G_learning_rate,
                                       beta1=0.5)
        D_opt = tf.train.AdamOptimizer(learning_rate=config.D_learning_rate,
                                       beta1=0.5)

        theta_G = Utils.scope_variables('generator')
        theta_D = Utils.scope_variables('discriminator')
        print('Generator parameters:', theta_G)
        print('Discriminator parameters:', theta_D)
        G_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                         scope='generator')
        D_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,
                                         scope='discriminator')

        # Execute the update_ops before performing the train_step
        with tf.control_dependencies(G_update_ops):
            self.G_opt_op = G_opt.minimize(self.G_loss,
                                           name='G_opt',
                                           global_step=self.G_global_step,
                                           var_list=theta_G)
        with tf.control_dependencies(D_update_ops):
            self.D_opt_op = D_opt.minimize(self.D_loss,
                                           name='D_opt',
                                           global_step=self.D_global_step,
                                           var_list=theta_D)

        G_ema = tf.train.ExponentialMovingAverage(
            decay=config.ema_decay, num_updates=self.G_global_step)
        G_maintain_averages_op = G_ema.apply(theta_G)
        D_ema = tf.train.ExponentialMovingAverage(
            decay=config.ema_decay, num_updates=self.D_global_step)
        D_maintain_averages_op = D_ema.apply(theta_D)

        with tf.control_dependencies(G_update_ops + [self.G_opt_op]):
            self.G_train_op = tf.group(G_maintain_averages_op)
        with tf.control_dependencies(D_update_ops + [self.D_opt_op]):
            self.D_train_op = tf.group(D_maintain_averages_op)

        # >>> Monitoring
        # tf.summary.scalar('learning_rate', learning_rate)
        tf.summary.scalar('generator_loss', self.G_loss)
        tf.summary.scalar('discriminator_loss', self.D_loss)
        tf.summary.scalar('distortion_penalty', distortion_penalty)
        if config.use_feature_matching_loss:
            tf.summary.scalar('feature_matching_loss', feature_matching_loss)
        tf.summary.scalar('G_global_step', self.G_global_step)
        tf.summary.scalar('D_global_step', self.D_global_step)
        tf.summary.image('real_images', self.example, max_outputs=4)
        tf.summary.image('compressed_images',
                         self.reconstruction,
                         max_outputs=4)
        self.merge_op = tf.summary.merge_all()

        self.train_writer = tf.summary.FileWriter(os.path.join(
            directories.tensorboard,
            '{}_train_{}'.format(name, time.strftime('%d-%m_%I:%M'))),
                                                  graph=tf.get_default_graph())
        self.test_writer = tf.summary.FileWriter(
            os.path.join(
                directories.tensorboard,
                '{}_test_{}'.format(name, time.strftime('%d-%m_%I:%M'))))
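A standalone NumPy sketch of the least-squares (chi-squared) GAN objective computed in the non-vanilla branch above; illustrative only, the array arguments here are placeholders rather than the discriminator tensors from the model:

import numpy as np

def lsgan_losses(D_x, D_Gz):
    # illustrative sketch, not part of the model above: the discriminator
    # pushes real outputs towards 1 and reconstructed outputs towards 0,
    # while the generator pushes reconstructed outputs towards 1
    D_loss = np.mean((D_x - 1.0) ** 2) + np.mean(D_Gz ** 2)
    G_loss = np.mean((D_Gz - 1.0) ** 2)
    return D_loss, G_loss

# sanity check: a perfect discriminator yields zero discriminator loss
assert lsgan_losses(np.ones(8), np.zeros(8)) == (0.0, 1.0)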
 #%%   
    ### Parse input arguments
    args = parser.parse_args()
    max_length = args.length
    #%% Load training parameters
    model_dir = Path(args.model_dir)
    print('Loading model from: %s' % model_dir)
    training_args = json.load(open(model_dir / 'training_args.json'))
      
    #%% Load encoder and decoder dictionaries
    number_to_char = json.load(open(model_dir / 'number_to_char.json'))
    char_to_number = json.load(open(model_dir / 'char_to_number.json'))
        
    #%% Initialize network
    net = Network(input_size=training_args['alphabet_len'], 
                  hidden_units=training_args['hidden_units'], 
                  layers_num=training_args['layers_num'])
        
    #%% Load network trained parameters
    net.load_state_dict(torch.load(model_dir / 'net_params.pth', map_location='cpu'))
    net.eval() # Evaluation mode (e.g. disable dropout)
    temp_value = 0.15
    

    #%% Find initial state of the RNN
    with torch.no_grad():
        # Encode seed
        seed_encoded = encode_text(char_to_number, args.chapter_seed)
        # One hot matrix
        seed_onehot = create_one_hot_matrix(seed_encoded, training_args['alphabet_len'])
        # To tensor
Пример #40
0
from network import Network
from location import Location

if __name__ == "__main__":

    loc1 = Location("Washington")
    loc2 = Location("Chicago")

    # Timeout test
    network = Network(2)  # 2 percent chance to drop packet
    attempts = 0
    while network.networkDelay(loc1, loc2) != None:
        attempts += 1
    print "Network dropped packet after " + str(attempts + 1) + " packets sent"
Пример #41
0
class Game:
    def __init__(self, w, h):
        self.net = Network()
        self.width = w
        self.height = h
        self.player = Player(50, 50)
        self.player2 = Player(100, 100)
        self.canvas = Canvas(self.width, self.height, "Testing...")

    def run(self):
        clock = pygame.time.Clock()
        run = True
        while run:
            clock.tick(60)

            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False

                if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                    run = False

            keys = pygame.key.get_pressed()

            if keys[pygame.K_RIGHT]:
                if self.player.x <= self.width - self.player.velocity:
                    self.player.move(0)

            if keys[pygame.K_LEFT]:
                if self.player.x >= self.player.velocity:
                    self.player.move(1)

            if keys[pygame.K_UP]:
                if self.player.y >= self.player.velocity:
                    self.player.move(2)

            if keys[pygame.K_DOWN]:
                if self.player.y <= self.height - self.player.velocity:
                    self.player.move(3)

            # Send Network Stuff
            self.player2.x, self.player2.y = self.parse_data(self.send_data())

            # Update Canvas
            self.canvas.draw_background()
            self.player.draw(self.canvas.get_canvas())
            self.player2.draw(self.canvas.get_canvas())
            self.canvas.update()

        pygame.quit()

    def send_data(self):
        """
        Send position to server
        :return: None
        """
        data = str(self.net.id) + ":" + str(self.player.x) + "," + str(
            self.player.y)
        reply = self.net.send(data)
        return reply

    @staticmethod
    def parse_data(data):
        try:
            d = data.split(":")[1].split(",")
            return int(d[0]), int(d[1])
        except (AttributeError, IndexError, ValueError):
            # malformed or empty reply from the server
            return 0, 0
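send_data and parse_data above exchange positions as plain "id:x,y" strings. A small helper pair showing that wire format in isolation (the helper names are hypothetical; only the format itself is taken from the snippet):

def encode_position(player_id, x, y):
    # illustrative sketch of the "id:x,y" wire format used above
    return "{}:{},{}".format(player_id, x, y)

def decode_position(raw):
    # "3:120,45" -> (3, 120, 45); raises ValueError on malformed input
    player_id, coords = raw.split(":")
    x, y = coords.split(",")
    return int(player_id), int(x), int(y)

assert decode_position(encode_position(3, 120, 45)) == (3, 120, 45)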
    def train(self):
        # parameters
        batch_size = self.batch_size
        learning_rate = self.learning_rate
        num_epochs = self.num_epochs
        num_train = self.num_train
        num_test = self.num_test

        # file paths
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
        logPath = os.path.join(self.save_path, "logs")
        modelPath = os.path.join(self.save_path, "model")
        if not os.path.exists(modelPath):
            os.makedirs(modelPath)

        print(
            "Training Network [num_epochs={0}, num_train={1}, num_test={2}, batch_size={3}]"
            .format(num_epochs, num_train, num_test, batch_size))

        # graph of the network
        graph = self.network

        # network model
        network_model = Network(self.reader_train, self.reader_test, graph)
        net = network_model.output()
        label = network_model.label

        # regularization
        # vars = tf.trainable_variables()
        # l2_regularization_error = tf.add_n([tf.nn.l2_loss(v) for v in vars if '/W' in v.name])
        # l2_lambda = 0.0001

        # loss, train and validation functions
        mean_squared_error = tf.reduce_mean(tf.square(tf.subtract(net, label)))
        loss = mean_squared_error
        # loss_summary = tf.summary.scalar('MSE', loss)
        train_op = tf.train.AdamOptimizer(
            learning_rate=learning_rate).minimize(loss)

        loss_placeholder = tf.placeholder(dtype=tf.float32)
        loss_summary = tf.summary.scalar('MSE', loss_placeholder)

        # accuracy
        def axis_angle_from_quaternion(quaternion):
            qx = tf.slice(quaternion, [0, 1], [batch_size, 1])
            qy = tf.slice(quaternion, [0, 2], [batch_size, 1])
            qz = tf.slice(quaternion, [0, 3], [batch_size, 1])
            qw = tf.slice(quaternion, [0, 0], [batch_size, 1])
            # normalize quaternion
            q_length = tf.sqrt(
                tf.reduce_sum([
                    tf.pow(qw, 2),
                    tf.pow(qx, 2),
                    tf.pow(qy, 2),
                    tf.pow(qz, 2)
                ], 0))
            qw = tf.divide(qw, q_length)
            qx = tf.divide(qx, q_length)
            qy = tf.divide(qy, q_length)
            qz = tf.divide(qz, q_length)
            normalized_quaternion = tf.stack([qw, qx, qy, qz])
            # calculate angle and axis
            angle = tf.divide(
                tf.multiply(tf.multiply(2.0, tf.acos(qw)), 180.0), math.pi)
            axis_x = tf.divide(qx,
                               tf.sqrt(tf.subtract(1.0, tf.multiply(qw, qw))))
            axis_y = tf.divide(qy,
                               tf.sqrt(tf.subtract(1.0, tf.multiply(qw, qw))))
            axis_z = tf.divide(qz,
                               tf.sqrt(tf.subtract(1.0, tf.multiply(qw, qw))))
            return angle, axis_x, axis_y, axis_z, normalized_quaternion

        rangle, rx, ry, rz, rn = axis_angle_from_quaternion(net)
        langle, lx, ly, lz, ln = axis_angle_from_quaternion(label)
        # accuracy for quaternion angle
        angle_diff = tf.abs(tf.subtract(rangle, langle))
        correct_angle = tf.less_equal(angle_diff, self.accuracy_error)
        accuracy_angle = tf.reduce_mean(tf.cast(correct_angle, tf.float32))

        # accuracy for quaternion axis
        axis_angle_radians = tf.acos(
            tf.clip_by_value(
                tf.reduce_sum([
                    tf.multiply(rx, lx),
                    tf.multiply(ry, ly),
                    tf.multiply(rz, lz)
                ], 0), -1, 1))
        axis_angle_degrees = tf.divide(tf.multiply(axis_angle_radians, 180.0),
                                       math.pi)
        correct_axis = tf.less_equal(axis_angle_degrees, self.accuracy_error)
        accuracy_axis = tf.reduce_mean(tf.cast(correct_axis, tf.float32))

        # accuracys for both and stacked
        accuracy_both = tf.reduce_mean(
            tf.cast(tf.logical_and(correct_angle, correct_axis), tf.float32))
        accuracy = tf.stack([accuracy_angle, accuracy_axis, accuracy_both])

        # accuracy summaries
        accuracy_angle_placeholder = tf.placeholder(dtype=tf.float32)
        accuracy_angle_summary = tf.summary.scalar('Accuracy Angle',
                                                   accuracy_angle_placeholder)
        accuracy_axis_placeholder = tf.placeholder(dtype=tf.float32)
        accuracy_axis_summary = tf.summary.scalar('Accuracy Axis',
                                                  accuracy_axis_placeholder)
        accuracy_both_placeholder = tf.placeholder(dtype=tf.float32)
        accuracy_both_summary = tf.summary.scalar('Accuracy Both',
                                                  accuracy_both_placeholder)

        # init and session
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        coord = tf.train.Coordinator()
        saver = tf.train.Saver()
        if self.combination:
            rating_variables = [
                v for v in tf.trainable_variables() if 'rating' in v.name
            ]
            loader = tf.train.Saver(
                network_model.graph.getRatingDict(rating_variables))
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        def writeSummary(writer, summary_op, feed, step):
            summary = sess.run(summary_op, feed)
            writer.add_summary(summary, step)

        def train_batch():
            # load training batch
            inputs_batch, labels_batch = network_model.getTrainBatch(sess)
            feed_dict = network_model.getFeedDict(inputs_batch, labels_batch)
            # train and calculate loss
            _, cost = sess.run([train_op, loss], feed_dict)
            train_loss = sess.run(loss_summary,
                                  feed_dict={loss_placeholder: cost})
            train_batch_accuracys = sess.run(accuracy, feed_dict=feed_dict)
            return train_batch_accuracys, cost, train_loss

        def run_validation_set():
            total_test_batch = int(num_test / batch_size)
            costs = []
            accuracys = []
            angle_diffs = []
            axis_diffs = []
            for step in range(total_test_batch):
                # load validation batch
                inputs_batch, labels_batch = network_model.getTestBatch(sess)
                feed_dict = network_model.getFeedDict(inputs_batch,
                                                      labels_batch)
                cost_validation = sess.run(loss, feed_dict)
                costs.append(cost_validation)
                angle_diff_batch, axis_diff_batch = sess.run(
                    [angle_diff, axis_angle_degrees], feed_dict)
                angle_diffs.append(angle_diff_batch)
                axis_diffs.append(axis_diff_batch)
                # accuracys
                valid_batch_accuracys = sess.run(accuracy, feed_dict=feed_dict)
                accuracys.append(valid_batch_accuracys)
            cost_avg = tf.cast(tf.reduce_mean(costs), tf.float32).eval()
            validation_loss = sess.run(loss_summary,
                                       feed_dict={loss_placeholder: cost_avg})
            accu_avg = tf.cast(tf.reduce_mean(tf.stack(accuracys), 0),
                               tf.float32).eval()
            # tensorboard
            test_writer.add_summary(validation_loss, global_step)
            writeSummary(test_writer, accuracy_angle_summary,
                         {accuracy_angle_placeholder: accu_avg[0]},
                         global_step)
            writeSummary(test_writer, accuracy_axis_summary,
                         {accuracy_axis_placeholder: accu_avg[1]}, global_step)
            writeSummary(test_writer, accuracy_both_summary,
                         {accuracy_both_placeholder: accu_avg[2]}, global_step)
            # print avg angle and axis
            avgAxis = tf.cast(tf.reduce_mean(tf.stack(axis_diffs)),
                              tf.float32).eval()
            avgAngle = tf.cast(tf.reduce_mean(tf.stack(angle_diffs)),
                               tf.float32).eval()
            print("Avg-Angle: {0}, Avg-Axis: {1}".format(avgAngle, avgAxis))
            # test_writer.add_summary(validation_accuracy, global_step)
            print(
                "Test-Loss: {0:10.8f}, Test-Accuracy (Angle, Axis, Both): {1:2.2f}, {2:2.2f}, {3:2.2f} "
                .format(cost_avg, accu_avg[0], accu_avg[1],
                        accu_avg[2]))
            return accu_avg[2]

        def save_model():
            saved_path = saver.save(sess, os.path.join(modelPath,
                                                       "model.ckpt"))
            print("Model saved in file: ", saved_path)

        with tf.Session(config=config) as sess:
            # tensorboard writers
            train_writer = tf.summary.FileWriter(os.path.join(
                logPath, "train"),
                                                 graph=sess.graph)
            test_writer = tf.summary.FileWriter(os.path.join(logPath, "test"))
            sess.run(init_op)
            print("init done")
            if self.pretrained:
                ckpt = tf.train.get_checkpoint_state(
                    os.path.join(self.pretrained, "model"))
                if ckpt and ckpt.model_checkpoint_path:
                    if not self.combination:
                        saver.restore(sess, ckpt.model_checkpoint_path)
                    else:
                        loader.restore(sess, ckpt.model_checkpoint_path)
                    print("Restored Model")
                else:
                    print("Could not restore model!")
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            global_step = 0
            previous_best = 0.0
            # run validation set for graph
            run_validation_set()
            for epoch in range(num_epochs):
                accuracys = []
                # training with BGD
                total_batch = int(num_train / batch_size)
                avg_batch_cost = 0
                for step in range(total_batch):
                    batch_accuracy, cost, train_loss = train_batch()
                    avg_batch_cost += cost
                    # tensorboard
                    global_step = epoch * total_batch + step
                    train_writer.add_summary(train_loss, global_step)
                    accuracys.append(batch_accuracy)
                # calculate averages over the training batches
                avg_batch_cost /= total_batch
                accu_avg = tf.cast(tf.reduce_mean(tf.stack(accuracys), 0),
                                   tf.float32).eval()
                # write train summaries
                writeSummary(train_writer, accuracy_angle_summary,
                             {accuracy_angle_placeholder: accu_avg[0]},
                             global_step)
                writeSummary(train_writer, accuracy_axis_summary,
                             {accuracy_axis_placeholder: accu_avg[1]},
                             global_step)
                writeSummary(train_writer, accuracy_both_summary,
                             {accuracy_both_placeholder: accu_avg[2]},
                             global_step)
                # train_accuracy = sess.run(accuracy_summary, feed_dict={accuracy_placeholder: accu_avg})
                # train_writer.add_summary(train_accuracy, global_step)
                print(
                    "Epoch {0:5} of {1:5} ### Train-Loss: {2:10.8f} ### Train-Accuracy (Angle, Axis, Both): "
                    .format(epoch + 1, num_epochs, avg_batch_cost) +
                    "{0:2.2f}, {1:2.2f}, {2:2.2f}".format(
                        accu_avg[0], accu_avg[1], accu_avg[2]))
                # validation
                acc_valid = run_validation_set()
                if acc_valid >= previous_best:
                    previous_best = acc_valid
                    save_model()

            coord.request_stop()
            coord.join(threads)
            sess.close()
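The accuracy code above converts predicted and label quaternions to axis-angle form inside the TensorFlow graph. The same conversion in plain NumPy, as a cross-checking sketch assuming unit quaternions in (w, x, y, z) order:

import numpy as np

def axis_angle_from_quaternion_np(q):
    # illustrative NumPy counterpart of the TF helper above
    # q = [w, x, y, z]; returns (rotation angle in degrees, unit rotation axis)
    q = np.asarray(q, dtype=float)
    q = q / np.linalg.norm(q)
    w, xyz = q[0], q[1:]
    angle = np.degrees(2.0 * np.arccos(np.clip(w, -1.0, 1.0)))
    axis = xyz / np.sqrt(max(1.0 - w * w, 1e-12))
    return angle, axis

# sanity check: a 90-degree rotation about the z-axis
angle, axis = axis_angle_from_quaternion_np(
    [np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
assert abs(angle - 90.0) < 1e-6 and np.allclose(axis, [0.0, 0.0, 1.0])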
Пример #43
0
if NOISE:
    instance, maxGain, numNetwork = problem_instance.add_noise_to_instance(
        NUM_TIME_SLOT, 0.1, 15,
        lambda T: problem_instance.repeat_instance(
            T, NUM_REPEATS,
            problem_instance.PROBLEM_INSTANCES[PROBLEM_INSTANCE_NAME]))
else:
    instance, maxGain, numNetwork = problem_instance.repeat_instance(
        NUM_TIME_SLOT, NUM_REPEATS,
        problem_instance.PROBLEM_INSTANCES[PROBLEM_INSTANCE_NAME]
    )

#instance, maxGain, numNetwork = problem_instance.PROBLEM_INSTANCES[PROBLEM_INSTANCE_NAME](NUM_TIME_SLOT)

networkList = [Network(0) for i in range(numNetwork)]
#networkList = [Network(NETWORK_BANDWIDTH[i]) for i in range(numNetwork)]        # create network objects and store in networkList
global_setting.constants.update({'network_list':networkList})

''' ____ configured global settings. now to import the stuff that uses it ____ '''
from mobile_device import MobileDevice
import logging_configure
import data_analyzer
logging_configure.initialize((DIR if SAVE_LOG_DETAILS else None))
''' __________________________________________________________________________ '''

if PERIOD_OPTION == 0: # EXP3
    periods = [1]
    partitions = algo_periodicexp4.make_partition_cycles(NUM_TIME_SLOT, periods)
elif PERIOD_OPTION == 1: # EXP3, but reset every day
    periods = [NUM_REPEATS]
Пример #44
0
class RealmGateway(object):
    def __init__(self, args):
        self._config = args
        # Get event loop
        self._loop = asyncio.get_event_loop()
        # Get logger
        self._logger = logging.getLogger(self._config.name)

    @asyncio.coroutine
    def run(self):
        self._logger.warning('RealmGateway_v2 is starting...')
        # Initialize Data Repository
        yield from self._init_datarepository()
        # Initialize Address Pools
        yield from self._init_pools()
        # Initialize Host table
        yield from self._init_hosttable()
        # Initialize Connection table
        yield from self._init_connectiontable()
        # Initialize Network
        yield from self._init_network()
        # Initialize Policy Based Resource Allocation
        yield from self._init_pbra()
        # Initialize PacketCallbacks
        yield from self._init_packet_callbacks()
        # Initialize CETP
        yield from self._init_cetp()
        # Initialize DNS
        yield from self._init_dns()
        # Create task: CircularPool cleanup
        _t = asyncio.ensure_future(self._init_cleanup_cpool(0.1))
        RUNNING_TASKS.append((_t, 'cleanup_cpool'))
        # Create task: Timer cleanup
        _t = asyncio.ensure_future(self._init_cleanup_pbra_timers(10.0))
        RUNNING_TASKS.append((_t, 'cleanup_pbra_timers'))
        # Create task for cleaning & synchronizing the CETP-H2H conns.
        _t = asyncio.ensure_future(self._init_cleanup_ovsConnections(2.0))
        RUNNING_TASKS.append((_t, 'DP_conn_timers'))
        # Create task: Show DNS groups
        _t = asyncio.ensure_future(self._init_show_dnsgroups(20.0))
        RUNNING_TASKS.append((_t, 'show_dnsgroups'))
        # Initialize Subscriber information
        yield from self._init_subscriberdata()

        # Initialize Subscriber information
        yield from self._init_suricata('0.0.0.0', 12346)

        # Ready!
        self._logger.warning('RealmGateway_v2 is ready!')

    @asyncio.coroutine
    def _init_datarepository(self):
        # Initialize Data Repository
        self._logger.warning('Initializing Data Repository')
        configfile = self._config.getdefault('repository_subscriber_file',
                                             None)
        configfolder = self._config.getdefault('repository_subscriber_folder',
                                               None)
        policyfile = self._config.getdefault('repository_policy_file', None)
        policyfolder = self._config.getdefault('repository_policy_folder',
                                               None)
        api_url = self._config.getdefault('repository_api_url', None)
        cetp_host_policy_location = self._config.getdefault(
            'cetp_host_policy_location', None)
        cetp_network_policy_location = self._config.getdefault(
            'cetp_network_policy_location', None)

        self._datarepository = DataRepository(configfile=configfile,
                                              configfolder=configfolder,
                                              policyfile=policyfile,
                                              policyfolder=policyfolder,
                                              api_url=api_url)

    @asyncio.coroutine
    def _init_pools(self):
        self._logger.warning('Initializing Address Pools')
        # Create container of Address Pools
        self._pooltable = PoolContainer()

        # Create specific Address Pools
        ## Service IP Pool
        ap = AddressPoolShared('servicepool', name='Service Pool')
        self._pooltable.add(ap)
        for ipaddr in self._config.getdefault('pool_serviceip', ()):
            self._logger.info('Adding resource(s) to pool {} @ <{}>'.format(
                ipaddr, ap))
            ap.add_to_pool(ipaddr)

        ## Circular IP Pool
        ap = AddressPoolShared('circularpool', name='Circular Pool')
        self._pooltable.add(ap)
        for ipaddr in self._config.getdefault('pool_cpoolip', ()):
            self._logger.info('Adding resource(s) to pool {} @ <{}>'.format(
                ipaddr, ap))
            ap.add_to_pool(ipaddr)

        # For future use
        ## CES Proxy IP Pool
        ap = AddressPoolUser('proxypool', name='CES Proxy Pool')
        self._pooltable.add(ap)

        address_pool = self._config.getdefault('pool_cespoolip', ())
        if address_pool is not None:
            for ipaddr in address_pool:
                self._logger.info(
                    'Adding resource(s) to pool {} @ <{}>'.format(ipaddr, ap))
                ap.add_to_pool(ipaddr)

    @asyncio.coroutine
    def _init_hosttable(self):
        # Create container of Hosts
        self._hosttable = HostTable()

    @asyncio.coroutine
    def _init_connectiontable(self):
        # Create container of Connections
        self._connectiontable = ConnectionTable()

    @asyncio.coroutine
    def _init_network(self):
        self._logger.warning('Initializing Network')
        self._read_cetp_params()
        self._network = Network(ipt_cpool_queue=self._config.ipt_cpool_queue,
                                ipt_cpool_chain=self._config.ipt_cpool_chain,
                                ipt_host_chain=self._config.ipt_host_chain,
                                ipt_host_unknown=self._config.ipt_host_unknown,
                                ipt_policy_order=self._config.ipt_policy_order,
                                ipt_markdnat=self._config.ipt_markdnat,
                                ipt_flush=self._config.ipt_flush,
                                ips_hosts=self._config.ips_hosts,
                                api_url=self._config.network_api_url,
                                datarepository=self._datarepository,
                                synproxy=self._config.synproxy,
                                pooltable=self._pooltable,
                                cetp_service=self._cetp_service)

    def _read_cetp_params(self):
        self.cetp_config = self._config.getdefault('cetp_config', None)
        self._cetp_service = []

        if self.cetp_config is not None:
            with open(self.cetp_config) as f:
                self.ces_conf = yaml.safe_load(f)
            cetp_servers = self.ces_conf["CETPServers"]["serverNames"]
            for s in cetp_servers:
                srv = self.ces_conf["CETPServers"][s]
                ip_addr, port, proto = srv["ip"], srv["port"], srv["transport"]
                order, preference = srv["order"], srv["preference"]
                self._cetp_service.append(
                    (ip_addr, port, proto, order, preference))

    @asyncio.coroutine
    def _init_pbra(self):
        # Create container of Reputation objects
        self._logger.warning('Initializing Policy Based Resource Allocation')
        self._pbra = PolicyBasedResourceAllocation(
            pooltable=self._pooltable,
            hosttable=self._hosttable,
            connectiontable=self._connectiontable,
            datarepository=self._datarepository,
            network=self._network,
            cname_soa=self._config.dns_cname_soa)

    @asyncio.coroutine
    def _init_packet_callbacks(self):
        # Create object for storing all PacketIn-related information
        self.packetcb = PacketCallbacks(network=self._network,
                                        connectiontable=self._connectiontable,
                                        pbra=self._pbra)
        # Register NFQUEUE(s) callback
        self._network.ipt_register_nfqueues(
            self.packetcb.packet_in_circularpool)

    @asyncio.coroutine
    def _init_cetp(self):
        def get_spm_services_parameter():
            """ Returns boolean of 'spm_services_boolean' parameter """
            spm_services_boolean = self._config.getdefault(
                'spm_services_boolean', False)
            if isinstance(spm_services_boolean, str):
                return spm_services_boolean.lower() == "true"
            return False

        if self.cetp_config is not None:
            self.cetpstate_table = CETP.CETPStateTable()
            self.ces_params = self.ces_conf['CESParameters']
            self.cesid = self.ces_params['cesid']
            self._cetp_host_policies = self._config.getdefault(
                'cetp_policies_host_file', None)
            self._cetp_network_policies = self._config.getdefault(
                'cetp_policies_network_file', None)
            cetp_host_policy_location = self._config.getdefault(
                'cetp_host_policy_location', None)
            cetp_network_policy_location = self._config.getdefault(
                'cetp_network_policy_location', None)
            spm_services_boolean = get_spm_services_parameter()

            #print("self._cetp_host_policies, self._cetp_network_policies: ", self._cetp_host_policies, self._cetp_network_policies)

            self._cetp_mgr = cetpManager.CETPManager(
                self._cetp_host_policies, self.cesid, self.ces_params,
                self._hosttable, self._connectiontable, self._pooltable,
                self._network, self.cetpstate_table, spm_services_boolean,
                cetp_host_policy_location, cetp_network_policy_location,
                self._cetp_network_policies, self._loop)

            for s in self._cetp_service:
                (ip_addr, port, proto, o, p) = s
                yield from self._cetp_mgr.initiate_cetp_service(
                    ip_addr, port, proto)

    @asyncio.coroutine
    def _init_dns(self):
        # Create object for storing all DNS-related information
        self._dnscb = DNSCallbacks(cachetable=None,
                                   datarepository=self._datarepository,
                                   network=self._network,
                                   hosttable=self._hosttable,
                                   pooltable=self._pooltable,
                                   connectiontable=self._connectiontable,
                                   pbra=self._pbra,
                                   cetp_mgr=self._cetp_mgr,
                                   cetp_service=self._cetp_service,
                                   cesid=self.cesid)

        # Register defined DNS timeouts
        self._dnscb.dns_register_timeout(self._config.dns_timeout, None)
        self._dnscb.dns_register_timeout(self._config.dns_timeout_a, 1)
        self._dnscb.dns_register_timeout(self._config.dns_timeout_aaaa, 28)
        self._dnscb.dns_register_timeout(self._config.dns_timeout_srv, 33)
        self._dnscb.dns_register_timeout(self._config.dns_timeout_naptr, 35)

        # Register defined SOA zones
        for soa_name in self._config.dns_soa:
            self._logger.info('Registering DNS SOA {}'.format(soa_name))
            self._dnscb.dns_register_soa(soa_name)
        soa_list = self._dnscb.dns_get_soa()

        # Register DNS resolvers
        for ipaddr, port in self._config.dns_resolver:
            self._logger.info('Creating DNS Resolver endpoint @{}:{}'.format(
                ipaddr, port))
            self._dnscb.dns_register_resolver((ipaddr, port))

        # Dynamic DNS Server for DNS update messages
        for ipaddr, port in self._config.ddns_server:
            cb_function = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.ddns_process(x, y, z))
            transport, protocol = yield from self._loop.create_datagram_endpoint(
                functools.partial(DDNSServer, cb_default=cb_function),
                local_addr=(ipaddr, port))
            self._logger.info('Creating DNS DDNS endpoint @{}:{}'.format(
                ipaddr, port))
            self._dnscb.register_object('DDNS@{}:{}'.format(ipaddr, port),
                                        protocol)

        # DNS Server for WAN via UDP
        for ipaddr, port in self._config.dns_server_wan:
            cb_soa = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.dns_process_rgw_wan_soa(x, y, z))
            cb_nosoa = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.dns_process_rgw_wan_nosoa(x, y, z))
            transport, protocol = yield from self._loop.create_datagram_endpoint(
                functools.partial(DNSProxy,
                                  soa_list=soa_list,
                                  cb_soa=cb_soa,
                                  cb_nosoa=cb_nosoa),
                local_addr=(ipaddr, port))
            self._logger.info('Creating DNS Server endpoint @{}:{}'.format(
                ipaddr, port))
            self._dnscb.register_object('DNSServer@{}:{}'.format(ipaddr, port),
                                        protocol)

        # DNS Server for WAN via TCP
        for ipaddr, port in self._config.dns_server_wan:
            cb_soa = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.dns_process_rgw_wan_soa(x, y, z))
            cb_nosoa = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.dns_process_rgw_wan_nosoa(x, y, z))
            server = yield from self._loop.create_server(functools.partial(
                DNSTCPProxy,
                soa_list=soa_list,
                cb_soa=cb_soa,
                cb_nosoa=cb_nosoa),
                                                         host=ipaddr,
                                                         port=port,
                                                         reuse_address=True)
            server.connection_lost = lambda x: server.close()
            self._logger.info('Creating DNS TCP Server endpoint @{}:{}'.format(
                ipaddr, port))
            self._dnscb.register_object(
                'DNSTCPServer@{}:{}'.format(ipaddr, port), server)

        # DNS Proxy for LAN
        for ipaddr, port in self._config.dns_server_lan:
            cb_soa = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.dns_process_rgw_lan_soa(x, y, z))
            cb_nosoa = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.dns_process_rgw_lan_nosoa(x, y, z))
            transport, protocol = yield from self._loop.create_datagram_endpoint(
                functools.partial(DNSProxy,
                                  soa_list=soa_list,
                                  cb_soa=cb_soa,
                                  cb_nosoa=cb_nosoa),
                local_addr=(ipaddr, port))
            self._logger.info('Creating DNS Proxy endpoint @{}:{}'.format(
                ipaddr, port))
            self._dnscb.register_object('DNSProxy@{}:{}'.format(ipaddr, port),
                                        protocol)

        ## DNS Proxy for Local
        for ipaddr, port in self._config.dns_server_local:
            cb_soa = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.dns_process_rgw_lan_soa(x, y, z))
            # Disable resolutions of non SOA domains for self generated DNS queries (i.e. HTTP proxy) - Answer with REFUSED
            cb_nosoa = lambda x, y, z: asyncio.ensure_future(
                self._dnscb.dns_error_response(
                    x, y, z, rcode=dns.rcode.REFUSED))
            transport, protocol = yield from self._loop.create_datagram_endpoint(
                functools.partial(DNSProxy,
                                  soa_list=soa_list,
                                  cb_soa=cb_soa,
                                  cb_nosoa=cb_nosoa),
                local_addr=(ipaddr, port))
            self._logger.info('Creating DNS Proxy endpoint @{}:{}'.format(
                ipaddr, port))
            self._dnscb.register_object('DNSProxy@{}:{}'.format(ipaddr, port),
                                        protocol)

    @asyncio.coroutine
    def _init_subscriberdata(self):
        self._logger.warning('Initializing subscriber data')
        tzero = self._loop.time()
        for subs_id, subs_data in self._datarepository.get_policy_host_all(
            {}).items():
            ipaddr = subs_data['ID']['ipv4'][0]
            fqdn = subs_data['ID']['fqdn'][0]
            self._logger.debug('Registering subscriber {} / {}@{}'.format(
                subs_id, fqdn, ipaddr))

            key = 'proxypool'
            if self._pooltable.has(key):
                ap = self._pooltable.get(key)
                ap.create_pool(fqdn)

            yield from self._dnscb.ddns_register_user(fqdn, 1, ipaddr)
        self._logger.info(
            'Completed initialization of subscriber data in {:.3f} sec'.format(
                self._loop.time() - tzero))

    @asyncio.coroutine
    def _init_cleanup_cpool(self, delay):
        self._logger.warning(
            'Initiating cleanup of the Circular Pool every {} seconds'.format(
                delay))
        while True:
            yield from asyncio.sleep(delay)
            # Update table and remove expired elements
            self._connectiontable.update_all_rgw()

    @asyncio.coroutine
    def _init_cleanup_pbra_timers(self, delay):
        self._logger.warning(
            'Initiating cleanup of PBRA timers every {} seconds'.format(delay))
        while True:
            yield from asyncio.sleep(delay)
            # Update table and remove expired elements
            self._pbra.cleanup_timers()

    @asyncio.coroutine
    def _init_show_dnsgroups(self, delay):
        self._logger.warning(
            'Initiating display of DNSGroup information every {} seconds'.
            format(delay))
        self._pbra.debug_dnsgroups(transition=False)
        while True:
            yield from asyncio.sleep(delay)
            # Update table and remove expired elements
            self._pbra.debug_dnsgroups(transition=True)

    @asyncio.coroutine
    def _init_cleanup_ovsConnections(self, delay):
        self._logger.warning(
            'Initiating CETP Connection cleaning every {} seconds'.format(
                delay))
        try:
            while True:
                yield from asyncio.sleep(delay)
                key = connection.KEY_MAP_CETP_CONN
                # Get the CP H2H connection states and the data-plane stats
                cp_conns = self._connectiontable.lookup(key, update=False,
                                                        check_expire=False)
                dp_stats = yield from self._network.get_dp_flow_stats()

                if (cp_conns is not None) and (dp_stats is not None):
                    self._network._synchronize_conns(self._connectiontable,
                                                     cp_conns, dp_stats)

        except Exception as ex:
            self._logger.error(
                "Exception in _init_cleanup_ovsConnections: {}".format(ex))
            # utils3.trace()

    @asyncio.coroutine
    def shutdown(self):
        self._logger.warning('RealmGateway_v2 is shutting down...')
        self._dnscb.shutdown()
        self._network.shutdown()
        self._datarepository.shutdown()
        self._cetp_mgr.terminate()
        yield from asyncio.sleep(0.1)

        for task_obj, task_name in RUNNING_TASKS:
            with suppress(asyncio.CancelledError):
                self._logger.info('Cancelling {} task'.format(task_name))
                task_obj.cancel()
                yield from asyncio.sleep(1)
                yield from task_obj
                self._logger.warning('>> Cancelled {} task'.format(task_name))

    @asyncio.coroutine
    def _init_suricata(self, ipaddr, port):
        ## Added for Suricata testing
        transport, protocol = yield from self._loop.create_datagram_endpoint(
            SuricataAlert, local_addr=(ipaddr, port))
        self._logger.warning('Creating SuricataAlert endpoint @{}:{}'.format(
            ipaddr, port))
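The cleanup methods above all follow the same pattern: an @asyncio.coroutine that sleeps for a fixed delay, runs one maintenance step, and is scheduled with asyncio.ensure_future. On Python 3.5+ syntax the same pattern reads as follows (a sketch only; the maintenance callback is a placeholder):

import asyncio

async def periodic(delay, maintenance):
    # sketch of the cleanup-task pattern used above; runs until cancelled
    while True:
        await asyncio.sleep(delay)
        maintenance()

# scheduling, analogous to the RUNNING_TASKS entries above:
# task = asyncio.ensure_future(periodic(0.1, connectiontable.update_all_rgw))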
Пример #45
0
class App:
    def __init__(self):
        print('init')
        self.btns = [
            Button("Rock", 50, 500, (0, 0, 0)),
            Button("Scissors", 250, 500, (255, 0, 0)),
            Button("Paper", 450, 500, (0, 255, 0))
        ]
        self.player = None
        self.game = None
        self.n = None
        self.width = 1000
        self.height = 700

        self.id = -1
        self.CAPTION = "BINGO!"
        self.SCREEN_RESOLUTION = (1000, 700)
        pygame.init()
        pygame.display.set_caption(self.CAPTION)
        self.screen = pygame.display.set_mode(self.SCREEN_RESOLUTION)
        self.GAME_STATE = 1
        self.STATE_WELCOME = 1
        self.STATE_PLAY = 2
        self.STATE_WINNER = 3
        self.count_player = 1
        self.run = True

    def start(self):
        print('start')
        while self.run:
            if self.GAME_STATE == self.STATE_WELCOME:
                self.handle_welcome()
            elif self.GAME_STATE == self.STATE_PLAY:
                self.handle_play()
            elif self.GAME_STATE == self.STATE_WINNER:
                self.handle_winner()
        pygame.quit()

    def handle_welcome(self):
        self.screen.fill((128, 128, 128))
        font = pygame.font.SysFont("comicsans", 60)
        text = font.render("Klik untuk Mulai Bermain!", 1, (255, 0, 0))
        self.screen.blit(text, (600, 400))
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.run = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                self.GAME_STATE = self.STATE_PLAY
                self.handle_play()
        pygame.display.update()

    def handle_play(self):
        print('play')
        clock = pygame.time.Clock()
        clock.tick(60)
        self.n = Network()
        self.player = int(self.n.getP())
        while self.run:
            try:
                self.game = self.n.send("get")
            except:
                self.run = False
                print("Couldn't get game")
                break

            if self.game.bothWent():
                self.redrawWindow()
                pygame.time.delay(500)
                try:
                    self.game = self.n.send("reset")
                except:
                    self.run = False
                    print("Couldn't get game")
                    break

                font = pygame.font.SysFont("comicsans", 90)
                if (self.game.winner() == 1
                        and self.player == 1) or (self.game.winner() == 0
                                                  and self.player == 0):
                    text = font.render("You Won!", 1, (255, 0, 0))
                elif self.game.winner() == -1:
                    text = font.render("Tie Game!", 1, (255, 0, 0))
                else:
                    text = font.render("You Lost...", 1, (255, 0, 0))

                self.screen.blit(text,
                                 (self.width / 2 - text.get_width() / 2,
                                  self.height / 2 - text.get_height() / 2))
                pygame.display.update()
                pygame.time.delay(2000)

            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.run = False
                    pygame.quit()

                if event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    for btn in self.btns:
                        if btn.click(pos) and self.game.connected():
                            if self.player == 0:
                                if not self.game.p1Went:
                                    self.n.send(btn.text)
                            else:
                                if not self.game.p2Went:
                                    self.n.send(btn.text)

            self.redrawWindow()
        # self.screen.fill((128, 128, 128))
        # for event in pygame.event.get():
        #     if event.type == pygame.QUIT:
        #         self.run = False
        #     if event.type == pygame.MOUSEBUTTONDOWN:
        #         self.GAME_STATE = self.STATE_WELCOME
        # pygame.display.update()

    def handle_winner(self):
        pass

    def redrawWindow(self):
        self.screen.fill((128, 128, 128))

        if not (self.game.connected()):
            font = pygame.font.SysFont("comicsans", 80)
            text = font.render("Waiting for Player...", 1, (255, 0, 0), True)
            self.screen.blit(text, (self.width / 2 - text.get_width() / 2,
                                    self.height / 2 - text.get_height() / 2))
        else:
            font = pygame.font.SysFont("comicsans", 60)
            text = font.render("Your Move", 1, (0, 255, 255))
            self.screen.blit(text, (80, 200))

            text = font.render("Opponents", 1, (0, 255, 255))
            self.screen.blit(text, (380, 200))

            move1 = self.game.get_player_move(0)
            move2 = self.game.get_player_move(1)
            if self.game.bothWent():
                text1 = font.render(move1, 1, (0, 0, 0))
                text2 = font.render(move2, 1, (0, 0, 0))
            else:
                if self.game.p1Went and self.player == 0:
                    text1 = font.render(move1, 1, (0, 0, 0))
                elif self.game.p1Went:
                    text1 = font.render("Locked In", 1, (0, 0, 0))
                else:
                    text1 = font.render("Waiting...", 1, (0, 0, 0))

                if self.game.p2Went and self.player == 1:
                    text2 = font.render(move2, 1, (0, 0, 0))
                elif self.game.p2Went:
                    text2 = font.render("Locked In", 1, (0, 0, 0))
                else:
                    text2 = font.render("Waiting...", 1, (0, 0, 0))

            if self.player == 1:
                self.screen.blit(text2, (100, 350))
                self.screen.blit(text1, (400, 350))
            else:
                self.screen.blit(text1, (100, 350))
                self.screen.blit(text2, (400, 350))

            for btn in self.btns:
                btn.draw(self.screen)

        pygame.display.update()
Пример #46
0
        print('    (Use -h or --help flag for full option list.)')
        sys.exit()

    if args.model is None:
        print('Must specify --model (or --write-vocab) parameter.')
        print('    (Use -h or --help flag for full option list.)')
        sys.exit()

    if args.test is not None:
        from phrase_tree import PhraseTree
        from network import Network
        from parser import Parser

        test_trees = PhraseTree.load_treefile(args.test)
        print('Loaded test trees from {}'.format(args.test))
        network = Network.load(args.model)
        print('Loaded model from: {}'.format(args.model))
        accuracy = Parser.evaluate_corpus(test_trees, fm, network)
        print('Accuracy: {}'.format(accuracy))
    elif args.train is not None:
        from network import Network

        if args.np_seed is not None:
            import numpy as np
            np.random.seed(args.np_seed)

        print('L2 regularization: {}'.format(args.dynet_l2))

        Network.train(
            feature_mapper=fm,
            word_dims=args.word_dims,
Пример #47
0
cost = SSE(y, s3)

feed_dict = {
    X: train_x,
    y: train_y,
    W1: W1_,
    b1: b1_,
    W2: W2_,
    b2: b2_,
    W3: W3_,
    b3: b3_
}

hyper_parameters = [W1, b1, W2, b2]

graph = Network.topological_sort(feed_dict)

epoch = 1000
batch_size = 100
steps_per_batch = len(train_y) // batch_size

for i in tqdm(xrange(epoch)):
    for j in xrange(steps_per_batch):

        batch_x, batch_y = mini_batch(train_x, train_y)

        X.value = batch_x
        y.value = batch_y

        Network.forward_propagation(graph)
        Network.backward_propagation(graph)
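Example #47 drives a small computation-graph framework through Network.topological_sort, forward_propagation and backward_propagation. A stripped-down sketch of what such a framework's forward pass involves (the Node/Input/Add classes below are hypothetical, not the example's actual Network module):

class Node(object):
    # minimal graph node, only to illustrate the call pattern above
    def __init__(self, inputs=()):
        self.inputs = list(inputs)
        self.outputs = []
        self.value = None
        for n in self.inputs:
            n.outputs.append(self)

class Input(Node):
    def forward(self):
        pass  # value is assigned from the feed dict

class Add(Node):
    def forward(self):
        self.value = sum(n.value for n in self.inputs)

def topological_sort(feed_dict):
    # depth-first ordering so every node runs after all of its inputs
    order, visited = [], set()

    def visit(node):
        if node not in visited:
            visited.add(node)
            for nxt in node.outputs:
                visit(nxt)
            order.append(node)

    for node, value in feed_dict.items():
        node.value = value
        visit(node)
    return list(reversed(order))

def forward_propagation(graph):
    for node in graph:
        node.forward()

x, y = Input(), Input()
s = Add([x, y])
graph = topological_sort({x: 1.0, y: 2.0})
forward_propagation(graph)
assert s.value == 3.0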
Пример #48
0
torch.manual_seed(seed=seed)
rng = np.random.default_rng(seed=seed)

n_episodes: int = 10000
n_steps_max: int = 200

env = gym.make('CartPole-v0')  # Pendulum-v0
env.seed(seed=seed)

n_inputs: int = env.observation_space.shape[0]
n_hidden: int = 100
n_outputs: int = env.action_space.n
learning_rate: float = 2e-4

online_net = Network(n_inputs=n_inputs,
                     n_hidden=n_hidden,
                     n_outputs=n_outputs,
                     learning_rate=learning_rate)

n_steps_per_episode: List[int] = []

for episode in range(n_episodes):
    state = env.reset()
    el_traces = torch.zeros([n_outputs, n_hidden + 1])
    discounted_reward = 0

    for steps in range(n_steps_max):

        action, probs, hidden_activities = online_net.get_action(state, rng)

        hidden_activities = torch.cat((hidden_activities, torch.ones(1)), 0)
        log_prob = torch.log(probs.squeeze(0)[action])
Пример #49
0
def network_setup(model_file_path=None):
    freq_count = 4000
    count_bins = 88 * 20
    dataset = MapsDB('../db',
                     freq_count=freq_count,
                     count_bins=count_bins,
                     batch_size=128,
                     start_time=0.5,
                     duration=0.5)
    model = Network()
    model.add(Linear('fc1', dataset.get_vec_input_width(), 2048, 0.001))
    model.add(Sigmoid('sigmoid1'))
    model.add(Linear('fc2', 2048, dataset.get_label_width(), 0.001))
    model.add(Softmax('softmax2'))

    loss = CrossEntropyLoss(name='xent')
    # loss = EuclideanLoss(name='r2')

    optim = SGDOptimizer(learning_rate=0.00001, weight_decay=0.005, momentum=0.9)
    # optim = AdagradOptimizer(learning_rate=0.001, eps=1e-6)

    input_placeholder = T.fmatrix('input')
    label_placeholder = T.fmatrix('label')
    label_active_size_placeholder = T.ivector('label_active_size')

    if model_file_path:
        model.loads(model_file_path)
    else:
        dataset.load_cache()

    model.compile(input_placeholder, label_placeholder, label_active_size_placeholder, loss, optim)
    return model, dataset, freq_count, count_bins
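A brief usage sketch for network_setup above; only the returned tuple is taken from the function itself, and the checkpoint path in the second call is hypothetical:

# fresh model: loads the cached dataset and compiles the network
model, dataset, freq_count, count_bins = network_setup()

# resuming from previously saved parameters (hypothetical path)
# model, dataset, freq_count, count_bins = network_setup('model_checkpoint.pkl')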
Пример #50
0
 def _getFactoryEnabledClasses(self):
     return (("", "UCI", UCI()), ("", "DNS", DNS()), ("", "DHCP", DHCP()),
             ("", "PureFTP", PureFTP()), ("", "Network", Network()),
             ("", "Firewall", Firewall()), ("", "OpenWRTManager",
                                            OpenWRTManager()))
Пример #51
0
class Start_Game:
    def __init__(self):
        pygame.init()
        os.environ['SDL_VIDEO_CENTERED'] = '1'
        self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))

        # window navigation variables
        self.is_connecting_screen = False
        self.is_loading_screen = False
        self.start_game = False

        self.all_buttons = []
        self.init_buttons()

        self.home_screen = pygame.image.load(
            "Assets/background/home_screen.png").convert()
        # 0: loading screen
        # 1: loading screen animation icon
        self.connecting_screen = [
            pygame.image.load(
                "Assets/background/loading_screen.png").convert(),
            pygame.image.load(
                "Assets/background/loading_screen_animation.png").convert()
        ]
        self.loading_screen = [
            pygame.image.load("Assets/background/p1_loading.png").convert(),
            pygame.image.load("Assets/background/p2_loading.png").convert()
        ]

        self.blink_flash_logo = False

        # delay counter used to halt the loading screen for a few seconds
        self.delay = 0
        self.delay_param = 200

        self.n = Network()
        self.player_1 = self.n.fetch_initial_data()

        self.interface_loop()  # interface menu
        Game(
            self.n, self.player_1,
            self.player_2)  # after both players are connected, the game starts

    def init_buttons(self):
        self.all_buttons.append(
            Button('Assets/background/START_1.png',
                   'Assets/background/START_2.png', 564, 156, 1))
        self.all_buttons.append(
            Button('Assets/background/exit_1.png',
                   'Assets/background/exit_2.png', 608, 536, 2))

    def interface_loop(self):
        while not self.start_game:
            CLOCK.tick(FPS)
            # gets data from server and if both players are connected, continues to next window
            self.fetch_data_server()
            self.event_handling()
            button_mechanics(self.all_buttons)
            self.display()

    def check_hovered_state(self, buttons_list):
        # iterates through the buttons list and checks which buttons are clicked
        for img in buttons_list:  # check which buttons are in hovered state
            if img.hovered:
                if img.state == START:  # move to the desired window
                    self.is_connecting_screen = True
                    self.player_1.is_start_clicked = True

                elif img.state == EXIT:
                    sys.exit(0)

    def fetch_data_server(self):
        self.player_2 = self.n.send_and_receive(self.player_1)
        if self.player_1.is_start_clicked and self.player_2.is_start_clicked:
            self.is_loading_screen = True
            self.is_connecting_screen = False

    def event_handling(self):
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            # if mouse cursor within START or EXIT text, then change the icon
            if event.type == pygame.MOUSEMOTION:
                pos = pygame.mouse.get_pos()
                # print(pos)
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:  # if left button pressed
                    # check which buttons are in hovered state
                    self.check_hovered_state(self.all_buttons)
            if event.type == FLASH_LOGO_BLINK_INTERVAL:
                self.blink_flash_logo = not self.blink_flash_logo

    def display(self):
        if self.is_connecting_screen:
            if self.blink_flash_logo:
                self.connecting_screen[1].set_colorkey((255, 255, 255))
                self.screen.blit(self.connecting_screen[1], (571, 270))
            else:
                self.screen.blit(self.connecting_screen[0], (0, 0))
        elif self.is_loading_screen:
            self.delay += 1
            if self.delay >= self.delay_param:
                self.start_game = True
            if self.player_1.mode == 1:
                self.screen.blit(self.loading_screen[0], (0, 0))
            elif self.player_1.mode == 2:
                self.screen.blit(self.loading_screen[1], (0, 0))

        else:
            self.screen.blit(self.home_screen, (0, 0))
            # check button hovered state
            for i in self.all_buttons:
                if i.hovered:
                    i.img_hovered.set_colorkey((0, 0, 0))
                    self.screen.blit(i.img_hovered, (i.x, i.y))
                else:
                    i.img.set_colorkey((0, 0, 0))
                    self.screen.blit(i.img, (i.x, i.y))

        pygame.display.update()
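The display() method above relies on pygame's per-surface colorkey to draw logos and buttons with a transparent background. Below is a minimal, self-contained sketch of that pattern; the window size, surface size and key colour are illustrative and not taken from the project.

# Sketch of the colorkey pattern used in display() above.
# Pixels matching the colorkey are skipped when the surface is blitted.
import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))

logo = pygame.Surface((200, 100))
logo.fill((255, 255, 255))                  # white background we want to hide
pygame.draw.circle(logo, (200, 30, 30), (100, 50), 40)

logo.set_colorkey((255, 255, 255))          # treat white as transparent
screen.blit(logo, (220, 190))               # only the circle is drawn
pygame.display.update()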
Пример #52
0
def make_uniform_network():
    return Network()
Пример #53
0
class MyGame(object):
    PLAYING, DYING, GAME_OVER, STARTING, WELCOME = range(5)
    REFRESH, START, RESTART = range(pygame.USEREVENT, pygame.USEREVENT + 3)

    def __init__(self):
        pygame.mixer.pre_init(44100, -16, 2, 2048)
        pygame.mixer.init()
        pygame.init()
        # Initialize the window
        self.width = 1280
        self.height = 720
        self.screen = pygame.display.set_mode((self.width, self.height))
        pygame.display.set_caption('Star wars on steroids')
        # Fonts, sounds, images
        self.soundtrack = load_sound('soundtrack.wav')
        self.soundtrack.set_volume(.5)
        self.big_font = pygame.font.SysFont(None, 100)
        self.normal_font = pygame.font.SysFont(None, 50)
        self.small_font = pygame.font.SysFont(None, 25)
        self.laserburst_sound = load_sound('laser.wav')
        self.gameover_text = self.big_font.render('Конец игры', True,
                                                  (255, 255, 255))
        self.gameover_text2 = self.normal_font.render('Жмякни для новой игры',
                                                      True, (255, 255, 255))
        self.lives_image = load_image('life.png')
        self.critical_distance = {"big": 100, "normal": 70, "small": 40}
        # Game timer
        self.FPS = 30
        pygame.time.set_timer(self.REFRESH, 1000 // self.FPS)
        self.fire_time = datetime.datetime.now()
        # Initialize the welcome screen
        self.state = MyGame.WELCOME
        self.welcome_logo = load_image('logo.png')
        self.welcome_text = self.big_font.render("Star Wars on steroids", True,
                                                 (255, 255, 255))
        self.welcome_desc = self.normal_font.render("Жмякни по экрану", True,
                                                    (255, 255, 255))

    def start(self):
        self.spaceship = Spaceship((self.width // 2, self.height // 2))
        self.friendship = Spaceship((self.width // 2, self.height // 2))
        self.net = Network()
        self.bursts = []
        self.soundtrack.play(-1, 0, 1000)
        self.state = MyGame.PLAYING

    def run(self):
        """Вечный цикл игры"""
        running = True
        while running:
            event = pygame.event.wait()

            if event.type == pygame.QUIT:
                running = False
            elif event.type == MyGame.REFRESH:
                if self.state != MyGame.WELCOME:
                    keys = pygame.key.get_pressed()

                    if keys[pygame.K_SPACE]:
                        new_time = datetime.datetime.now()
                        if new_time - self.fire_time > datetime.timedelta(
                                seconds=0.5):
                            self.spaceship.fire()
                            self.laserburst_sound.play()
                            self.fire_time = new_time

                    if self.state == MyGame.PLAYING:
                        if keys[pygame.K_RIGHT]:
                            self.spaceship.angle -= 10
                            self.spaceship.angle %= 360

                        if keys[pygame.K_LEFT]:
                            self.spaceship.angle += 10
                            self.spaceship.angle %= 360

                        if keys[pygame.K_UP]:
                            self.spaceship.is_throttle_on = True
                            if self.spaceship.speed < 10:
                                self.spaceship.speed += 1
                        else:
                            if self.spaceship.speed > 0:
                                self.spaceship.speed -= 1
                            self.spaceship.is_throttle_on = False

                        if len(self.spaceship.active_bursts) > 0:
                            self.bursts_physics()

                        if len(self.hedgehoppers) > 0:
                            self.hedgehoppers_physics()

                        self.physics()

                self.draw()

            elif event.type == MyGame.START:
                pygame.time.set_timer(MyGame.START, 0)
                if self.lives < 1:
                    self.game_over()
                else:
                    self.hedgehoppers = []
                    for i in range(5):
                        self.make_hedgehopper()

                    self.start()

            elif event.type == MyGame.RESTART:
                pygame.time.set_timer(MyGame.RESTART, 0)
                self.state = MyGame.STARTING

            elif event.type == pygame.MOUSEBUTTONDOWN and (
                    self.state == MyGame.STARTING
                    or self.state == MyGame.WELCOME):
                self.hedgehoppers = []
                self.start()
                for i in range(5):
                    self.make_hedgehopper()
                self.lives = 3
                self.score = 0
            else:
                pass

    def send_data(self):
        """
        Send our position to the server.
        :return: the reply string received from the server
        """
        data = str(self.net.id) + ":" + str(
            self.spaceship.position[0]) + "," + str(self.spaceship.position[1])
        reply = self.net.send(data)
        return reply
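
    # The wire format shared by send_data() and parse_data() is "<id>:<x>,<y>",
    # e.g. "1:640.0,360.0"; parse_data() drops the id part and returns the
    # other player's coordinates as a pair of floats.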

    @staticmethod
    def parse_data(data):
        try:
            d = data.split(":")[1].split(",")
            return float(d[0]), float(d[1])
        except Exception:
            # malformed reply from the server -- fall back to the origin
            return 0, 0

    def make_hedgehopper(self, size="big", pos=None):
        """Создаем штурмовика размера size, по умолчанию большой"""
        margin = 200
        if pos is None:
            rand_x = random.randint(margin, self.width - margin)
            rand_y = random.randint(margin, self.height - margin)
            while distance((rand_x, rand_y), self.spaceship.position) < 400:
                rand_x = random.randint(0, self.width)
                rand_y = random.randint(0, self.height)
            new_hedgehopper = Hedgehopper((rand_x, rand_y), size)
        else:
            new_hedgehopper = Hedgehopper(pos, size)

        self.hedgehoppers.append(new_hedgehopper)

    def game_over(self):
        """Игра окончена"""
        self.soundtrack.stop()
        self.state = MyGame.GAME_OVER
        pygame.time.set_timer(MyGame.RESTART, int(1000))

    def die(self):
        """Смерть"""
        self.soundtrack.stop()
        self.lives -= 1
        self.state = MyGame.DYING
        pygame.time.set_timer(MyGame.START, int(1000))

    def physics(self):
        """Движение и смерть за окном"""
        if self.state == MyGame.PLAYING:
            self.spaceship.move()

            # Send our coordinates, receive the second player's position
            reply = self.send_data()
            self.friendship.position[0], self.friendship.position[
                1] = self.parse_data(reply)
            print(reply)

            if self.spaceship.position[0] > 1280 or self.spaceship.position[
                0] < 0 \
                    or self.spaceship.position[1] > 720 or \
                    self.spaceship.position[1] < 0:
                self.die()

    def bursts_physics(self):
        """Убиение штурмовиков"""
        if len(self.spaceship.active_bursts) > 0:
            for burst in self.spaceship.active_bursts:
                burst.move()
                for hedgehopper in self.hedgehoppers:
                    if hedgehopper.size == "big":
                        if distance(burst.position, hedgehopper.position) < 70:
                            self.hedgehoppers.remove(hedgehopper)
                            if burst in self.spaceship.active_bursts:
                                self.spaceship.active_bursts.remove(burst)
                            self.make_hedgehopper("normal", (
                                hedgehopper.position[0] + 10,
                                hedgehopper.position[1]))
                            self.make_hedgehopper("normal", (
                                hedgehopper.position[0] - 10,
                                hedgehopper.position[1]))
                            self.score += 1
                    elif hedgehopper.size == "normal":
                        if distance(burst.position, hedgehopper.position) < 50:
                            self.hedgehoppers.remove(hedgehopper)
                            if burst in self.spaceship.active_bursts:
                                self.spaceship.active_bursts.remove(burst)
                            self.make_hedgehopper("small", (
                                hedgehopper.position[0] + 10,
                                hedgehopper.position[1]))
                            self.make_hedgehopper("small", (
                                hedgehopper.position[0] - 10,
                                hedgehopper.position[1]))
                            self.score += 1
                    else:
                        if distance(burst.position, hedgehopper.position) < 30:
                            self.hedgehoppers.remove(hedgehopper)
                            if burst in self.spaceship.active_bursts:
                                self.spaceship.active_bursts.remove(burst)
                            if len(self.hedgehoppers) < 10:
                                self.make_hedgehopper()
                            self.score += 1

    def hedgehoppers_physics(self):
        """Убийство игрока и выход за граница штурмовика"""
        if len(self.hedgehoppers) > 0:
            for hedgehopper in self.hedgehoppers:
                hedgehopper.move()

                if distance(hedgehopper.position, self.spaceship.position) < \
                        self.critical_distance[hedgehopper.size]:
                    self.die()
                elif distance(hedgehopper.position,
                              (self.width / 2, self.height / 2)) > math.sqrt(
                        (self.width / 2) ** 2 + (self.height / 2) ** 2):
                    self.hedgehoppers.remove(hedgehopper)

                    if len(self.hedgehoppers) < 10:
                        self.make_hedgehopper(hedgehopper.size)

    def draw(self):
        """Тут все отрисовыватеся"""
        BackGround = Background('background.jpg', [0, 0])
        self.screen.blit(BackGround.image, BackGround.rect)
        if self.state != MyGame.WELCOME:
            self.spaceship.draw_on(self.screen)
            self.friendship.draw_on(self.screen)

            if len(self.spaceship.active_bursts) > 0:
                for burst in self.spaceship.active_bursts:
                    burst.draw_on(self.screen)

            if len(self.hedgehoppers) > 0:
                for hedgehopper in self.hedgehoppers:
                    hedgehopper.draw_on(self.screen)

            if len(self.hedgehoppers) < 10:
                self.make_hedgehopper()

            scores_text = self.small_font.render(
                "Убито штурмовиков: " + str(self.score), True, (255, 255, 255))
            draw(scores_text, self.screen, (120, 100))

            if self.state == MyGame.GAME_OVER or self.state == MyGame.STARTING:
                draw(self.gameover_text, self.screen,
                     (self.width // 2, self.height // 2))
                draw(self.gameover_text2, self.screen,
                     (self.width // 2, self.height // 2 + 100))
            for i in range(self.lives):
                draw(self.lives_image, self.screen,
                     (self.lives_image.get_width() * i + 60, 50))
        else:
            draw(self.welcome_logo, self.screen, (self.width // 2, 400))
            draw(self.welcome_text, self.screen, (self.width // 2, 50))
            draw(self.welcome_desc, self.screen, (self.width // 2, 100))

        pygame.display.flip()
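The run() loop above throttles firing by comparing datetime timestamps against a half-second cooldown. The same pattern in isolation, with illustrative names only:

# Sketch of the fire-cooldown pattern used in run() above: an action is only
# allowed once at least half a second has passed since the previous one.
import datetime
import time

last_fire = datetime.datetime.now()
cooldown = datetime.timedelta(seconds=0.5)

def try_fire():
    global last_fire
    now = datetime.datetime.now()
    if now - last_fire > cooldown:
        last_fire = now
        return True       # fire
    return False          # still cooling down

print(try_fire())         # False right after start
time.sleep(0.6)
print(try_fire())         # True after the cooldown has elapsed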
Пример #54
0
import os
import time
from datetime import timedelta
import numpy as np
import cv2
import tensorflow as tf
import config as cfg
from network import Network
from py_postprocess import postprocess, draw_targets

slim = tf.contrib.slim

tfcfg = tf.ConfigProto()
tfcfg.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

net = Network(session=tf.Session(config=tfcfg),
              im_shape=cfg.inp_size,
              is_training=False)

image_name = '01.jpg'
image = cv2.imread(os.path.join(cfg.workspace, 'test', image_name))

scaled_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
scaled_image = cv2.resize(scaled_image, cfg.inp_size)
# scaled_image -= [123.68, 116.78, 103.94]
scaled_image = scaled_image / 128.0 - 1

anchors = np.round(cfg.anchors * cfg.inp_size / 416, 2)

start_t = time.time()

box_pred, iou_pred, cls_pred = net.predict(scaled_image[np.newaxis], anchors)
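The preprocessing above maps 8-bit pixel values into roughly [-1, 1) with x / 128.0 - 1. A small numpy check of that scaling, independent of TensorFlow and of the project's config:

# Sketch of the pixel scaling used above: uint8 [0, 255] -> float [-1.0, ~0.99].
import numpy as np

pixels = np.array([0, 128, 255], dtype=np.uint8)
scaled = pixels / 128.0 - 1
print(scaled)             # -> [-1.0, 0.0, 0.9921875]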
Пример #55
0
def main():
    network = Network([1, 10, 20, 30, 20, 10, 1], sigmoid.f, sigmoid.df)
    network.save("neurons.save")
    print(approximable(7), network.predict([7]))
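The sigmoid module passed into Network above is not shown. A module exposing f and df along these lines would satisfy that interface; this is a sketch that assumes df takes the raw input x rather than the activated output.

# sigmoid.py -- sketch of the activation module assumed by the example above.
import math

def f(x):
    """Logistic sigmoid."""
    return 1.0 / (1.0 + math.exp(-x))

def df(x):
    """Derivative of the logistic sigmoid with respect to x."""
    s = f(x)
    return s * (1.0 - s)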
Пример #56
0
class MainWindow(Gtk.ApplicationWindow):
    """GTK Main Window."""

    def __init__(self, config, *args, **kwargs):
        Gtk.ApplicationWindow.__init__(self, *args, **kwargs)
        self.set_default_size(950, 700)
        self.connect("delete-event", self.on_delete_event)

        # Get the settings from the config file
        self.config = config

        # Set up a list of buffer objects, holding data for every buffer
        self.buffers = BufferList()
        self.buffers.connect("bufferSwitched", self.on_buffer_switched)
        self.buffers.connect_after(
            "bufferSwitched", self.after_buffer_switched)

        # Set up GTK box
        box_horizontal = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL,
                                 spacing=0)
        self.add(box_horizontal)

        # Set up a headerbar
        self.headerbar = Gtk.HeaderBar()
        self.headerbar.set_has_subtitle(True)
        self.headerbar.set_title("Gtk-WeeChat")
        self.headerbar.set_subtitle("Not connected.")
        self.headerbar.set_show_close_button(True)
        self.set_titlebar(self.headerbar)

        # Add widget showing list of buffers
        box_horizontal.pack_start(
            self.buffers.treescrolledwindow, False, False, 0)
        sep = Gtk.Separator()
        box_horizontal.pack_start(sep, False, False, 0)

        # Add stack of buffers
        box_horizontal.pack_start(self.buffers.stack, True, True, 0)

        # Set up a menu
        menubutton = Gtk.MenuButton()
        icon = Gio.ThemedIcon(name="open-menu-symbolic")
        image = Gtk.Image.new_from_gicon(icon, Gtk.IconSize.BUTTON)
        menubutton.get_child().destroy()
        menubutton.add(image)
        menubutton.show_all()
        self.headerbar.pack_end(menubutton)
        menu = Gtk.Menu()
        menu.set_halign(Gtk.Align(3))
        menuitem_darkmode = Gtk.CheckMenuItem(label="Dark")
        menuitem_darkmode.connect("toggled", self.on_darkmode_toggled)
        menuitem_darkmode.show()
        menu.append(menuitem_darkmode)
        menu_sep = Gtk.SeparatorMenuItem()
        menu_sep.show()
        menu.append(menu_sep)
        self.menuitem_connect = Gtk.MenuItem(label="Connect")
        self.menuitem_connect.connect("activate", self.on_connect_clicked)
        self.menuitem_connect.show()
        menu.append(self.menuitem_connect)
        self.menuitem_disconnect = Gtk.MenuItem(label="Disconnect")
        self.menuitem_disconnect.connect(
            "activate", self.on_disconnect_clicked)
        self.menuitem_disconnect.set_sensitive(False)
        self.menuitem_disconnect.show()
        menu.append(self.menuitem_disconnect)
        menuitem_quit = Gtk.MenuItem(label="Quit")
        menuitem_quit.set_action_name("app.quit")
        menuitem_quit.show()
        menu.append(menuitem_quit)
        menubutton.set_popup(menu)

        # Make everything visible (All is hidden by default in GTK 3)
        self.show_all()

        # Set up the network module
        self.net = Network(self.config)
        self.net.connect("messageFromWeechat", self._network_weechat_msg)
        self.net.connect("connectionChanged", self._connection_changed)

        # Connect to connection settings signals
        CONNECTION_SETTINGS.connect("connect", self.on_settings_connect)

        # Set up actions
        action = Gio.SimpleAction.new("buffer_next", None)
        action.connect("activate", self.buffers.on_buffer_next)
        self.add_action(action)
        action = Gio.SimpleAction.new("buffer_prev", None)
        action.connect("activate", self.buffers.on_buffer_prev)
        self.add_action(action)
        action = Gio.SimpleAction.new("copy_to_clipboard", None)
        action.connect("activate", self.buffers.on_copy_to_clipboard)
        self.add_action(action)
        action = Gio.SimpleAction.new("buffer_expand", None)
        action.connect("activate", self.on_buffer_expand)
        self.add_action(action)
        action = Gio.SimpleAction.new("buffer_collapse", None)
        action.connect("activate", self.on_buffer_collapse)
        self.add_action(action)

        # Autoconnect if necessary
        if self.net.check_settings() is True and \
                            self.config["relay"]["autoconnect"] == "on":
            if self.net.connect_weechat() is False:
                print("Failed to connect.")
            else:
                self.menuitem_connect.set_sensitive(False)
                self.menuitem_disconnect.set_sensitive(True)
        else:
            CONNECTION_SETTINGS.display()

        # Enable darkmode if enabled before
        self.dark_fallback_provider = Gtk.CssProvider()
        self.dark_fallback_provider.load_from_path(
            "{}/dark_fallback.css".format(CONFIG_DIR))
        if STATE.get_dark():
            menuitem_darkmode.set_active(True)

        # Sync our local hotlist with the weechat server
        GLib.timeout_add_seconds(60, self.request_hotlist)

    def on_darkmode_toggled(self, source_object):
        """Callback for when the menubutton Dark is toggled. """
        settings = Gtk.Settings().get_default()
        dark = source_object.get_active()
        if settings.props.gtk_theme_name == "Adwaita":
            if dark:
                settings.props.gtk_application_prefer_dark_theme = True
            else:
                settings.props.gtk_application_prefer_dark_theme = False
        else:
            # Non-standard theme, use fallback style provider
            style_context = self.get_style_context()
            screen = Gdk.Screen().get_default()
            if dark:
                style_context.add_provider_for_screen(
                    screen, self.dark_fallback_provider, Gtk.STYLE_PROVIDER_PRIORITY_USER)
            else:
                style_context.remove_provider_for_screen(
                    screen, self.dark_fallback_provider)
        for buf in self.buffers:
            buf.update_buffer_default_color()
            buf.emit("notifyLevelChanged")
        STATE.set_dark(dark)

    def request_hotlist(self):
        """" Ask server to send a hotlist. """
        if self.net.connection_status is ConnectionStatus.CONNECTED:
            self.net.send_to_weechat(
                "(hotlist) hdata hotlist:gui_hotlist(*)\n")
        return True

    def on_delete_event(self, *args):
        """Callback function to save buffer state when window is closed."""
        self.save_expanded_buffers()

    def expand_buffers(self):
        """Check which nodes were expanded last time when state was saved,
        and expands them.
        """
        for buf_ptr in STATE.get_expanded_nodes():
            path = self.buffers.buffer_store.get_path_from_bufptr(buf_ptr)
            if path:
                self.buffers.tree.expand_row(path, False)

    def save_expanded_buffers(self):
        """Saves the list of expanded buffers."""
        STATE.set_expanded_nodes(self.buffers.get_expanded_nodes())

    def on_settings_connect(self, *args):
        """Callback for the menubutton connect."""
        if self.net.check_settings() is False:
            CONNECTION_SETTINGS.display()
            return
        if self.net.connection_status in (ConnectionStatus.NOT_CONNECTED,
                                          ConnectionStatus.CONNECTION_LOST):
            self.net.connect_weechat()
        elif self.net.connection_status in (ConnectionStatus.CONNECTED,
                                            ConnectionStatus.CONNECTING):
            self.net.disconnect_weechat()
            self.net.connect_weechat()

    def _connection_changed(self, *args):
        """Callback for when the network module reports a changed state."""
        self.update_headerbar()
        if self.net.connection_status == ConnectionStatus.NOT_CONNECTED:
            self.menuitem_disconnect.set_sensitive(False)
            self.menuitem_connect.set_sensitive(True)
        elif self.net.connection_status == ConnectionStatus.CONNECTING:
            self.menuitem_disconnect.set_sensitive(True)
            self.menuitem_connect.set_sensitive(False)
        elif self.net.connection_status == ConnectionStatus.CONNECTED:
            self.menuitem_disconnect.set_sensitive(True)
            self.menuitem_connect.set_sensitive(False)
        elif self.net.connection_status == ConnectionStatus.CONNECTION_LOST:
            self.save_expanded_buffers()
            self.menuitem_disconnect.set_sensitive(False)
            self.menuitem_connect.set_sensitive(True)
        elif self.net.connection_status == ConnectionStatus.RECONNECTING:
            self.menuitem_disconnect.set_sensitive(False)
            self.menuitem_connect.set_sensitive(False)
            self.save_expanded_buffers()
            print("Reconnecting in 5 seconds...")
            # Lambda function makes sure we only connect once
            GLib.timeout_add_seconds(
                5, lambda: self.net.connect_weechat() and False)

    def on_connect_clicked(self, *args):
        """Callback function for when the connect button is clicked."""
        CONNECTION_SETTINGS.display()

    def on_disconnect_clicked(self, *args):
        """Callback function for when the disconnect button is clicked."""
        print("Disonnecting")
        self.net.disconnect_weechat()
        self.buffers.clear()
        self.update_headerbar()

    def on_send_message(self, source_object, entry):
        """ Callback for when enter is pressed in entry widget """
        if self.net.connection_status != ConnectionStatus.CONNECTED:
            return
        # take a copy of the entry text before the entry buffer is cleared below
        text = copy.deepcopy(entry.get_text())
        full_name = source_object.data["full_name"]
        message = 'input %s %s\n' % (full_name, text)
        self.net.send_to_weechat(message)
        entry.get_buffer().delete_text(0, -1)

    def _network_weechat_msg(self, source_object, message):
        """Called when a message is received from WeeChat."""
        # pylint: disable=bare-except
        try:
            proto = protocol.Protocol()
            if len(message.get_data()) >= 5:
                decoded_message = proto.decode(message.get_data())
                self.parse_message(decoded_message)
            else:
                print("Error, length of received message is {} bytes.".format(
                    len(message.get_data())))
        except:
            print('Error while decoding message from WeeChat:\n%s'
                  % traceback.format_exc())
            self.net.disconnect_weechat()

    def parse_message(self, message):
        """Parse a WeeChat message."""
        if message.msgid.startswith('debug'):
            pass
        elif message.msgid == 'listbuffers':
            self._parse_listbuffers(message)
        elif message.msgid in ('listlines', '_buffer_line_added'):
            self._parse_line(message)
        elif message.msgid in ('_nicklist', 'nicklist'):
            self._parse_nicklist(message)
        elif message.msgid == '_nicklist_diff':
            self._parse_nicklist_diff(message)
        elif message.msgid == '_buffer_opened':
            self._parse_buffer_opened(message)
        elif message.msgid.startswith('_buffer_'):
            self._parse_buffer(message)
        elif message.msgid == '_upgrade':
            self.net.desync_weechat()
        elif message.msgid == '_upgrade_ended':
            self.net.sync_weechat()
        elif message.msgid == 'hotlist':
            self._parse_hotlist(message)

    def _parse_listbuffers(self, message):
        """Parse a WeeChat with list of buffers."""
        for obj in message.objects:
            if obj.objtype != 'hda' or obj.value['path'][-1] != 'buffer':
                continue
            self.buffers.clear()
            for item in obj.value['items']:
                buf = Buffer(self.config, item)
                self.buffers.append(buf)
                buf.connect("messageToWeechat", self.on_send_message)
                active_node = STATE.get_active_node()
                if buf.pointer() == active_node:
                    self.buffers.show(buf.pointer())
        self.expand_buffers()
        self.request_hotlist()

    def _parse_line(self, message):
        """Parse a WeeChat message with a buffer line."""
        for obj in message.objects:
            lines = []
            if obj.objtype != 'hda' or obj.value['path'][-1] != 'line_data':
                continue
            for item in obj.value['items']:
                notify_level = "default"
                if message.msgid == 'listlines':
                    ptrbuf = item['__path'][0]
                else:
                    ptrbuf = item['buffer']
                if (self.buffers.active_buffer() is not None and
                    ptrbuf != self.buffers.active_buffer().pointer() and
                    message.msgid != 'listlines'):
                    if item["highlight"] or "notify_private" in item["tags_array"]:
                        notify_level = "mention"
                    elif "notify_message" in item["tags_array"]:
                        notify_level = "message"
                    else:
                        notify_level = "low"
                buf = self.buffers.get_buffer_from_pointer(ptrbuf)
                if buf:
                    lines.append(
                        (ptrbuf,
                         (item['date'], item['prefix'],
                          item['message'], item['tags_array']))
                    )
                    buf.set_notify_level(notify_level)
            if message.msgid == 'listlines':
                lines.reverse()
            for line in lines:
                self.buffers.get_buffer_from_pointer(line[0]).chat.display(*line[1])
                self.buffers.get_buffer_from_pointer(line[0]).scrollbottom()
            # Trying not to freeze GUI on e.g. /list:
            while Gtk.events_pending():
                Gtk.main_iteration()

    def _parse_nicklist(self, message):
        """Parse a WeeChat message with a buffer nicklist."""
        buffer_refresh = set()
        for obj in message.objects:
            if obj.objtype != 'hda' or \
               obj.value['path'][-1] != 'nicklist_item':
                continue
            group = '__root'
            for item in obj.value['items']:
                bufptr = item['__path'][0]
                buf = self.buffers.get_buffer_from_pointer(bufptr)
                if buf is not None:
                    if not buf in buffer_refresh:
                        buf.nicklist = {}
                    buffer_refresh.add(buf)
                    if item['group']:
                        group = item['name']
                    buf.nicklist_add_item(
                        group, item['group'], item['prefix'], item['name'],
                        item['visible'])
        for buf in buffer_refresh:
            buf.nicklist_refresh()

    def _parse_nicklist_diff(self, message):
        """Parse a WeeChat message with a buffer nicklist diff."""
        buffer_refresh = set()
        for obj in message.objects:
            if obj.objtype != 'hda' or \
               obj.value['path'][-1] != 'nicklist_item':
                continue
            group = '__root'
            for item in obj.value['items']:
                bufptr = item['__path'][0]
                buf = self.buffers.get_buffer_from_pointer(bufptr)
                if buf is None:
                    continue
                buffer_refresh.add(buf)
                if item['_diff'] == ord('^'):
                    group = item['name']
                elif item['_diff'] == ord('+'):
                    buf.nicklist_add_item(
                        group, item['group'], item['prefix'], item['name'],
                        item['visible'])
                elif item['_diff'] == ord('-'):
                    buf.nicklist_remove_item(
                        group, item['group'], item['name'])
                elif item['_diff'] == ord('*'):
                    buf.nicklist_update_item(
                        group, item['group'], item['prefix'], item['name'],
                        item['visible'])
        for buf in buffer_refresh:
            buf.nicklist_refresh()

    def _parse_buffer_opened(self, message):
        """Parse a WeeChat message with a new buffer (opened)."""
        for obj in message.objects:
            if obj.objtype != 'hda' or obj.value['path'][-1] != 'buffer':
                continue
            for item in obj.value['items']:
                buf = Buffer(self.config, item)
                self.buffers.append(buf)
                buf.connect("messageToWeechat", self.on_send_message)
                self.buffers.show(buf.pointer())
                while Gtk.events_pending():
                    Gtk.main_iteration()

    def _parse_buffer(self, message):
        """Parse a WeeChat message with a buffer event
        (anything except a new buffer).
        """
        for obj in message.objects:
            if obj.objtype != 'hda' or obj.value['path'][-1] != 'buffer':
                continue
            for item in obj.value['items']:
                bufptr = item['__path'][0]
                buf = self.buffers.get_buffer_from_pointer(bufptr)
                if buf is None:
                    continue
                if message.msgid == '_buffer_type_changed':
                    buf.data['type'] = item['type']
                elif message.msgid in ('_buffer_moved', '_buffer_merged',
                                       '_buffer_unmerged'):
                    buf.data['number'] = item['number']
                elif message.msgid == '_buffer_renamed':
                    buf.data['full_name'] = item['full_name']
                    buf.data['short_name'] = item['short_name']
                    self.buffers.update_buffer(bufptr)
                    self.update_headerbar()
                elif message.msgid == '_buffer_title_changed':
                    buf.data['title'] = item['title']
                    self.update_headerbar()
                elif message.msgid == '_buffer_cleared':
                    buf.clear()
                elif message.msgid.startswith('_buffer_localvar_'):
                    buf.data['local_variables'] = \
                        item['local_variables']
                elif message.msgid == '_buffer_closing':
                    self.buffers.remove(bufptr)

    def _parse_hotlist(self, message):
        """Parse a WeeChat hotlist."""
        for buf in self.buffers:
            buf.reset_notify_level()
        for obj in message.objects:
            if not obj.value['path']:
                continue
            if obj.objtype != 'hda' or obj.value['path'][-1] != 'hotlist':
                continue
            for item in obj.value['items']:
                priority = item["priority"]
                buf = self.buffers.get_buffer_from_pointer(item["buffer"])
                if not buf:
                    continue
                if buf is self.buffers.active_buffer():
                    continue
                if priority == 0:
                    buf.set_notify_level("low")
                elif priority == 1:
                    buf.set_notify_level("message")
                elif priority == 2:
                    buf.set_notify_level("mention")
                elif priority == 3:
                    buf.set_notify_level("mention")

    def on_buffer_switched(self, source_object, bufptr):
        """ Called right before another buffer is switched to. """
        if self.buffers.active_buffer():
            cmd = "input {name} /buffer set hotlist -1\n".format(
                name=self.buffers.active_buffer().data["full_name"])
            self.net.send_to_weechat(cmd)

    def after_buffer_switched(self, source_object, bufptr):
        """ Called right after another buffer is switched to. """
        self.update_headerbar()
        if self.buffers.active_buffer():
            STATE.set_active_node(self.buffers.active_buffer().pointer())

    def on_buffer_expand(self, *args):
        """ Expand the currently selected server branch in buffer list. """
        bufptr = self.buffers.active_buffer().pointer()
        path = self.buffers.buffer_store.get_path_from_bufptr(bufptr)
        if path:
            self.buffers.tree.expand_row(path, False)

    def on_buffer_collapse(self, *args):
        """ Collapse the currently selected server branch in buffer list. """
        bufptr = self.buffers.active_buffer().pointer()
        path = self.buffers.buffer_store.get_path_from_bufptr(bufptr)
        if path:
            if path.get_depth() == 1:
                self.buffers.tree.collapse_row(path)
            else:
                path.up()
                # pylint: disable=unsubscriptable-object
                # buffer_store is a Gtk.TreeStore derived class
                self.buffers.show(self.buffers.buffer_store[path][2])
                self.buffers.tree.collapse_row(path)

    def update_headerbar(self):
        """ Updates headerbar title and subtitle. """
        if self.net.connection_status == ConnectionStatus.CONNECTED:
            if self.buffers.active_buffer() is not None:
                self.headerbar.set_title(self.buffers.get_title())
                self.headerbar.set_subtitle(self.buffers.get_subtitle())
                return
            self.headerbar.set_subtitle("Connected")
        elif self.net.connection_status == ConnectionStatus.NOT_CONNECTED:
            self.headerbar.set_subtitle("Not connected")
        elif self.net.connection_status == ConnectionStatus.CONNECTING:
            self.headerbar.set_subtitle("Connecting...")
        elif self.net.connection_status == ConnectionStatus.CONNECTION_LOST:
            self.headerbar.set_subtitle("Connection lost")
        self.headerbar.set_title("Gtk-WeeChat")
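on_send_message() and request_hotlist() above compose WeeChat relay commands as plain newline-terminated text lines. A tiny sketch of the "input" command built in on_send_message(); the helper name and the buffer full name are illustrative, not part of the project:

# Sketch of how on_send_message() above composes a relay command line.
def build_input_command(full_name, text):
    # every relay command is a single newline-terminated line of text
    return 'input %s %s\n' % (full_name, text)

print(repr(build_input_command("irc.libera.#weechat", "hello")))
# 'input irc.libera.#weechat hello\n'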
Пример #57
0
from network import Network
from network.algorithms import bfs, dfs

star_graph = Network(6, [(0, 1), (0, 2), (0, 3), (0, 4), (0, 5)])
regular_graph = Network(6, [(0, 1), (0, 3), (2, 5), (3, 4), (4, 1), (5, 1),
                            (4, 5)])
binary_tree = Network(7, [(0, 1), (0, 2), (1, 3), (1, 4), (2, 5), (2, 6)])


def test_bfs():
    assert bfs(star_graph, source=0) == list(range(star_graph.n))
    assert bfs(star_graph, source=1) == [1, 0, 2, 3, 4, 5]

    assert bfs(regular_graph, source=0) == [0, 1, 3, 4, 5, 2]
    assert bfs(regular_graph, source=1) == [1, 0, 4, 5, 3, 2]
    assert bfs(regular_graph, source=2) == [2, 5, 1, 4, 0, 3]

    assert bfs(binary_tree, source=0) == list(range(binary_tree.n))


def test_dfs():
    assert dfs(star_graph, source=0) == list(range(star_graph.n))

    assert dfs(regular_graph, source=0) == [0, 1, 4, 3, 5, 2]
    assert dfs(regular_graph, source=1) == [1, 0, 3, 4, 5, 2]
    assert dfs(regular_graph, source=5) == [5, 1, 0, 3, 4, 2]

    assert dfs(binary_tree, source=0) == [0, 1, 3, 4, 2, 5, 6]
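The Network class and the bfs/dfs implementations exercised above are not shown. The following sketch reproduces the asserted visit orders under one assumption: adjacency lists are built in edge-insertion order and neighbours are visited in that order.

# Minimal traversal sketch consistent with the tests above (illustrative only).
from collections import deque

def build_adjacency(n, edges):
    adj = [[] for _ in range(n)]
    for u, v in edges:          # undirected: store both directions
        adj[u].append(v)
        adj[v].append(u)
    return adj

def bfs_order(adj, source):
    seen, order, queue = {source}, [], deque([source])
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in adj[u]:
            if v not in seen:
                seen.add(v)
                queue.append(v)
    return order

def dfs_order(adj, source, seen=None):
    seen = seen if seen is not None else set()
    seen.add(source)
    order = [source]
    for v in adj[source]:
        if v not in seen:
            order.extend(dfs_order(adj, v, seen))
    return order

adj = build_adjacency(6, [(0, 1), (0, 3), (2, 5), (3, 4), (4, 1), (5, 1), (4, 5)])
assert bfs_order(adj, 0) == [0, 1, 3, 4, 5, 2]
assert dfs_order(adj, 0) == [0, 1, 4, 3, 5, 2]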
Пример #58
0
class Agent:
    def __init__(self, id, trained_network = None):
        self.id = id
        self.PLAYER_TURN_INDEX = 0
        self.PROPERTY_STATUS_INDEX = 1
        self.PLAYER_POSITION_INDEX = 2
        self.PLAYER_CASH_INDEX = 3
        self.PHASE_NUMBER_INDEX = 4
        self.PHASE_PAYLOAD_INDEX = 5
        self.ACTION_TYPES = 3
        self.ACTIONS = [-1,0,1] #-1 -> earn money by selling, 0->do nothing, 1->build, buy type action
        self.ACTION_SELL = -1
        self.ACTION_NOTHING = 0
        self.ACTION_BUY = 1
        self.QTable = {}
        self.FIRST_PROP_RATIO = 'firstPropPerc'
        self.SECOND_PROP_RATIO = 'secPropPerc'
        self.MONEY_RATIO = 'moneyRatio'
        self.PROP_RATIO = 'propertyRatio'
        self.POSITION = 'position'
        self.lastState = None
        self.lastAction = None
        self.INPUT_NODES = 24
        self.network = Network()
        
        if trained_network is not None:
            self.network = trained_network
        else:
            # fall back to the pre-trained network pickled on disk
            with open("network_without_fix_2.txt", 'rb') as file:
                self.network = pickle.load(file)

        self.constructionException = ["Railroad", "Utility"]
        self.traces = []
        self.STATE_IDX = 'state'
        self.ACTION_IDX = 'action'
        self.VALUE_IDX = 'value'
        
        self.jailDiceRolls = 0


    def getBSMTDecision(self, state):
        # Check for Debt field and clear
        debt = self.getDebt(state)
        if debt > 0:
            cash = self.getCash(state)

            # Sell/Mortgage cheapest property
            if cash < debt:
                # Try selling first
                action = self.sell(state)

                # If nothing to sell then mortgage
                if action == None :
                    action = self.mortgage(state)
                
                return action
            # Enough cash to handle debt. Do Nothing


        action = self.agent_step(state)

        if action == 1:
            constructions = self.getMaxConstructions(state)
            if constructions != None:
                #print (state[1])
                #print ('constructions1: ' + str(constructions))
                return ("B", constructions)
            return None

        elif action == -1:
            sell_action = self.sell(state)
            return sell_action
        else:
            return None

    def respondTrade(self, state):
        pass

    def buyProperty(self, state):
        action = self.agent_step(state)
        if action == 1:
            return True
        else:
            return False

    def auctionProperty(self, state):
        position = self.getTurnPlayerPosition(state)
        price = self.getPropertyPrice(position)

        return np.random.uniform(AUCTION_BID_MIN, AUCTION_BID_MAX) * price

    def receiveState(self, state):
        pass

    def jailDecision(self, state):
        turns = state[PLAYER_TURN_INDEX]
        self.jailDiceRolls += 1

        if turns <= TURNS_JAIL_HEURISTICS:
            self.jailDiceRolls = 0

            if self.hasJailCard(state):
                return ("C", self.getJailCard(state))
            return "P"
        
        # Try Stalling and evade paying rent. Can't evade third time
        if self.jailDiceRolls == 3:
            self.jailDiceRolls = 0

            if self.hasJailCard(state):
                return ("C", self.getJailCard(state))

            return "P"

        return "R"




    # Fixed Policy Methods

    def hasJailCard(self, state):
        return self.isPropertyOwned(state[PROPERTY_STATUS_INDEX][CHANCE_GET_OUT_OF_JAIL_FREE]) \
            or self.isPropertyOwned(state[PROPERTY_STATUS_INDEX][COMMUNITY_GET_OUT_OF_JAIL_FREE])

    def getJailCard(self, state):
        if self.isPropertyOwned(state[PROPERTY_STATUS_INDEX][CHANCE_GET_OUT_OF_JAIL_FREE]):
            return CHANCE_GET_OUT_OF_JAIL_FREE
        elif self.isPropertyOwned(state[PROPERTY_STATUS_INDEX][COMMUNITY_GET_OUT_OF_JAIL_FREE]):
            return COMMUNITY_GET_OUT_OF_JAIL_FREE

        raise Exception('No jail card found')

    def getDebt(self, state):
        return state[DEBT_INDEX][(self.id - 1) * 2 + 1]

    def getCash(self, state):
        return state[PLAYER_CASH_INDEX][self.id - 1]

    def sell(self, state):
        ownedProperties = self.getOwnedProperties(state)

        sellingProperty = None
        for tup in ownedProperties:
            if tup[1] > 1:
                sellingProperty = (tup[0], 1)
                break

        if sellingProperty != None:
            return ('S', [sellingProperty])

        return None

    def mortgage(self, state):
        ownedProperties = self.getOwnedProperties(state)

        mortgagingProperty = None
        for tup in ownedProperties:
            if tup[1] == 1:
                mortgagingProperty = tup[0]
                break

        if mortgagingProperty != None:
            return ('M', [mortgagingProperty])

        return None

    def getOwnedProperties(self, state):
        properties = []
        for i, val in enumerate(state[PROPERTY_STATUS_INDEX]):
            if self.isPropertyOwned(val) \
                and i != 0 \
                and i != CHANCE_GET_OUT_OF_JAIL_FREE \
                and i != COMMUNITY_GET_OUT_OF_JAIL_FREE:
                properties.append((i, abs(val), self.getPropertyPrice(i)))
        
        properties.sort(key=lambda tup: (tup[1], tup[2]), reverse = True)

        return properties

    def isPropertyOwned(self, propertyStatus):
        return (self.id == 1 and propertyStatus > 0) or (self.id == 2 and propertyStatus < 0)

    def getPlayerTurn(self, state):
        return state[PLAYER_TURN_INDEX]%2

    def getTurnPlayerPosition(self, state):
        playerTurn = self.getPlayerTurn(state)
        return state[PLAYER_POSITION_INDEX][playerTurn]

    def getPropertyPrice(self, position):
        return constants.board[position]['price']



    # RL Agent Specific Methods

    def randomAction(self):
        return random.choice(self.ACTIONS)

    def smooth (self, reward, factor):
        return (reward/factor) / (1 + abs(reward/factor))
    
    def myId(self):
        return self.id-1

    def calculateReward(self, state):
        reward = 0

        playerSign = 1
        key = self.FIRST_PROP_RATIO

        currentPlayerId = self.myId() #state[self.PLAYER_TURN_INDEX]%2

        if currentPlayerId == 1: #player id
            playerSign = -1
            key = self.SECOND_PROP_RATIO

        for property in state[self.PROPERTY_STATUS_INDEX]:
            if playerSign * property > 0: #property owned by the player
                if abs(property) != 7: #not mortgaged
                    reward += abs(property)
            else: #property owned by opponent (or not owned by anyone, then no effect on reward)
                if abs(property) != 7:
                    reward -= abs(property)

        
        transformed_state = self.transform_state(state)
        for item in transformed_state[key]:
            if item >= 0.9:  # monopoly group (almost) fully owned by this player
                reward += 1
            elif item <= 0.1:  # (almost) fully owned by the opponent
                reward -= 1

        
        alivePlayers = 2.0

        assetFactor = state[self.PLAYER_CASH_INDEX][currentPlayerId]
        totalAsset = state[self.PLAYER_CASH_INDEX][0] + state[self.PLAYER_CASH_INDEX][1]

        if totalAsset == 0:
            assetFactor = 0
        else:
            assetFactor /= totalAsset
        reward = self.smooth (reward, alivePlayers*5) #aliveplayers * 5
        reward = reward + (1/alivePlayers) * assetFactor

        #print ('player: ' + str(currentPlayerId) + ', reward: ' + str(reward))
        return reward

    def getQVal(self, input_state):
        #getfromdict or getfromNN
        return self.network.run(input_state)

    def createInput(self, tstate, action = 0):
        input_state = [0] * self.INPUT_NODES
        input_state[0] = (action + 2.0) / 3.0 #normalizing action between 0 and 1

        j = 1
        for i in range(len(tstate[self.FIRST_PROP_RATIO])):
            input_state[j] = tstate[self.FIRST_PROP_RATIO][i]
            j += 1
            input_state[j] = tstate[self.SECOND_PROP_RATIO][i]
            j += 1
        
        input_state[j] = tstate[self.PROP_RATIO]
        j += 1
        input_state[j] = tstate[self.MONEY_RATIO]
        j += 1
        input_state[j] = tstate[self.POSITION]

        input_state = self.network.getTensor(input_state)

        return input_state

    #returns vals from QTable ( can be dict, can be NN )
    def calculateQValues(self, state): #transformed_state
        tstate = self.transform_state(state)
        input_state = self.createInput(tstate)
        tempQ = [0] * self.ACTION_TYPES
        for i in range(self.ACTION_TYPES):
            input_state[0] = (i+1.0)/3.0 #normalising the action part
            tempQ[i] = self.getQVal(input_state)
        return tempQ

    def findMaxValues(self, QValues):
        maxQ = QValues[0]
        selectedAction = self.ACTIONS[0]

        for i in range (self.ACTION_TYPES):
            if QValues[i] > maxQ:
                maxQ = QValues[i]
                selectedAction = i-1
            elif QValues[i] == maxQ:
                rnd1 = random.randint(0,1000)
                rnd2 = random.randint(0,1000)
                if rnd2 > rnd1:
                    maxQ = QValues[i]
                    selectedAction = i-1
        
        return selectedAction


    def e_greedySelection(self, QValues):
        action = self.ACTION_NOTHING
        rand = random.uniform(0, 1)

        if (rand >= self.network.epsilon):
            action = self.findMaxValues(QValues)
        else:
            action = self.randomAction()
        
        return action


    def QLearning (self, lastState, lastAction, newState, bestAction, reward):
        lastStateInput = self.createInput(self.transform_state(lastState), lastAction)
        newStateInput = self.createInput(self.transform_state(newState), bestAction)

        QValue = self.network.run(lastStateInput)

        previousQ = QValue
        newQ = self.network.run(newStateInput)

        QValue += self.network.alpha * (reward + self.network.gamma * newQ - previousQ)
        return QValue
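    # The update above is the standard one-step Q-learning (TD) target:
    #     Q(s, a) <- Q(s, a) + alpha * (r + gamma * max_a' Q(s', a') - Q(s, a))
    # newStateInput is built with the greedy action for the new state, so
    # self.network.run(newStateInput) approximates max_a' Q(s', a').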

    def initParams(self):
        pass

    def agent_start (self, state):
        self.network.currentEpoch += 1
        self.initParams()

        QValues = self.calculateQValues(state)
        action = self.e_greedySelection(QValues)

        self.lastAction = action
        self.lastState = state

        self.traces.append ( {self.STATE_IDX : self.lastState,
                              self.ACTION_IDX : self.lastAction, 
                              self.VALUE_IDX : 1} )

        return action

    def updateQTraces (self, state, action, reward):
        found = False
        removeIds = []
        for i in range(len(self.traces)): #item -> (state, action)
            if self.checkSimilarity(state, self.traces[i][self.STATE_IDX]) == True and self.traces[i][self.ACTION_IDX] != action:
                removeIds.append(i)

            elif self.checkSimilarity(state, self.traces[i][self.STATE_IDX]) == True and self.traces[i][self.ACTION_IDX] == action:
                found = True
                self.traces[i][self.VALUE_IDX] = 1

                qT = self.network.run( self.createInput(self.transform_state(self.traces[i][self.STATE_IDX]), self.traces[i][self.ACTION_IDX]) )

                act = self.findMaxValues(self.calculateQValues(state))
                maxQt = self.network.run (self.createInput(self.transform_state(state), act))

                act = self.findMaxValues(self.calculateQValues(self.lastState))
                maxQ = self.network.run (self.createInput(self.transform_state(self.lastState), act))
                
                qVal = qT + self.network.alpha * (self.traces[i][self.VALUE_IDX]) * (reward + self.network.gamma * maxQt - maxQ)

                self.network.train(self.createInput( self.transform_state(self.traces[i][self.STATE_IDX]), self.traces[i][self.ACTION_IDX] ), qVal)

            else:
                self.traces[i][self.VALUE_IDX] *= self.network.gamma * self.network.lamda

                qT = self.network.run( self.createInput(self.transform_state(self.traces[i][self.STATE_IDX]), self.traces[i][self.ACTION_IDX]) )
                
                act = self.findMaxValues(self.calculateQValues(state))
                maxQt = self.network.run (self.createInput(self.transform_state(state), act))

                act = self.findMaxValues(self.calculateQValues(self.lastState))
                maxQ = self.network.run (self.createInput(self.transform_state(self.lastState), act))

                qVal = qT + self.network.alpha * (self.traces[i][self.VALUE_IDX]) * (reward + self.network.gamma * maxQt - maxQ)

                self.network.train(self.createInput( self.transform_state(self.traces[i][self.STATE_IDX]), self.traces[i][self.ACTION_IDX] ), qVal)


        temp_list = []
        for j in range(len(self.traces)):
            if j not in removeIds:
                temp_list.append(self.traces[j])
        self.traces = temp_list
           
        return found



    #returns action
    def agent_step (self, state):

        tempTState = self.transform_state(state)
        if tempTState[self.POSITION] == None:
            return self.ACTION_NOTHING

        
        if self.lastState is None:
            return self.agent_start(state)
                

        #get reward on state
        reward = self.calculateReward(state) #original state reqd

        transformed_state = self.transform_state(state) #needed here ?
        input_state = self.createInput(transformed_state) #needed here ?

        #Calculate Qvalues
        QValues = self.calculateQValues(state) #transformed->input state reqd

        #Select action
        action = self.e_greedySelection(QValues)

        QValue = 0
        exists = False

        exists = self.updateQTraces (state, action, reward)

        #tranformed->input state reqd
        QValue = self.QLearning (self.lastState, self.lastAction, state, self.findMaxValues(QValues), reward)

        transformed_lastState = self.transform_state(self.lastState)
        input_lastState = self.createInput(transformed_lastState, self.lastAction)
        self.network.train(input_lastState, QValue)


        if exists == False:
            self.traces.append ( {self.STATE_IDX : self.lastState,
                              self.ACTION_IDX : self.lastAction, 
                              self.VALUE_IDX : 1} )
        
        self.lastAction = action
        self.lastState = state

        return action

    def getMaxConstructions(self, state):
        monopolyGroups = self.getPropertyGroups()
        currentPlayer = self.myId() #state[self.PLAYER_TURN_INDEX] % 2
        playerCash = state[self.PLAYER_CASH_INDEX][currentPlayer]
        propertyStatus = state[self.PROPERTY_STATUS_INDEX]
        propertiesConstructionOrder = {}

        for (groupName, groupPositions) in monopolyGroups.items():
            if groupName in self.constructionException:
                continue
            if not self.allPropertiesOfMonopolyOwned(state, currentPlayer, groupPositions):
                continue
            else:
                playerCash = self.buildPropertiesInOrder(playerCash, propertyStatus, groupPositions,
                                                         propertiesConstructionOrder)

        if len(propertiesConstructionOrder) == 0:
            return None
        else:
            constructionOrderResult = []
            for propertyId, constructions in propertiesConstructionOrder.items():
                constructionOrderResult.append((propertyId, constructions))
            return constructionOrderResult

    def buildPropertiesInOrder(self, playerCashHolding, propertyStatus, groupPositions, propertiesConstructionOrder):
        min, max, statusDict = self.getMinMaxPropertyStatus(propertyStatus, groupPositions)

        # Bringing all properties at same level
        if min < max:
            for propertyId, status in statusDict.items():
                if status == min and playerCashHolding > self.getConstructionPrice(propertyId):
                    
                    if propertiesConstructionOrder.get(propertyId, None) == None:
                        propertiesConstructionOrder[propertyId] = 1
                    else:
                        propertiesConstructionOrder[propertyId] += 1

                    statusDict[propertyId] += 1
                    playerCashHolding -= self.getConstructionPrice(propertyId)
                else:
                    return playerCashHolding

        # Incrementally, Increasing 1 construction on each property
        # Min=Max and Max construction is Hotel(6)
        sortedPropertyTyples = sorted(statusDict.items(), key=lambda x: self.getConstructionPrice(x[0]))
        # statusDict = sorted(statusDict.items(), key =lambda item: item[1])

        for (propertyId, status) in sortedPropertyTyples:
            statusDict[propertyId] = status
            if status < 6 and playerCashHolding > self.getConstructionPrice(propertyId):
                statusDict[propertyId] += 1
                if propertiesConstructionOrder.get(propertyId, None) == None:
                    propertiesConstructionOrder[propertyId] = 1
                else:
                    propertiesConstructionOrder[propertyId] += 1
                playerCashHolding -= self.getConstructionPrice(propertyId)
                max = statusDict[propertyId]
            else:
                break
        return playerCashHolding

    def getConstructionPrice(self, propertyId):
        property = constants.board[propertyId]
        return property["build_cost"]

    def getMinMaxPropertyStatus(self, propertyStatus, groupPositions):
        # Calculate Min and Max constructions on property. # Property between -7 and 7
        min = 10
        max = 0
        dict = {}
        for position in groupPositions:
            status = abs(propertyStatus[position])
            dict[position] = status
            if status < min:
                min = status
            if status > max:
                max = status
        return min, max, dict

    def allPropertiesOfMonopolyOwned(self, state, playerId, monopolyGroup):
        propertyOwner = self.getPropertyOwner(state, monopolyGroup[0])
        if playerId != propertyOwner:
            return False

        for position in monopolyGroup:
            if propertyOwner != self.getPropertyOwner(state, position):
                return False
        return True

    def getPropertyOwner(self, state, position):
        # Player 1
        propertyStatus = state[self.PROPERTY_STATUS_INDEX]
        if propertyStatus[position] > 0:
            return 0
        # Player 2
        elif propertyStatus[position] < 0:
            return 1
        else:
            return -1

    def checkSimilarity(self, firstState, secondState):
        SIMILARITY_THRESHOLD = 0.1
        obs1 = self.transform_state(firstState) #TODO: use state's playerid
        obs2 = self.transform_state(secondState) #TODO: same

        # check Diff in Money
        moneyDif = abs(obs1["propertyRatio"] - obs2["propertyRatio"]) + \
                   abs(obs1["moneyRatio"] - obs2["moneyRatio"])

        if moneyDif >= SIMILARITY_THRESHOLD:
            return False

        # Check diff in position
        if obs1["position"] != obs2["position"]:
            return False

        # check Diff in Group
        obs1Group1 = obs1["firstPropPerc"]
        obs1Group2 = obs1["secPropPerc"]
        obs2Group1 = obs2["firstPropPerc"]
        obs2Group2 = obs2["secPropPerc"]

        p1 = firstState[self.PLAYER_TURN_INDEX]%2
        p2 = secondState[self.PLAYER_TURN_INDEX]%2

        if (p1 != p2): #for comparing the player1 with player1, and vice verse
            temp = obs2Group1
            obs1Group1 = obs1Group2
            obs1Group2 = temp

        diff1 = 0
        diff2 = 0
        for i in range(len(obs1Group1)):
            diff1 += abs(obs1Group1[i] - obs2Group1[i])
            diff2 += abs(obs1Group2[i] - obs2Group2[i])
            if diff1 > SIMILARITY_THRESHOLD or diff2 > SIMILARITY_THRESHOLD:
                return False

        return True

    def transform_state(self, state, playerId = None):

        if playerId is None:
            playerId = state[self.PLAYER_TURN_INDEX] % 2

        firstPropertyPercentage, secondPropertyPercentage = self.calculatePropertyGroupPercentage(state)
        moneyRatio, propertyRatio = self.calculateFinancePercentage(state, playerId)
        position = self.getNormalizedPosition(state, playerId)

        observation = {}
        observation["firstPropPerc"] = firstPropertyPercentage  # player 0's group percentages
        observation["secPropPerc"] = secondPropertyPercentage   # player 1's group percentages
        observation["moneyRatio"] = moneyRatio                  # current player's
        observation["propertyRatio"] = propertyRatio            # current player's
        observation["position"] = position                      # current player's
        return observation

    def getNormalizedPosition(self, state, playerId):
        propertyGroups = self.getPropertyGroups()
        propertyGroupToUnifMapping = {}
        start = 0.1

        # Map every property in a monopoly group to the same value in (0, 1].
        orderedPropertyGroups = collections.OrderedDict(sorted(propertyGroups.items()))
        for monopolyName, monopolyProperties in orderedPropertyGroups.items():
            for propertyId in monopolyProperties:
                propertyGroupToUnifMapping[propertyId] = round(start, 2)
            start += 0.1

        position = state[self.PLAYER_POSITION_INDEX][playerId]
        return propertyGroupToUnifMapping.get(position, None)

    def calculateFinancePercentage(self, state, playerId):
        # Player 0 owns properties with positive status, player 1 with negative status.
        sign = 1 if playerId == 0 else -1
        return self.calculateMoneyPercentage(state, playerId), self.calculatePropertiesPercentage(state, sign)

    def calculateMoneyPercentage(self, state, playerId):
        # Assumption: the combined cash of both players is non-zero (avoids division by zero).
        moneyOwned = state[self.PLAYER_CASH_INDEX][playerId]
        opponentId = (playerId + 1) % 2
        opponentMoney = state[self.PLAYER_CASH_INDEX][opponentId]
        return 1.0 * moneyOwned / (moneyOwned + opponentMoney)

    def calculatePropertiesPercentage(self, state, sign):
        # sign: +1 counts player 0's properties, -1 counts player 1's
        propertyStatus = state[self.PROPERTY_STATUS_INDEX]
        total = 0
        owned = 0
        for status in propertyStatus:
            if status != 0:
                total += 1
                if sign == (status / abs(status)):
                    owned += 1

        if total == 0:
            return 0
        else:
            return 1.0 * owned / total

    def calculatePropertyGroupPercentage(self, state):
        propertyGroups = self.getPropertyGroups()
        propertyStatus = state[self.PROPERTY_STATUS_INDEX]
        propertyZeroPercentage = []
        propertyOnePercentage = []

        orderedPropertyGroups = collections.OrderedDict(sorted(propertyGroups.items()))  # sorted by monopoly name
        for monopolyName, monopolyProperties in orderedPropertyGroups.items():
            ownZero = 0
            ownOne = 0
            for propertyId in monopolyProperties:
                status = propertyStatus[propertyId]
                if status > 0:
                    ownZero += 1
                elif status < 0:
                    ownOne += 1
            if ownOne + ownZero > 0:
                perc = 1.0 * ownZero / (ownOne + ownZero)
                perc = round(perc, 4)
                propertyZeroPercentage.append(perc)
                perc = 1.0 * ownOne / (ownOne + ownZero)
                perc = round(perc, 4)
                propertyOnePercentage.append(perc)
            else:
                propertyZeroPercentage.append(0)
                propertyOnePercentage.append(0)

        return propertyZeroPercentage, propertyOnePercentage
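
    # Illustrative example: the two returned lists are aligned with the sorted
    # monopoly names. If player 0 owns two of the three properties in a group
    # and player 1 owns the third, that group contributes 0.6667 to the first
    # list and 0.3333 to the second; groups with no owned property contribute
    # 0 to both.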

    def getPropertyGroups(self):
        # Build a mapping from monopoly name to the list of property ids in that group.
        propertyGroup = {}
        for propertyId, value in constants.board.items():
            group = propertyGroup.get(value["monopoly"])
            if group is None and value.get("monopoly_group_elements") is not None:
                group = set(value["monopoly_group_elements"])
                group.add(propertyId)
            propertyGroup[value["monopoly"]] = group
        propertyGroup.pop('None', None)  # squares that belong to no monopoly group

        for key, value in propertyGroup.items():
            propertyGroup[key] = list(value)
        return propertyGroup
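
    # Hypothetical usage sketch for the helpers above (`agent` and `state` are
    # assumptions; `state` must expose the *_INDEX slots used throughout):
    #
    #   obs = agent.transform_state(state)   # normalized observation for the player to move
    #   obs["moneyRatio"]                    # scalar in [0, 1]
    #   obs["position"]                      # group-based value, or None on non-property squares
    #   groups = agent.getPropertyGroups()   # monopoly name -> list of property ids
    #   agent.allPropertiesOfMonopolyOwned(state, 0, groups[monopolyName])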
Пример #59
0
import logging
import os
import sys

# NOTE: this snippet also relies on a config module `cf`, a `Network` class and
# the helpers `plot_network` / `save_csv`; their imports are not shown here.


def main():
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    protocols = cf.PROTOCOLS

    # draw_sensing_model()
    if not os.path.exists(cf.DIR_NAME):  # create dir
        os.makedirs(cf.DIR_NAME)

    result_columns = [
        'protocol', 'num_of_targets', 'num_of_nodes', 'first_nodes_die',
        'lifetime', 'avg_computation_time', 'run'
    ]
    save_csv(result_columns)

    for scenario in cf.SCENARIO_LIST:
        cf.NUM_NODES = scenario[0]
        cf.NUM_TARGETS = scenario[1]

        cf.SUB_DIR_NAME = cf.DIR_NAME + str(cf.NUM_TARGETS) + '_' + str(
            cf.NUM_NODES) + '/'
        if not os.path.exists(
                cf.SUB_DIR_NAME):  # create sub dir for each scenario
            os.makedirs(cf.SUB_DIR_NAME)

        for i in range(cf.NUM_OF_RUN):
            costs = []
            active_node_trace = []
            cf.SUB_SUB_DIR_NAME = cf.SUB_DIR_NAME + str(i) + '/'
            if not os.path.exists(
                    cf.SUB_SUB_DIR_NAME
            ):  # create sub sub dir for one run of the scenario
                os.makedirs(cf.SUB_SUB_DIR_NAME)

            network = Network(i)
            plot_network(
                network,
                filename='net_model_' + str(i),
                file_type='png',
                save_dir=cf.SUB_SUB_DIR_NAME,
            )

            # simulation start
            for p in protocols:
                logging.info(str(p) + ' simulation start----------------')
                protocol_class = eval(p[0])  # p[0] is the protocol class name as a string
                network.scheduling_protocol = protocol_class()

                if p[1] is not None:
                    # entries after the class name are (name, value) pairs passed to replace()
                    for var in p[1:]:
                        network.scheduling_protocol.replace(var[0], var[1])

                network.simulate()

                result = network.get_result()
                result.append(i)
                save_csv(result)

                active_node_trace.append(
                    network.get_num_of_active_nodes_trace())
                costs.append(network.get_cost_trace())

                logging.info('simulation end----------------')
                network.reset()

            # save trace results
            for p in range(len(protocols)):
                save_csv(costs[p],
                         file_name='cost_' + str(i),
                         save_dir=cf.SUB_DIR_NAME)
                save_csv(active_node_trace[p],
                         file_name='active_node_trace_' + str(i),
                         save_dir=cf.SUB_DIR_NAME)
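
# Illustrative (hypothetical) shape of cf.PROTOCOLS assumed by the loop above:
# each entry starts with the name of a protocol class available in this scope
# (resolved via eval), followed either by None or by (parameter, value) pairs
# that are applied with scheduling_protocol.replace(...). The class and
# parameter names below are made up for illustration only:
#
#   PROTOCOLS = [
#       ('GreedyScheduling', None),
#       ('GeneticScheduling', ('population_size', 50), ('generations', 200)),
#   ]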
Пример #60
0
import glob
import io
from network import Network
from language import Language
from pybrain.tools.validation import CrossValidator
from pybrain.tools.validation import ModuleValidator

languages = []

for g in glob.glob("./data/*.txt"):
  language, num = g.split("/")[-1].split("_")
  languages.append(Language(io.open(g, 'r+'), language))

n = Network(languages)
n.train()
n.trainer.verbose = True
n.trainer.trainUntilConvergence()

def correctValFunc(output, target):
  # Returns the classification error rate (1 - accuracy).
  assert len(output) == len(target)

  n_correct = 0

  for idx, instance in enumerate(output):
    # This finds the maximum-likelihood language (index of the largest output value)
    classification = instance.argmax(axis=0)
    objective = target[idx].argmax(axis=0)
    if objective == classification:
      n_correct += 1

  return 1 - (float(n_correct) / float(len(output)))
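
# A minimal self-contained check of correctValFunc, assuming the outputs and
# targets are numpy arrays (the values below are made up for illustration):
#
#   import numpy as np
#   output = [np.array([0.1, 0.8, 0.1]), np.array([0.6, 0.3, 0.1])]
#   target = [np.array([0.0, 1.0, 0.0]), np.array([0.0, 0.0, 1.0])]
#   correctValFunc(output, target)   # -> 0.5: one of the two samples is classified correctly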