def attention(hidden_conv, inf_hidden):
     """
     """
     shape = hidden_conv.get_shape().as_list()
     num_regions = shape[1] * shape[2]
     hidden_conv = tf.reshape(hidden_conv, [-1, num_regions, shape[3]])
     inf_hidden_vec = []
     for head in range(self.parameters['num_heads']):
         linear_conv = net.fcn(hidden_conv, 1,
                               self.parameters['num_att_units'], None,
                               'att', 'att1_' + str(head))
         linear_hidden = net.fcn(inf_hidden, 1,
                                 self.parameters['num_att_units'], None,
                                 'att', 'att2_' + str(head))
         context = tf.nn.tanh(linear_conv +
                              tf.expand_dims(linear_hidden, 1))
         attention_weights = net.fcn(context, 1, [1], None, 'att',
                                     'att3_' + str(head))
         attention_weights = tf.nn.softmax(attention_weights, axis=1)
         d_patch = tf.reduce_sum(attention_weights * hidden_conv,
                                 axis=1)
         inf_hidden_vec.append(
             tf.concat([
                 d_patch,
                 tf.reshape(attention_weights, shape=[-1, num_regions])
             ],
                       axis=1))
     inf_hidden = tf.concat(inf_hidden_vec, axis=1)
     return inf_hidden
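
A shape-flow sketch of a single attention head above, written with plain TF 1.x ops in place of the repo's net.fcn helper; the shapes and layer calls here are illustrative assumptions, not the original code:

import tensorflow as tf

# Assumed dummy shapes: batch of 7x7x64 conv maps, 128 attention units.
height, width, channels, att_units = 7, 7, 64, 128
hidden_conv = tf.placeholder(tf.float32, [None, height, width, channels])
inf_hidden = tf.placeholder(tf.float32, [None, att_units])

num_regions = height * width
regions = tf.reshape(hidden_conv, [-1, num_regions, channels])        # [B, R, C]
linear_conv = tf.layers.dense(regions, att_units)                     # [B, R, A]
linear_hidden = tf.layers.dense(inf_hidden, att_units)                # [B, A]
context = tf.nn.tanh(linear_conv + tf.expand_dims(linear_hidden, 1))  # broadcast over R
weights = tf.nn.softmax(tf.layers.dense(context, 1), axis=1)          # [B, R, 1]
d_patch = tf.reduce_sum(weights * regions, axis=1)                    # [B, C] attended patch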
Example #2
 def __init__(self, ocp_client, sampling_client):
     self.ocp_client = ocp_client
     self.sampling_client = sampling_client
     # Define an estimator
     self.estimator = Networks(NX, NU,
                               x_range=np.array([X_MIN, X_MAX]),
                               u_range=np.array([U_MIN, U_MAX]))
Example #3
        def automatic_dpatch(hidden):
            """
            """
            if self.parameters['obs_type'] == 'image':
                shape = hidden.get_shape().as_list()
                num_regions = shape[1]*shape[2]
                hidden = tf.reshape(hidden, [-1, num_regions, shape[3]])
                inf_hidden = net.fcn(hidden, 1, self.parameters["inf_num_predictors"],
                                     None, 'fcn_auto_dset')
                # for predictor in range(self.parameters['inf_num_predictors']):
                #     name = "weights"+str(predictor)
                    # weights = tf.get_variable(name, shape=(num_regions,1), dtype=tf.dtypes.float32,
                                            #   initializer=tf.ones_initializer, trainable=True)
                    # softmax_weights = tf.contrib.distributions.RelaxedOneHotCategorical(0.1, weights)
                    # softmax_weights = tf.reshape(softmax_weights,[num_regions,1])
                    # softmax_weights = tf.nn.softmax(weights, axis=0)
                    # inf_hidden.append(tf.reduce_sum(softmax_weights*hidden, axis=1))

                # inf_hidden = tf.stack(inf_hidden, axis=1)
                hidden_size = inf_hidden.get_shape().as_list()[2]*self.parameters['inf_num_predictors']
                inf_hidden = tf.reshape(inf_hidden, shape=[-1, hidden_size])
            else:
                inf_hidden = net.fcn(hidden, 1, self.parameters["inf_num_predictors"],
                                     None, 'fcn_auto_dset')
                # shape = hidden.get_shape().as_list()
                # num_variables = shape[1]
                # for predictor in range(self.parameters['inf_num_predictors']):
                #     name = "weights"+str(predictor)
                #     weights = tf.get_variable(name, shape=(1, num_variables), dtype=tf.dtypes.float32,
                #                               initializer=tf.ones_initializer, trainable=True)
                #     softmax_weights = tf.nn.softmax(weights, axis=0)
                #     inf_hidden.append(tf.reduce_sum(softmax_weights*hidden, axis=1))
                # inf_hidden = tf.stack(inf_hidden, axis=1)
            return inf_hidden  # , softmax_weights
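
The commented-out block above sketches a learned soft selection over regions (optionally sampled with a relaxed one-hot categorical). A minimal, hedged reconstruction of the deterministic softmax variant, with assumed shapes:

import tensorflow as tf

num_regions, channels = 49, 64                             # assumed shapes
hidden = tf.placeholder(tf.float32, [None, num_regions, channels])
weights = tf.get_variable("region_weights", shape=(num_regions, 1),
                          initializer=tf.ones_initializer())
softmax_weights = tf.nn.softmax(weights, axis=0)           # [R, 1], sums to 1 over regions
pooled = tf.reduce_sum(softmax_weights * hidden, axis=1)   # [B, C] soft-selected patch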
Example #4
    def __init__(self, params):
        self.batch_size = params.batch_size
        self.batch_size_val = params.batch_size_val
        self.initial_temperature = params.temperature
        self.decay_temperature = params.decay_temperature
        self.num_epochs = params.num_epochs
        self.loss_type = params.loss_type
        self.num_classes = params.num_classes
        self.w_gauss = params.w_gaussian
        self.w_categ = params.w_categorical
        self.w_recon = params.w_reconstruction
        self.decay_temp_rate = params.decay_temp_rate
        self.gaussian_size = params.gaussian_size
        self.min_temperature = params.min_temperature
        self.temperature = params.temperature  # current temperature
        self.verbose = params.verbose

        self.sess = tf.Session()
        self.network = Networks(params)
        self.losses = LossFunctions()

        self.learning_rate = tf.placeholder(tf.float32, [])
        self.lr = params.learning_rate
        self.decay_epoch = params.decay_epoch
        self.lr_decay = params.lr_decay

        self.dataset = params.dataset
        self.metrics = Metrics()
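
The constructor stores a Gumbel-softmax style temperature schedule (initial_temperature, decay_temp_rate, min_temperature). A hedged sketch of the per-epoch exponential decay this suggests; the exact schedule used by the repo may differ:

import numpy as np

def anneal_temperature(initial_temp, decay_rate, min_temp, epoch):
    # Exponential decay, clipped from below at min_temp.
    return max(min_temp, initial_temp * np.exp(-decay_rate * epoch))

# e.g. anneal_temperature(1.0, 0.013, 0.5, epoch=50) -> ~0.52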
Example #5
    def build_main_model(self):
        """
        Builds neural network model to approximate policy and value functions
        """
        if self.parameters['obs_type'] == 'image':
            self.observation = tf.placeholder(shape=[
                None, self.parameters["frame_height"],
                self.parameters["frame_width"], self.parameters["num_frames"]
            ],
                                              dtype=tf.float32,
                                              name='observation')
        else:
            self.observation = tf.placeholder(
                shape=[None, self.parameters['obs_size']],
                dtype=tf.float32,
                name='observation')
        # normalize input
        if self.parameters['env_type'] == 'atari':
            self.observation = tf.cast(self.observation, tf.float32) / 255.

        if self.convolutional:
            self.feature_vector = net.cnn(self.observation,
                                          self.parameters["num_conv_layers"],
                                          self.parameters["num_filters"],
                                          self.parameters["kernel_sizes"],
                                          self.parameters["strides"],
                                          tf.nn.relu, False, 'cnn')
            network_input = c_layers.flatten(self.feature_vector)
        else:
            self.feature_vector = self.observation
            network_input = self.feature_vector

        if self.fully_connected:
            hidden = net.fcn(network_input, self.parameters["num_fc_layers"],
                             self.parameters["num_fc_units"], tf.nn.relu,
                             'fcn')

        if self.recurrent:
            self.prev_action = tf.placeholder(shape=[None],
                                              dtype=tf.int32,
                                              name='prev_action')
            self.prev_action_onehot = c_layers.one_hot_encoding(
                self.prev_action, self.act_size)
            # network_input = tf.concat([network_input, self.prev_action_onehot], axis=1)

            c_in = tf.placeholder(tf.float32,
                                  [None, self.parameters['num_rec_units']],
                                  name='c_state')
            h_in = tf.placeholder(tf.float32,
                                  [None, self.parameters['num_rec_units']],
                                  name='h_state')
            self.seq_len = tf.placeholder(shape=None,
                                          dtype=tf.int32,
                                          name='sequence_length')
            self.state_in = tf.contrib.rnn.LSTMStateTuple(c_in, h_in)
            hidden, self.state_out = net.rnn(network_input, self.state_in,
                                             self.parameters['num_rec_units'],
                                             self.seq_len, 'rnn')
        self.hidden = hidden
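
A hedged sketch of feeding this graph for the recurrent image case; `model` (a wrapper exposing the placeholders above), `sess`, and the frame/batch sizes are all assumptions:

import numpy as np

batch, units = 4, 256                                             # assumed sizes
feed = {
    model.observation: np.zeros((batch, 84, 84, 4), np.float32),  # assumed 84x84x4 frames
    model.seq_len: 1,
    model.state_in.c: np.zeros((batch, units), np.float32),
    model.state_in.h: np.zeros((batch, units), np.float32),
}
hidden_value = sess.run(model.hidden, feed_dict=feed)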
Example #6
    def __init__(self):
        """
        Create ROS clients and the estimator model
        """
        self.ocp_client = actionlib.SimpleActionClient(
            OPT_CONTROL_ACTION_SERVER, OptControlAction)
        self.ocp_client.wait_for_server()
        rospy.loginfo('End of wait for ocp action server')
        rospy.wait_for_service('create_samples')
        self.sampling_client = rospy.ServiceProxy('create_samples', Samples)

        self.estimator = Networks(NX,
                                  NU,
                                  x_range=np.array([X_MIN, X_MAX]),
                                  u_range=np.array([U_MIN, U_MAX]))
Example #7
def set_plugin_parameters(status: bool = False, alerts: bool = False):
    # Store args and monitor_plugins for lazy loading.
    global default_args, pool_collection, node_info

    if not default_args:
        # Create plugin instance and set default args
        default_monitor_plugins = PluginCollection('plugins')
        parser = argparse.ArgumentParser()
        parser.add_argument("-v",
                            "--verbose",
                            default=(os.environ.get(
                                'VERBOSE', 'False').lower() == 'true'),
                            action="store_true")
        default_monitor_plugins.get_parse_args(parser)
        default_args, unknown = parser.parse_known_args()
        enable_verbose(default_args.verbose)
        pool_collection = PoolCollection(default_args.verbose, Networks())
        node_info = FetchStatus(default_args.verbose, pool_collection)

    # Create namespace with default args and load them into api_args
    api_args = argparse.Namespace()
    for name, value in default_args._get_kwargs():
        setattr(api_args, name, value)

    # Set api_args with the values from the parameters
    setattr(api_args, 'status', status)
    setattr(api_args, 'alerts', alerts)

    # Create and load plugins with api_args
    monitor_plugins = PluginCollection('plugins')
    monitor_plugins.load_all_parse_args(api_args)

    return monitor_plugins
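
Hypothetical call sites for the helper above: the first call parses the default args and builds the shared PoolCollection; subsequent calls only reload the plugins with the new flags:

status_plugins = set_plugin_parameters(status=True)
alert_plugins = set_plugin_parameters(alerts=True)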
Example #8
        def unroll(iter, state, hidden_states):  # , softmax_weights):
            """
            """
            hidden = tf.cond(self.update_bool,
                             lambda: tf.gather_nd(self.feature_vector,
                                                  self.indices + iter),
                             lambda: self.feature_vector)
            inf_prev_action = tf.cond(self.update_bool,
                                      lambda: tf.gather_nd(self.inf_prev_action,
                                                           self.indices + iter),
                                      lambda: self.inf_prev_action)
            inf_hidden = state.h
            if self.parameters['attention']:
                inf_hidden = attention(hidden, inf_hidden)
            elif self.parameters['automatic_dpatch']:
                inf_hidden = automatic_dpatch(hidden)
            else:
                inf_hidden = manual_dpatch(hidden)


            inf_prev_action_onehot = c_layers.one_hot_encoding(inf_prev_action,
                                                               self.act_size)
            # inf_hidden = tf.concat([inf_hidden, inf_prev_action_onehot], axis=1)
            inf_hidden, state = net.rnn(inf_hidden, state,
                                        self.parameters['inf_num_rec_units'],
                                        self.inf_seq_len, 'inf_rnn')
            hidden_states = hidden_states.write(iter, inf_hidden)
            iter += 1

            return [iter, state, hidden_states]  # , softmax_weights]
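
`unroll` has the (iter, state, hidden_states) signature of a tf.while_loop body that writes into a tf.TensorArray. A hedged sketch of the driver code such a body implies; `initial_state` (an LSTMStateTuple) and `seq_len` are assumptions, not taken from this snippet:

import tensorflow as tf

seq_len = 10                                            # assumed unroll length
hidden_states = tf.TensorArray(tf.float32, size=seq_len)
i0 = tf.constant(0)
_, final_state, hidden_states = tf.while_loop(
    cond=lambda it, state, ta: it < seq_len,
    body=unroll,                                        # the body defined above
    loop_vars=[i0, initial_state, hidden_states])
stacked = hidden_states.stack()                         # [seq_len, batch, inf_num_rec_units]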
Example #9
    def __init__(self):
        self.estimator = Networks(NX,
                                  NU,
                                  x_range=np.array([X_MIN, X_MAX]),
                                  u_range=np.array([U_MIN, U_MAX]))
        self.estimator.load()
        # Last state trajectory calculated
        self.X = np.array([])
        self.U = np.array([])
        self.time = 0
        self.current_state = np.zeros(NX)
        self.end = np.zeros(NX)
        # time from the start of the current trajectory in ns
        self.t = 0  # useless as attribute?
        self.t_idx = 0

        self.client = actionlib.SimpleActionClient(OPT_CONTROL_ACTION_SERVER,
                                                   OptControlAction)
        self.client.wait_for_server()
        self.pub = rospy.Publisher('controller', Control, queue_size=10)
Example #10
    def __init__(self):
        """
        Load the irepa built estimator then create clients for simulation
        and optimal control nodes
        """
        self.estimator = Networks(NX,
                                  NU,
                                  x_range=np.array([X_MIN, X_MAX]),
                                  u_range=np.array([U_MIN, U_MAX]))
        self.estimator.load()
        # Last state trajectory calculated
        self.X = np.array([])
        self.U = np.array([])
        self.u = np.zeros(NU)
        self.time = 0
        self.current_state = np.zeros(NX)
        self.end_state = np.zeros(NX)
        # time from the start of the current trajectory in ns
        self.t_idx = 0
        # update trajectory every update_times iteration
        self.update_times = int(CPS / TUPS)

        self.ocp_client = actionlib.SimpleActionClient(OPT_CONTROL_SERVER,
                                                       OptControlAction)
        self.ocp_client.wait_for_server()
        self.pub = rospy.Publisher(COMMAND_TOPIC, Command, queue_size=10)
        rospy.Subscriber(CURRENT_STATE_TOPIC, State, self.update_current_state)
        rospy.Subscriber(END_STATE_TOPIC, State, self.update_end_state)

        # test state rate
        self.t1 = rospy.get_rostime()
        self.t2 = rospy.get_rostime()

        # control steps
        self.stop_update = False
        self.stop_controls = False
        self.started = False
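
The constructor derives update_times = CPS / TUPS, i.e. how many control steps pass between trajectory replans. A hedged sketch of the main loop this implies, inside a method of this class; the loop structure and method name are assumptions based on Example #13:

        rate = rospy.Rate(CPS)                     # publish controls at CPS Hz
        step = 0
        while not rospy.is_shutdown():
            if step % self.update_times == 0 and not self.stop_update:
                self.call_update_trajectory_action()  # assumed method, cf. Example #13
            step += 1
            rate.sleep()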
Example #11
    def __init__(self, system_settings, websocket, snmp_websocket, **kwargs):
        super(SleepyMeshBase, self).__init__(**kwargs)

        if 'last_syncs' not in self._defaults:
            self._defaults.update({'last_syncs': list()})

        # Internal Members #
        self._mesh_awake = True
        self._sync_type = 'timeout'

        self._save_in_progress = False

        self._sync_average = None
        self._delay_average = None

        # Instances #
        # TODO: Eliminate as many dependencies as possible
        self.system_settings = system_settings
        self.websocket = websocket
        self.snmp_websocket = snmp_websocket

        self.modbus_server = ModbusServer()
        self.snmp_server = SNMPTrapServer(self)
        self.update_interfaces = UpdateInterfaces(self)
        self.update_in_progress = self.update_interfaces.update_in_progress

        self.bridge = Bridge(self.system_settings)
        self.uploader = Uploader(self)

        self.nodes = Nodes(self.system_settings)
        self.platforms = Platforms(self.nodes)
        self.networks = Networks(self)

        self.error = BaseError(self.system_settings)

        if self.system_settings.modbus_enable:
            system_settings_dict = self.system_settings.attr_dict()
            # LOGGER.debug('Modbus Attribute Dictionary: ' + str(system_settings_dict))
            self.modbus_server.start(system_settings_dict)

        if self.system_settings.snmp_enable:
            self.snmp_server.start()

            # Overload Node Error Methods (SNMP Error Methods)#
            NodeError.send_snmp = self.snmp_server.send_snmp
            NodeError.clear_snmp = self.snmp_server.clear_snmp
Example #12
class Core:
    def __init__(self):
        user_dir = "%s%s.kismon%s" % (os.path.expanduser("~"), os.sep, os.sep)
        if not os.path.isdir(user_dir):
            print("Creating Kismon user directory %s" % user_dir)
            os.mkdir(user_dir)
        config_file = "%skismon.conf" % user_dir
        self.config_handler = Config(config_file)
        self.config_handler.read()
        self.config = self.config_handler.config

        self.sources = {}
        self.crypt_cache = {}
        self.networks = Networks(self.config)
        self.client_threads = {}
        self.init_client_threads()
        self.tracks = Tracks("%stracks.json" % user_dir)
        self.tracks.load()

        if "--disable-map" in sys.argv:
            self.map_error = "--disable-map used"
        else:
            self.map_error = check_osmgpsmap()

        if self.map_error is not None:
            self.map_error = "%s\nMap disabled" % self.map_error
            print(self.map_error, "\n")

        self.init_map()

        self.main_window = MainWindow(self.config, self.client_start,
                                      self.client_stop, self.map,
                                      self.networks, self.sources, self.tracks,
                                      self.client_threads)
        self.main_window.log_list.add("Kismon", "started")
        if self.map_error is not None:
            self.main_window.log_list.add("Kismon", self.map_error)

        self.networks_file = "%snetworks.json" % user_dir
        if os.path.isfile(self.networks_file):
            try:
                self.networks.load(self.networks_file)
            except Exception as error:
                print(error)
                dialog_message = "Could not read the networks file '%s':\n%s\n\nDo you want to continue?" % (
                    self.networks_file, error)
                dialog = Gtk.MessageDialog(self.main_window.gtkwin,
                                           Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                           Gtk.MessageType.ERROR,
                                           Gtk.ButtonsType.YES_NO,
                                           dialog_message)

                def dialog_response(dialog, response_id):
                    self.dialog_response = response_id

                dialog.connect("response", dialog_response)
                dialog.run()
                dialog.destroy()
                if self.dialog_response == -9:
                    print("exit")
                    self.clients_stop()
                    self.main_window.gtkwin = None
                    return
        self.networks.set_autosave(self.config["networks"]["autosave"],
                                   self.networks_file,
                                   self.main_window.log_list.add)

        if self.map is not None:
            self.networks.notify_add_list["map"] = self.add_network_to_map
            self.networks.notify_remove_list["map"] = self.map.remove_marker
            GLib.timeout_add(100, self.map.set_last_from_config)

        self.main_window.network_list.crypt_cache = self.crypt_cache

        GLib.timeout_add(500, self.queues_handler)
        GLib.timeout_add(300, self.queues_handler_networks)
        GLib.idle_add(self.networks.apply_filters)

    def init_map(self):
        if self.map_error is not None:
            self.map = None
        else:
            try:
                from .map import Map
            except SystemError:
                from map import Map
            user_agent = 'kismon/%s' % utils.get_version()
            self.map = Map(self.config["map"], user_agent=user_agent)
            self.map.set_last_from_config()

    def init_client_thread(self, server_id):
        server = self.config["kismet"]["servers"][server_id]
        self.client_threads[server_id] = ClientThread(server)
        self.client_threads[server_id].client.set_capabilities(
            ('status', 'source', 'info', 'gps', 'bssid', 'bssidsrc', 'ssid'))
        if "--create-kismet-dump" in sys.argv:
            self.client_threads[server_id].client.enable_dump()

    def init_client_threads(self):
        server_id = 0
        for server in self.config["kismet"]["servers"]:
            self.init_client_thread(server_id)
            server_id += 1

    def client_start(self, server_id):
        if server_id in self.client_threads and self.client_threads[
                server_id].is_running:
            self.client_stop(server_id)
        self.sources[server_id] = {}
        self.init_client_thread(server_id)
        if "--load-kismet-dump" in sys.argv:
            self.client_threads[server_id].client.load_dump(sys.argv[2])
        self.client_threads[server_id].start()

    def client_stop(self, server_id):
        if self.client_threads[server_id].client.connecting:
            # kill connecting sockets, don't wait for the timeout
            try:
                self.client_threads[server_id].client.s.shutdown(
                    socket.SHUT_RDWR)
            except OSError:
                pass
        self.client_threads[server_id].stop()

    def clients_stop(self):
        for server_id in self.client_threads:
            self.client_stop(server_id)
        return True

    def queue_handler(self, server_id):
        server_name = self.config['kismet']['servers'][server_id]
        if self.main_window.gtkwin is None:
            return False

        thread = self.client_threads[server_id]
        if len(thread.client.error) > 0:
            for error in thread.client.error:
                self.main_window.log_list.add(server_name, error)
            thread.client.error = []
            self.main_window.server_tabs[server_id].server_switch.set_active(
                False)
            page_num = self.main_window.notebook.page_num(
                self.main_window.log_list.widget)
            self.main_window.notebook.set_current_page(page_num)

        # gps
        gps = None
        fix = None
        gps_queue = thread.get_queue("gps")
        while True:
            try:
                data = gps_queue.pop()
                if gps is None:
                    gps = data
                if data["fix"] > 1:
                    fix = (data["lat"], data["lon"])
                    if self.config['tracks']['store']:
                        self.tracks.add_point_to_track(server_name,
                                                       data['lat'],
                                                       data['lon'],
                                                       data['alt'])
                    break
            except IndexError:
                break
        if gps is not None:
            self.main_window.server_tabs[server_id].update_gps_table(gps)
            if fix is not None and self.map is not None:
                server = "server%s" % (server_id + 1)
                if server_id == 0:
                    self.map.set_position(fix[0], fix[1])
                else:
                    self.map.add_marker(server, server, fix[0], fix[1])
                self.map.add_track(fix[0], fix[1], server_id)

        # status
        for data in thread.get_queue("status"):
            self.main_window.log_list.add(server_name, data["text"])

        # info
        info_queue = thread.get_queue("info")
        try:
            data = info_queue.pop()
            self.main_window.server_tabs[server_id].update_info_table(data)
        except IndexError:
            pass

        # source
        update = False
        for data in thread.get_queue("source"):
            uuid = data["uuid"]
            if uuid == "00000000-0000-0000-0000-000000000000":
                continue
            self.sources[server_id][uuid] = data

            update = True
        if update:
            self.main_window.server_tabs[server_id].update_sources_table(
                self.sources[server_id])

    def queues_handler(self):
        for server_id in self.client_threads:
            self.queue_handler(server_id)
        return True

    def queue_handler_networks(self, server_id):
        thread = self.client_threads[server_id]

        # ssid
        for data in thread.get_queue("ssid"):
            self.networks.add_ssid_data(data)

        # bssid
        bssids = {}
        for data in thread.get_queue("bssid"):
            mac = data["bssid"]
            self.networks.add_bssid_data(data, server_id)
            if mac in self.main_window.signal_graphs and "signal_dbm" not in thread.client.capabilities[
                    "bssidsrc"]:
                self.main_window.signal_graphs[mac].add_value(
                    None, None, data["signal_dbm"], server_id)

            bssids[mac] = True

        # bssidsrc
        for data in thread.get_queue("bssidsrc"):
            if "signal_dbm" not in data or data["uuid"] not in self.sources[
                    server_id]:
                continue

            mac = data["bssid"]
            if mac in self.main_window.signal_graphs:
                self.main_window.signal_graphs[mac].add_value(
                    self.sources[server_id][data["uuid"]], data,
                    data["signal_dbm"], server_id)

        if len(self.networks.notify_add_queue) > 0:
            self.networks.start_queue()
            if len(self.networks.notify_add_queue) > 500:
                self.networks.disable_refresh()
                self.main_window.networks_queue_progress()

        self.main_window.update_statusbar()

    def queues_handler_networks(self):
        for server_id in self.client_threads:
            self.queue_handler_networks(server_id)
        return True

    def quit(self):
        self.clients_stop()

        if self.map is not None:
            lat = self.map.osm.get_property("latitude")
            lon = self.map.osm.get_property("longitude")
            self.config["map"]["last_position"] = "%.6f/%.6f" % (lat, lon)

        while None in self.config['kismet']['servers']:
            self.config['kismet']['servers'].remove(None)
        self.config_handler.write()
        self.networks.save(self.networks_file, force=True)
        if self.config['tracks']['store']:
            self.tracks.save()

    def add_network_to_map(self, mac):
        network = self.networks.get_network(mac)

        try:
            crypt = self.crypt_cache[network["cryptset"]]
        except KeyError:
            crypt = decode_cryptset(network["cryptset"], True)
            self.crypt_cache[network["cryptset"]] = crypt

        if "AES_CCM" in crypt or "AES_OCB" in crypt:
            color = "red"
        elif "WPA" in crypt:
            color = "orange"
        elif "WEP" in crypt:
            color = "yellow"
        else:
            color = "green"

        self.map.add_marker(mac, color, network["lat"], network["lon"])
Example #13
class Controller:
    def __init__(self):
        self.estimator = Networks(NX,
                                  NU,
                                  x_range=np.array([X_MIN, X_MAX]),
                                  u_range=np.array([U_MIN, U_MAX]))
        self.estimator.load()
        # Last state trajectory calculated
        self.X = np.array([])
        self.U = np.array([])
        self.time = 0
        self.current_state = np.zeros(NX)
        self.end = np.zeros(NX)
        # time from the start of the current trajectory in ns
        self.t = 0  # useless as attribute?
        self.t_idx = 0

        self.client = actionlib.SimpleActionClient(OPT_CONTROL_ACTION_SERVER,
                                                   OptControlAction)
        self.client.wait_for_server()
        self.pub = rospy.Publisher('controller', Control, queue_size=10)

    def next_control(self):
        """Choose next control depending on the current_state and self.U
        Callback to service controller"""
        self.t += 1 / CPS  # Maybe useless after init
        self.t_idx += 1
        u = self.U[self.t_idx, :]
        print('Control:', u)
        # self.pub(u)
        return u

    def update_trajectory(self, state, resp):
        """Callback to topic simulation"""
        print('UPDATE TRAJECTORYYYYY')
        print(state)
        print(resp)

        if resp.success:
            X = np.array(resp.states).reshape(len(resp.states) // NX, NX)
            U = np.array(resp.controls).reshape(len(resp.controls) // NU, NU)
            self.time = resp.time
            # Resample the trajectories
            nb_control = int(resp.time * CPS) + 1
            self.X = resample(X, nb_control)  # maybe not necessary
            self.U = resample(U, nb_control)
            tend = rospy.get_rostime()
            self.t = tend.secs - self.tstart.secs
            print(self.t)
            self.t_idx = int(self.t * CPS)

        else:
            # TODO
            print()
            print('FAILURE OF CONTROL!!!!')
            print()

    def call_update_trajectory_action(self):
        """
        Call ACADO warm started by the estimator.
        Update the current trajectory (X, U, time).
        Maybe start a timer at the beginning?
        """
        self.tstart = rospy.get_rostime()
        Xe, Ue, Ve = self.estimator.trajectories(self.current_state, self.end)

        Xe = Xe.flatten()
        Ue = Ue.flatten()
        # print(Xe)
        # print(Ue)
        # print(Ve)

        goal = OptControlGoal(list(self.current_state), list(self.end), Xe, Ue,
                              Ve, NX, NU)
        # Fill in the goal here
        self.client.send_goal(goal, self.update_trajectory)
        # self.client.wait_for_result(rospy.Duration.from_sec(5.0))  # Nope!

    def new_end(self, end_state):
        """More after that?"""
        self.end = end_state
        self.call_update_trajectory_action()
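
`update_trajectory` above resamples the ACADO state and control trajectories to the control rate through a `resample` helper imported elsewhere. One plausible, hedged implementation using column-wise linear interpolation (an assumption, not the repo's code):

import numpy as np

def resample(traj, n_samples):
    # Linearly interpolate each column of traj onto n_samples evenly spaced points.
    old_t = np.linspace(0.0, 1.0, traj.shape[0])
    new_t = np.linspace(0.0, 1.0, n_samples)
    return np.column_stack([np.interp(new_t, old_t, traj[:, k])
                            for k in range(traj.shape[1])])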
Example #14
def main():

    # Get opts
    parser = argparse.ArgumentParser(
        description='Automated lab setup in Horizon')
    parser.add_argument("--headless",
                        help="Must have pyvirtualdisplay module",
                        required=False,
                        action='store_true')
    args = parser.parse_args()
    try:

        # Prep ---------------------------------------------------------------------------------------------------------
        Images.get_guest_image()
        print "Test: PASS - Get cgcs-guest.img"
        # End of Prep --------------------------------------------------------------------------------------------------

        # LAB_SETUP GUI VERSION ----------------------------------------------------------------------------------------
        project_name_one = "admin"
        project_name_two = "tenant1"
        project_name_three = "tenant2"
        tenant_list = [project_name_two, project_name_three]

        # Start web driver with firefox and set URL address to 10.10.10.2
        DriverUtils.open_driver(settings.DEFAULT_BROWSER, args.headless)
        DriverUtils.set_url(settings.DEFAULT_URL)

        # Wait for elements on page to load
        DriverUtils.wait_for_elements(settings.DEFAULT_ELEMENT_LOAD_TIME)

        # Call login module
        Login.login(project_name_one, project_name_one)

        # Wait for elements on page to load
        DriverUtils.wait_for_elements(settings.DEFAULT_ELEMENT_LOAD_TIME)
        print "Test: PASS - Begin Lab Setup"

        # Create Tenants -----------------------------------------------------------------------------------------------
        tenant_two = [
            project_name_two, project_name_two, "*****@*****.**",
            project_name_two
        ]
        tenant_three = [
            project_name_three, project_name_three, "*****@*****.**",
            project_name_three
        ]

        Tenants.tenants(tenant_two[0], tenant_two[1], tenant_two[2],
                        tenant_two[3])
        Tenants.tenants(tenant_three[0], tenant_three[1], tenant_three[2],
                        tenant_three[3])
        print "Test: PASS - Create tenants"
        # End of Create Tenants ----------------------------------------------------------------------------------------

        # Modify Quotas ------------------------------------------------------------------------------------------------
        # params (user_name, metadata Items, VCPUs, Instances, Injected Files, Injected File Content (Bytes), Volumes,
        # Volume Snapshots ,Total Size of Volumes and Snapshots (GB), RAM (MB), Security Groups, Security Groups Rules,
        # Floating IPs, Networks, Ports, Routers, Subnets)
        quota_dict_tenant_one = {
            'id_metadata_items': None,
            'id_cores': [0, 'text'],
            'id_instances': [0, 'text'],
            'id_injected_files': None,
            'id_injected_file_content_bytes': None,
            'id_volumes': [2, 'text'],
            'id_snapshots': [2, 'text'],
            'id_gigabytes': None,
            'id_ram': None,
            'id_security_group': None,
            'id_security_group_rule': None,
            'id_floatingip': [0, 'text'],
            'id_network': [4, 'text'],
            'id_port': [10, 'text'],
            'id_router': None,
            'id_subnet': [3, 'text']
        }
        quota_dict_tenant_two = {
            'id_metadata_items': None,
            'id_cores': [2, 'text'],
            'id_instances': [1, 'text'],
            'id_injected_files': None,
            'id_injected_file_content_bytes': None,
            'id_volumes': [2, 'text'],
            'id_snapshots': [2, 'text'],
            'id_gigabytes': None,
            'id_ram': None,
            'id_security_group': None,
            'id_security_group_rule': None,
            'id_floatingip': [1, 'text'],
            'id_network': [3, 'text'],
            'id_port': [39, 'text'],
            'id_router': None,
            'id_subnet': [13, 'text']
        }
        quota_dict_tenant_three = {
            'id_metadata_items': None,
            'id_cores': [2, 'text'],
            'id_instances': [1, 'text'],
            'id_injected_files': None,
            'id_injected_file_content_bytes': None,
            'id_volumes': [2, 'text'],
            'id_snapshots': [2, 'text'],
            'id_gigabytes': None,
            'id_ram': None,
            'id_security_group': None,
            'id_security_group_rule': None,
            'id_floatingip': [1, 'text'],
            'id_network': [3, 'text'],
            'id_port': [39, 'text'],
            'id_router': None,
            'id_subnet': [13, 'text']
        }
        ModifyQuotas.quotas(project_name_one, quota_dict_tenant_one)
        ModifyQuotas.quotas(project_name_two, quota_dict_tenant_two)
        ModifyQuotas.quotas(project_name_three, quota_dict_tenant_three)
        print "Test: PASS - Modify quotas"
        # End of Modify Quotas -----------------------------------------------------------------------------------------

        # Create Flavors -----------------------------------------------------------------------------------------------
        # Params (flavor_name, vcpus, ram, root_disk, ephemeral_disk, swap_disk)
        flavor_one = [
            "small", 1, 512, 1, 0, 0, "CPU Policy", "Dedicated",
            "Memory Page Size", "2048"
        ]
        flavor_two = [
            "medium.dpdk", 2, 1024, 1, 0, 0, "CPU Policy", "Dedicated",
            "Memory Page Size", "2048", "VCPU Model",
            "Intel Core i7 9xx (Nehalem Class Core i7)"
        ]
        flavor_three = [
            "small.float", 1, 512, 1, 0, 0, "CPU Policy", "Dedicated",
            "Memory Page Size", "2048"
        ]

        flavor_full_link = Flavors.flavors(flavor_one[0], flavor_one[1],
                                           flavor_one[2], flavor_one[3],
                                           flavor_one[4], flavor_one[5])
        Flavors.create_extra_spec(flavor_full_link, flavor_one[6],
                                  flavor_one[7])
        Flavors.create_extra_spec(flavor_full_link, flavor_one[8],
                                  flavor_one[9])
        flavor_full_link = Flavors.flavors(flavor_two[0], flavor_two[1],
                                           flavor_two[2], flavor_two[3],
                                           flavor_two[4], flavor_two[5])
        Flavors.create_extra_spec(flavor_full_link, flavor_two[6],
                                  flavor_two[7])
        Flavors.create_extra_spec(flavor_full_link, flavor_two[8],
                                  flavor_two[9])
        Flavors.create_extra_spec(flavor_full_link, flavor_two[10],
                                  flavor_two[11])
        flavor_full_link = Flavors.flavors(flavor_three[0], flavor_three[1],
                                           flavor_three[2], flavor_three[3],
                                           flavor_three[4], flavor_three[5])
        Flavors.create_extra_spec(flavor_full_link, flavor_three[6],
                                  flavor_three[7])
        Flavors.create_extra_spec(flavor_full_link, flavor_three[8],
                                  flavor_three[9])
        print "Test: PASS - Create flavors"
        # End of Create Flavors ----------------------------------------------------------------------------------------

        # Create Key Pairs ---------------------------------------------------------------------------------------------
        Logout.logout()
        Login.login(project_name_two, project_name_two)
        KeyPairs.key_pairs("keypair-tenant1")
        Logout.logout()
        Login.login(project_name_three, project_name_three)
        KeyPairs.key_pairs("keypair-tenant2")
        Logout.logout()
        Login.login(project_name_one, project_name_one)
        print "Test: PASS - Create key pairs"
        # End of Create Key Pairs --------------------------------------------------------------------------------------
        # Provider Networks --------------------------------------------------------------------------------------------
        provider_network_one = [
            "group0-ext0", "vlan", 1500, True, "group0-ext0-r0-0", True, None,
            10, 10
        ]
        provider_network_two = [
            "group0-data0", "vlan", 1500, True, "group0-data0-r1-0", True,
            project_name_two, 600, 615
        ]
        provider_network_three = [
            "group0-data0b", "vlan", 1500, True, "group0-data0b-r2-0", True,
            None, 700, 731
        ]
        provider_network_four = [
            "group0-data1", "vlan", 1500, True, "group0-data1-r3-0", True,
            project_name_three, 616, 631
        ]

        provider_net_link = Networks.networks(provider_network_one[0],
                                              provider_network_one[1],
                                              provider_network_one[2],
                                              provider_network_one[3])
        print provider_net_link
        # Create range for provider net above
        Networks.provider_net_range_create(provider_net_link,
                                           provider_network_one[4],
                                           provider_network_one[5],
                                           provider_network_one[6],
                                           provider_network_one[7],
                                           provider_network_one[8])
        provider_net_link = Networks.networks(provider_network_two[0],
                                              provider_network_two[1],
                                              provider_network_two[2],
                                              provider_network_two[3])
        # Create range for provider net above
        Networks.provider_net_range_create(provider_net_link,
                                           provider_network_two[4],
                                           provider_network_two[5],
                                           provider_network_two[6],
                                           provider_network_two[7],
                                           provider_network_two[8])
        provider_net_link = Networks.networks(provider_network_three[0],
                                              provider_network_three[1],
                                              provider_network_three[2],
                                              provider_network_three[3])
        # Create range for provider net above
        Networks.provider_net_range_create(provider_net_link,
                                           provider_network_three[4],
                                           provider_network_three[5],
                                           provider_network_three[6],
                                           provider_network_three[7],
                                           provider_network_three[8])
        provider_net_link = Networks.networks(provider_network_four[0],
                                              provider_network_four[1],
                                              provider_network_four[2],
                                              provider_network_four[3])
        # Create range for provider net above
        Networks.provider_net_range_create(provider_net_link,
                                           provider_network_four[4],
                                           provider_network_four[5],
                                           provider_network_four[6],
                                           provider_network_four[7],
                                           provider_network_four[8])
        print "Test: PASS - Create provider networks"
        # END Of Provider Networks--------------------------------------------------------------------------------------

        # SCRIPT EXPECTS COMPUTES TO BE ONLINE NOW
        # Check computes have 'Availability State' as 'Online'
        host_list = Hosts.hosts()

        # Create Interfaces & Modify Hosts -----------------------------------------------------------------------------
        provider_net_list1 = ["group0-ext0", "group0-data0", "group0-data0b"]
        provider_net_list2 = ["group0-data1"]

        for hostname in host_list:
            Hosts.modify_interface(hostname[0], "data0", "data", "eth2", 1500,
                                   provider_net_list1)
            Hosts.modify_interface(hostname[0], "data1", "data", "eth3", 1500,
                                   provider_net_list2)
            # Local storage changes:
            Hosts.add_local_storage(hostname[0], 2048)

        # Controller-1 Changes:
        # Modify OAM interfaces for controller-1
        Hosts.modify_interface("controller-1", "oam0", "oam", "eth0", 1500,
                               None)
        # Add cinder device configuration for controller-1
        Hosts.create_cinder_device("controller-1")

        for host in host_list:
            # Host Profiles
            if_profile_name = "ifprofile-" + host[0]
            cpu_profile_name = "cpuprofile-" + host[0]
            mem_profile_name = "memprofile-" + host[0]
            Hosts.create_interface_profile(host[0], if_profile_name)
            Hosts.create_cpu_profile(host[0], cpu_profile_name)
            Hosts.create_mem_profile(host[0], mem_profile_name)
            # Not sure how to apply profiles? Look like lab_setup.sh profiles when I create them
        print "Test: PASS - Create & modify host interfaces"
        # Create Interfaces & Modify Hosts -----------------------------------------------------------------------------

        # Create Images ------------------------------------------------------------------------------------------------
        # FIX TODO: Jira: https://jira.wrs.com:8443/browse/CGTS-3158
        # TODO: If Jira above is fixed, please uncomment the two lines of code (Images.images) below.
        # TODO: You no longer need to manually create the guest image.
        # Images.images("cgcs-guest", "http://yow-cgcs-test.wrs.com/files/cgcs_guestimg/cgcs-guest.img", "Raw", False,
        #              None, None, True, False)
        print "Test: PASS - Create image"
        # End of Create Images -----------------------------------------------------------------------------------------

        # Create Volumes -----------------------------------------------------------------------------------------------
        Logout.logout()
        Login.login(project_name_two, project_name_two)
        Volumes.volumes("vol-tenant1-avp1", "Image", "cgcs-guest (608.0 MB)",
                        "nova")
        Logout.logout()
        Login.login(project_name_three, project_name_three)
        Volumes.volumes("vol-tenant2-avp1", "Image", "cgcs-guest (608.0 MB)",
                        "nova")
        Logout.logout()
        Login.login(project_name_one, project_name_one)
        print "Test: PASS - Create volumes"
        # End of Create Volumes ----------------------------------------------------------------------------------------

        # Create QoS Policies ------------------------------------------------------------------------------------------
        qos_policy_list = ["external-qos", "internal-qos"]

        Networks.create_qos_policy("external-qos", "External Network Policy",
                                   16, project_name_one)
        Networks.create_qos_policy("internal-qos", "Internal Network Policy",
                                   4, project_name_one)

        for tenant in tenant_list:
            name = tenant + "-mgmt-qos"
            desc = tenant + "Management Network Policy"
            Networks.create_qos_policy(name, desc, 8, tenant)
            qos_policy_list.append(name)
        print "Test: PASS - QoS policies"
        # End of Create QoS Policies -----------------------------------------------------------------------------------

        # Create Networks ----------------------------------------------------------------------------------------------
        network_one = [
            "external-net0", project_name_one, "vlan", "group0-ext0", 10,
            "external-qos", True, True, False
        ]
        network_two = [
            "internal0-net0", project_name_one, "vlan", "group0-data0b", 700,
            "internal-qos", True, False, False
        ]
        network_three = [
            "tenant1-mgmt-net", project_name_two, "vlan", "group0-data0", None,
            "tenant1-mgmt-qos", False, False, False
        ]
        subnet_one = [
            "external-subnet0", "192.168.1.0/24", "192.168.1.1", False, True,
            False, "192.168.1.2,192.168.1.254", None, None, None
        ]
        subnet_two = [
            "internal0-subnet0-1", "10.0.1.0/24", None, True, False, False,
            None, None, None, 1
        ]

        network_name = Networks.create_network(network_one[0], network_one[1],
                                               network_one[2], network_one[3],
                                               network_one[4], network_one[5],
                                               network_one[6], network_one[7],
                                               network_one[8])
        # Create subnet for network above
        Networks.create_subnet(network_name, subnet_one[0], subnet_one[1],
                               subnet_one[2], subnet_one[3], subnet_one[4],
                               subnet_one[5], subnet_one[6], subnet_one[7],
                               subnet_one[8], subnet_one[9])

        # Create Network
        network_name = Networks.create_network(network_two[0], network_two[1],
                                               network_two[2], network_two[3],
                                               network_two[4], network_two[5],
                                               network_two[6], network_two[7],
                                               network_two[8])
        # Create subnet for network above
        Networks.create_subnet(network_name, subnet_two[0], subnet_two[1],
                               subnet_two[2], subnet_two[3], subnet_two[4],
                               subnet_two[5], subnet_two[6], subnet_two[7],
                               subnet_two[8], subnet_two[9])

        network_name = Networks.create_network(
            network_three[0], network_three[1], network_three[2],
            network_three[3], network_three[4], network_three[5],
            network_three[6], network_three[7], network_three[8])
        print "Test: PASS - Create networks"
        # End of Create Networks ---------------------------------------------------------------------------------------

        # Create Router, router interfaces and Subnets -----------------------------------------------------------------
        # ...............................................Tenant 1.......................................................
        project_subnet_t1_zero = [
            "tenant1-mgmt-subnet0", "192.168.101.0/27", "192.168.101.1", False,
            True, True, "192.168.101.2,192.168.101.30",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, None
        ]
        project_subnet_t1_one = [
            "tenant1-mgmt-subnet1", "192.168.101.32/27", "192.168.101.33",
            False, True, True, "192.168.101.34,192.168.101.62",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, None
        ]
        project_subnet_t1_two = [
            "tenant1-mgmt-subnet2", "192.168.101.64/27", "192.168.101.65",
            False, True, True, "192.168.101.66,192.168.101.94",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, None
        ]
        project_subnet_t1_three = [
            "tenant1-mgmt-subnet3", "10.101.1.0/27", "10.101.1.1", False, True,
            True, "10.101.1.2,10.101.1.30",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, 1
        ]
        project_subnet_t1_four = [
            "tenant1-mgmt-subnet4", "10.101.1.32/27", "10.101.1.33", False,
            True, True, "10.101.1.34,10.101.1.62",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, 1
        ]
        project_subnet_t1_five = [
            "tenant1-mgmt-subnet5", "10.101.1.64/27", "10.101.1.65", False,
            True, True, "10.101.1.66,10.101.1.94",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, 1
        ]
        Logout.logout()
        # Tenant 1 Router and Subnets
        Login.login(project_name_two, project_name_two)
        router_one = "tenant1-router"
        router_link = Routers.routers(router_one, "external-net0")
        Networks.create_project_subnet(
            network_name, project_subnet_t1_zero[0], project_subnet_t1_zero[1],
            project_subnet_t1_zero[2], project_subnet_t1_zero[3],
            project_subnet_t1_zero[4], project_subnet_t1_zero[5],
            project_subnet_t1_zero[6], project_subnet_t1_zero[7],
            project_subnet_t1_zero[8], project_subnet_t1_zero[9])
        # Create router interface
        subnet_name = "tenant1-mgmt-net: " + "192.168.101.0/27 " + "(tenant1-mgmt-subnet0)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "192.168.101.1")
        Networks.create_project_subnet(
            network_name, project_subnet_t1_one[0], project_subnet_t1_one[1],
            project_subnet_t1_one[2], project_subnet_t1_one[3],
            project_subnet_t1_one[4], project_subnet_t1_one[5],
            project_subnet_t1_one[6], project_subnet_t1_one[7],
            project_subnet_t1_one[8], project_subnet_t1_one[9])
        # Create router interface
        subnet_name = "tenant1-mgmt-net: " + "192.168.101.32/27 " + "(tenant1-mgmt-subnet1)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "192.168.101.33")
        Networks.create_project_subnet(
            network_name, project_subnet_t1_two[0], project_subnet_t1_two[1],
            project_subnet_t1_two[2], project_subnet_t1_two[3],
            project_subnet_t1_two[4], project_subnet_t1_two[5],
            project_subnet_t1_two[6], project_subnet_t1_two[7],
            project_subnet_t1_two[8], project_subnet_t1_two[9])
        # Create router interface
        subnet_name = "tenant1-mgmt-net: " + "192.168.101.64/27 " + "(tenant1-mgmt-subnet2)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "192.168.101.65")
        Networks.create_project_subnet(
            network_name, project_subnet_t1_three[0],
            project_subnet_t1_three[1], project_subnet_t1_three[2],
            project_subnet_t1_three[3], project_subnet_t1_three[4],
            project_subnet_t1_three[5], project_subnet_t1_three[6],
            project_subnet_t1_three[7], project_subnet_t1_three[8],
            project_subnet_t1_three[9])
        # Create router interface
        subnet_name = "tenant1-mgmt-net: " + "10.101.1.0/27 " + "(tenant1-mgmt-subnet3)"
        Routers.create_router_interface(router_link, subnet_name, "10.101.1.1")
        Networks.create_project_subnet(
            network_name, project_subnet_t1_four[0], project_subnet_t1_four[1],
            project_subnet_t1_four[2], project_subnet_t1_four[3],
            project_subnet_t1_four[4], project_subnet_t1_four[5],
            project_subnet_t1_four[6], project_subnet_t1_four[7],
            project_subnet_t1_four[8], project_subnet_t1_four[9])
        # Create router interface
        subnet_name = "tenant1-mgmt-net: " + "10.101.1.32/27 " + "(tenant1-mgmt-subnet4)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "10.101.1.33")
        Networks.create_project_subnet(
            network_name, project_subnet_t1_five[0], project_subnet_t1_five[1],
            project_subnet_t1_five[2], project_subnet_t1_five[3],
            project_subnet_t1_five[4], project_subnet_t1_five[5],
            project_subnet_t1_five[6], project_subnet_t1_five[7],
            project_subnet_t1_five[8], project_subnet_t1_five[9])
        # Create router interface
        subnet_name = "tenant1-mgmt-net: " + "10.101.1.64/27 " + "(tenant1-mgmt-subnet5)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "10.101.1.65")
        Logout.logout()
        Login.login(project_name_one, project_name_one)
        Routers.router_distributed(router_link, False)
        print "Test: PASS - Create router & subnets for tenant 1"
        # ............................................End of Tenant 1...................................................

        # ...............................................Tenant 2.......................................................

        project_subnet_t2_zero = [
            "tenant2-mgmt-subnet0", "192.168.201.0/27", "192.168.201.1", False,
            True, True, "192.168.201.2,192.168.201.30",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, None
        ]
        project_subnet_t2_one = [
            "tenant2-mgmt-subnet1", "192.168.201.32/27", "192.168.201.33",
            False, True, True, "192.168.201.34,192.168.201.62",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, None
        ]
        project_subnet_t2_two = [
            "tenant2-mgmt-subnet2", "192.168.201.64/27", "192.168.201.65",
            False, True, True, "192.168.201.66,192.168.201.94",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, None
        ]
        project_subnet_t2_three = [
            "tenant2-mgmt-subnet3", "10.201.1.0/27", "10.201.1.1", False, True,
            True, "10.201.1.2,10.201.1.30",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, 1
        ]
        project_subnet_t2_four = [
            "tenant2-mgmt-subnet4", "10.201.1.32/27", "10.201.1.33", False,
            True, True, "10.201.1.34,10.201.1.62",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, 1
        ]
        project_subnet_t2_five = [
            "tenant2-mgmt-subnet5", "10.201.1.64/27", "10.201.1.65", False,
            True, True, "10.201.1.66,10.201.1.94",
            "147.11.57.133\n128.224.144.130\n147.11.57.128", None, 1
        ]

        # Tenant 2 Router and Subnets
        # Create Network
        network_name = Networks.create_network("tenant2-mgmt-net",
                                               project_name_three, "vlan",
                                               "group0-data1", None,
                                               "tenant2-mgmt-qos", False,
                                               False, False)
        Logout.logout()
        Login.login(project_name_three, project_name_three)
        router_two = "tenant2-router"
        router_link = Routers.routers(router_two, "external-net0")
        Networks.create_project_subnet(
            network_name, project_subnet_t2_zero[0], project_subnet_t2_zero[1],
            project_subnet_t2_zero[2], project_subnet_t2_zero[3],
            project_subnet_t2_zero[4], project_subnet_t2_zero[5],
            project_subnet_t2_zero[6], project_subnet_t2_zero[7],
            project_subnet_t2_zero[8], project_subnet_t2_zero[9])
        # Create router interface
        subnet_name = "tenant2-mgmt-net: " + "192.168.201.0/27 " + "(tenant2-mgmt-subnet0)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "192.168.201.1")
        Networks.create_project_subnet(
            network_name, project_subnet_t2_one[0], project_subnet_t2_one[1],
            project_subnet_t2_one[2], project_subnet_t2_one[3],
            project_subnet_t2_one[4], project_subnet_t2_one[5],
            project_subnet_t2_one[6], project_subnet_t2_one[7],
            project_subnet_t2_one[8], project_subnet_t2_one[9])
        # Create router interface
        subnet_name = "tenant2-mgmt-net: " + "192.168.201.32/27 " + "(tenant2-mgmt-subnet1)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "192.168.201.33")
        Networks.create_project_subnet(
            network_name, project_subnet_t2_two[0], project_subnet_t2_two[1],
            project_subnet_t2_two[2], project_subnet_t2_two[3],
            project_subnet_t2_two[4], project_subnet_t2_two[5],
            project_subnet_t2_two[6], project_subnet_t2_two[7],
            project_subnet_t2_two[8], project_subnet_t2_two[9])
        # Create router interface
        subnet_name = "tenant2-mgmt-net: " + "192.168.201.64/27 " + "(tenant2-mgmt-subnet2)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "192.168.201.65")
        Networks.create_project_subnet(
            network_name, project_subnet_t2_three[0],
            project_subnet_t2_three[1], project_subnet_t2_three[2],
            project_subnet_t2_three[3], project_subnet_t2_three[4],
            project_subnet_t2_three[5], project_subnet_t2_three[6],
            project_subnet_t2_three[7], project_subnet_t2_three[8],
            project_subnet_t2_three[9])
        # Create router interface
        subnet_name = "tenant2-mgmt-net: " + "10.201.1.0/27 " + "(tenant2-mgmt-subnet3)"
        Routers.create_router_interface(router_link, subnet_name, "10.201.1.1")
        Networks.create_project_subnet(
            network_name, project_subnet_t2_four[0], project_subnet_t2_four[1],
            project_subnet_t2_four[2], project_subnet_t2_four[3],
            project_subnet_t2_four[4], project_subnet_t2_four[5],
            project_subnet_t2_four[6], project_subnet_t2_four[7],
            project_subnet_t2_four[8], project_subnet_t2_four[9])
        # Create router interface
        subnet_name = "tenant2-mgmt-net: " + "10.201.1.32/27 " + "(tenant2-mgmt-subnet4)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "10.201.1.33")
        Networks.create_project_subnet(
            network_name, project_subnet_t2_five[0], project_subnet_t2_five[1],
            project_subnet_t2_five[2], project_subnet_t2_five[3],
            project_subnet_t2_five[4], project_subnet_t2_five[5],
            project_subnet_t2_five[6], project_subnet_t2_five[7],
            project_subnet_t2_five[8], project_subnet_t2_five[9])
        # Create router interface
        subnet_name = "tenant2-mgmt-net: " + "10.201.1.64/27 " + "(tenant2-mgmt-subnet5)"
        Routers.create_router_interface(router_link, subnet_name,
                                        "10.201.1.65")
        Logout.logout()
        Login.login(project_name_one, project_name_one)
        Routers.router_distributed(router_link, True)
        network_name = Networks.create_network("tenant1-net0",
                                               project_name_two, "vlan",
                                               "group0-data0", None, None,
                                               False, False, False)
        # Create Subnet
        Networks.create_subnet(network_name, "tenant1-subnet0",
                               "172.16.0.0/24", None, True, False, False, None,
                               None, None, None)
        network_name = Networks.create_network("tenant2-net0",
                                               project_name_three, "vlan",
                                               "group0-data1", None, None,
                                               False, False, False)
        # Create Subnet
        Networks.create_subnet(network_name, "tenant2-subnet0",
                               "172.18.0.0/24", None, True, False, False, None,
                               None, None, None)
        print "Test: PASS - Create router & subnets for tenant 2"
        # ............................................End of Tenant 2...................................................

        print "Test: UI Tests finished"
        DriverUtils.driver.quit()

    except Exception:
        print("Test: FAIL - Exception Thrown")
        traceback.print_exc()
        print "Common exceptions are: Timeout,\nElementNotVisibleException,\nElementNotSelectableException,\n" \
              "NoSuchElementException"
Exemple #15
class Irepa:
    """Irepa algorithm as described in Nicolas Mansard's paper."""
    def __init__(self):
        """
        Create ROS clients and the estimator model.
        """
        self.ocp_client = actionlib.SimpleActionClient(
            OPT_CONTROL_ACTION_SERVER, OptControlAction)
        self.ocp_client.wait_for_server()
        rospy.loginfo('End of wait for ocp action server')
        rospy.wait_for_service('create_samples')
        self.sampling_client = rospy.ServiceProxy('create_samples', Samples)

        self.estimator = Networks(NX,
                                  NU,
                                  x_range=np.array([X_MIN, X_MAX]),
                                  u_range=np.array([U_MIN, U_MAX]))

    def irepa_algo(self):
        """
        Build the PRM and neural networks, then start the IREPA loop.
        """
        tstart = time.time()

        # Initialize PRM with a sampling function,
        # a connect function, and a heuristic distance
        prm = PRM(sample_fun=self.sample,
                  connect_fun=self.connect,
                  hdistance=euclid)

        # Add NB_SAMPLE random nodes to the PRM
        prm.add_nodes(NB_SAMPLE, verbose=VERBOSE)

        print('PRM initialized,', len(prm.graph.nodes), 'nodes')

        # For plotting purposes
        astar_successes = np.zeros(IREPA_ITER)
        est_successes = np.zeros(IREPA_ITER)
        nb_attempts = np.zeros(IREPA_ITER)
        edge_numbers_arr = np.zeros(IREPA_ITER)
        total_edges_cost_before = np.zeros(IREPA_ITER)
        total_edges_cost_after = np.zeros(IREPA_ITER)

        # Loop control variables
        i = 0
        stop = False
        while not stop and i < IREPA_ITER:
            print('\n' * 5)
            print('################')
            print((('--- IREPA %d ---' % i) + '---' * 10 + '\n') * 3,
                  time.ctime())

            # Expand PRM
            # -----------------
            # Pick a pair of unconnected nearest neighbors
            # if distance > visibility horizon: # equivalent to longer_traj
            #   p* <- shortest path in PRM
            #   E <- ACADO(init = p*)
            # else: # equivalent to densify knn
            #   E <- ACADO(init = 0 or estimator)
            print('\n\n\n######################')
            print('EXPAND')
            nb_astar, nb_est, nb_attempt = prm.expand(self.estimator,
                                                      first=(not bool(i)))

            astar_successes[i] = nb_astar
            est_successes[i] = nb_est
            nb_attempts[i] = nb_attempt
            print()
            print('Edge number:', len(prm.graph.edges))
            edge_numbers_arr[i] = len(prm.graph.edges)
            print('######################\n\n\n')

            stop = prm.is_fully_connected()

            # Build a dataset of subtrajectories to train the estimator
            dset = Dataset(prm.graph)

            # Train the estimator on the dataset
            self.estimator.train(dset)

            # Improve the PRM when the estimator gives better results
            print('\n\n\n######################')
            print('IMPROVE')
            # improve() returns True if, on at least one edge, the estimator
            # beat the PRM and ACADO initialized with those estimations also
            # did better than the PRM
            total_edges_cost_before[i] = prm.graph.total_cost()
            stop = prm.improve(self.estimator)
            total_edges_cost_after[i] = prm.graph.total_cost()

            i += 1

            # Test the estimation of a random trajectory (not useful)
            # self.test(dset, i)

        tend = time.time()
        print('\n##############')
        print('IREPA was executed in ', (tend - tstart) / 60, 'minutes')
        print()

        if SAVE:
            print('Saving estimator weights')
            self.estimator.save()
            print('Saved')

        if PLOT:
            self.plot_results(astar_successes, est_successes, nb_attempts,
                              edge_numbers_arr, total_edges_cost_before,
                              total_edges_cost_after)

    def connect(self, s1, s2, init=None):
        """
        Try to connect two states by calling the ACADO optimizer service.
        If an init trajectory is passed, the optimization is warm-started
        with it.

        :param s1: start state
        :param s2: goal state
        :param init: tuple containing 3 fields
                     - states trajectory
                     - controls trajectory
                     - value of the trajectory
        :type s1: numpy.array
        :type s2: numpy.array
        :type init: tuple of size 3 (numpy.array, numpy.array, float)
        """
        print('Try to connect', s1, s2)

        if init is not None:
            X_init, U_init, V_init = init
            print('Using initialization, value:', V_init, ', length:',
                  X_init.shape[0])
            X_init = X_init.flatten()
            U_init = U_init.flatten()
        else:
            X_init, U_init, V_init = [], [], 0

        goal = OptControlGoal(s1, s2, X_init, U_init, V_init, NX, NU)
        self.ocp_client.send_goal(goal)
        # Force waiting for results to avoid overloading acado solver
        self.ocp_client.wait_for_result()
        res = self.ocp_client.get_result()

        if res.success:
            print('  SUCCESS of optimization, time:', res.time, 'Path length:',
                  len(res.states) // NX)
            X = np.array(res.states).reshape(len(res.states) // NX, NX)
            U = np.array(res.controls).reshape(len(res.controls) // NU, NU)
            return res.success, X, U, res.time
        else:
            print('  FAILURE of optimization')
            return res.success, [], [], 0

    def connect_test(self, s1, s2, init=None):
        """
        Legacy dummy function, used in place of connect before the ACADO
        node was implemented. Trajectories are computed by linear
        interpolation and the value by Euclidean distance.

        :param s1: start state
        :param s2: goal state
        :param init: tuple containing 3 fields
                     - states trajectory
                     - controls trajectory
                     - value of the trajectory
        :type s1: numpy.array
        :type s2: numpy.array
        :type init: tuple of size 3 (numpy.array, numpy.array, float)
        """
        success = random.randint(0, 1)

        trajlength = random.randint(10, 30)

        sxarr = np.array([s1[0], s2[0]])
        syarr = np.array([s1[1], s2[1]])
        sthetaarr = np.array([s1[2], s2[2]])

        Xx = np.linspace(s1[0], s2[0], trajlength)
        Xy = np.interp(Xx, sxarr, syarr)
        Xtheta = np.interp(Xx, sxarr, sthetaarr)
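        # Note: np.interp expects its xp argument (sxarr here) to be
        # increasing, so this dummy interpolation implicitly assumes
        # s1[0] < s2[0].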

        X = np.vstack([Xx, Xy, Xtheta]).T
        U = X.copy()[:, 0:2]
        V = euclid(s1, s2) + 0.02 * random.random()

        return success, X, U, V

    def sample(self, n):
        """
        Use the sampling client to generate n samples from the acceptable
        state space.

        :param n: number of samples to be returned
        :type n: int
        """
        resp = self.sampling_client(n)
        return np.array(resp.samples).reshape(n, int(len(resp.samples) / n))

    def plot_results(self, astar_successes, est_successes, nb_attempts,
                     edge_numbers_arr, total_edges_cost_before,
                     total_edges_cost_after):
        """
        Plot the results of the IREPA loop.

        :param astar_successes: number of successfully calculated trajectories
                                at each iteration using astar initialization
        :param est_successes: number of successfully calculated trajectories
                              at each iteration using estimator initialization
        :param nb_attempts: number of connections attempted at each iteration
                            (in fact, the number of remaining edges to create)
        :param edge_numbers_arr: number of edges in the graph at each iteration
        :param total_edges_cost_before: sum of all edges costs before the
                                        improve step
        :param total_edges_cost_after: sum of all edges costs after the
                                       improve step
        """
        MAX_EDGE_NB = NB_SAMPLE * (NB_SAMPLE - 1)
        iters = np.arange(IREPA_ITER, dtype=np.int64)

        plt.plot(iters,
                 astar_successes,
                 color='blue',
                 label='astar',
                 marker='.',
                 markersize=15,
                 linestyle='dashed')
        plt.plot(iters,
                 est_successes,
                 color='green',
                 label='estimator',
                 marker='.',
                 markersize=15,
                 linestyle='dashed')
        plt.plot(iters,
                 nb_attempts,
                 color='orange',
                 label='attempts',
                 marker='.',
                 markersize=15,
                 linestyle='dashed')
        plt.xticks(iters)
        plt.legend()
        plt.show()

        plt.axhline(y=MAX_EDGE_NB, color='black', linestyle='dashed')
        plt.plot(iters,
                 edge_numbers_arr,
                 color='green',
                 label='edges',
                 marker='.',
                 markersize=15,
                 linestyle='dashed')
        plt.xticks(iters)
        plt.legend()
        plt.show()

        plt.plot(iters,
                 total_edges_cost_before,
                 color='gray',
                 label='total cost before',
                 marker='.',
                 markersize=15,
                 linestyle='dashed')
        plt.plot(iters,
                 total_edges_cost_after,
                 color='green',
                 label='total cost after',
                 marker='.',
                 markersize=15,
                 linestyle='dashed')
        plt.xticks(iters)
        plt.legend()
        plt.show()

    def test(self, dset, i):
        # Test the estimator networks
        metrics = self.estimator.test(dset)
        print('\n##########')
        print('TEST ESTIMATOR')
        print('    value', metrics[0])
        print('    controls', metrics[1])
        # 'states' label is an assumption; the original printed 'value' twice.
        print('    states', metrics[2])
        print('##########\n')

        print()
        print("\nEstimations at iteration", i)
        test_traj_idx = random.sample(range(len(dset.us)), 1)
        # test_traj_idx = 18
        print('Dataset size:', len(dset.x1s), 'trajectories')
        x0 = dset.x1s[test_traj_idx, :].T
        x1 = dset.x2s[test_traj_idx, :].T
        print('x0 x1')
        print(x0)
        print(x1)
        print('Nets trajectories')
        X, U, V = self.estimator.trajectories(x0, x1)
        print('State trajectory')
        print(X)
        print('Control trajectory')
        print(U)
        print('Value')
        print(V)
        print('Euclidean value')
        print(euclid(x0, x1))
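
# The `euclid` helper used as the PRM heuristic above, in connect_test, and in
# the controller's stop-radius check below is imported from elsewhere and not
# shown in this snippet. A minimal sketch consistent with its call sites (two
# state vectors in, one scalar out) is given here as an assumption; the real
# metric may weight the angular component differently.
import numpy as np

def euclid(s1, s2):
    """Plain Euclidean distance between two state vectors of shape (NX,)."""
    return np.linalg.norm(np.asarray(s1) - np.asarray(s2))
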
class Controller:
    """
    Online controller sending commands/controls to the simulation node
    """
    def __init__(self):
        """
        Load the IREPA-built estimator, then create clients for the
        simulation and optimal control nodes.
        """
        self.estimator = Networks(NX,
                                  NU,
                                  x_range=np.array([X_MIN, X_MAX]),
                                  u_range=np.array([U_MIN, U_MAX]))
        self.estimator.load()
        # Last state trajectory calculated
        self.X = np.array([])
        self.U = np.array([])
        self.u = np.zeros(NU)
        self.time = 0
        self.current_state = np.zeros(NX)
        self.end_state = np.zeros(NX)
        # index of the current control step within the current trajectory
        self.t_idx = 0
        # update the trajectory every update_times iterations
        self.update_times = int(CPS / TUPS)

        self.ocp_client = actionlib.SimpleActionClient(OPT_CONTROL_SERVER,
                                                       OptControlAction)
        self.ocp_client.wait_for_server()
        self.pub = rospy.Publisher(COMMAND_TOPIC, Command, queue_size=10)
        rospy.Subscriber(CURRENT_STATE_TOPIC, State, self.update_current_state)
        rospy.Subscriber(END_STATE_TOPIC, State, self.update_end_state)

        # test state rate
        self.t1 = rospy.get_rostime()
        self.t2 = rospy.get_rostime()

        # control steps
        self.stop_update = False
        self.stop_controls = False
        self.started = False

    def next_control(self):
        """
        Choose next control to send to the simulation.
        """
        self.t_idx += 1
        if euclid(self.current_state, self.end_state) < STOP_RADIUS:
            self.stop_update = True
            self.stop_controls = True
        else:
            self.stop_update = False
            self.stop_controls = False

        if self.stop_controls:
            self.u = np.zeros(NU)
        else:
            if self.t_idx < self.U.shape[0]:
                self.u = self.U[self.t_idx, :]
                # print('  CONTROL:', self.u)
            else:
                if self.started:
                    print('  !! No more control --> previous')
        self.pub.publish(self.u)
        return self.u

    def update_current_state(self, msg):
        """
        Callback function for the simulation Subscriber for current state.
        """
        # print('CURRENT STATE received:', msg.x)
        self.current_state = np.array(msg.x)

    def update_end_state(self, msg):
        """
        Callback function for the simulation Subscriber for end state.
        """
        # print('END STATE received:', msg.x)
        self.end_state = np.array(msg.x)

    def update_trajectory(self, state, resp):
        """
        Callback function for the optimal control action server.

        Once a control trajectory is received, it is resampled using the
        trajectory time so that the interval between two consecutive
        controls matches the control publication rate (CPS).

        :param state: state of the action server (nothing to do with
                      the state of the system)
        :param resp: response of the action server containing
                     - states: states trajectory
                     - controls: controls trajectory
                     - time: time length of the trajectory
        """
        self.started = True  # control started at the first result
        if resp.success:
            X = np.array(resp.states).reshape(len(resp.states) // NX, NX)
            U = np.array(resp.controls).reshape(len(resp.controls) // NU, NU)
            self.time = resp.time
            # if resp.time < UPDATE_TIME_THRES:
            #     self.stop_update = True

            dt_acado = self.time / (X.shape[0] - 1)
            nb_control = int(resp.time * CPS) + 1
            self.X = resample(X, nb_control)
            self.U = resample(U, nb_control)
            tend = time.time()
            t_calc = (tend - self.tstart)
            self.t_idx = int(t_calc * CPS) if SHIFT else 0

            print()
            print('RESULT TRAJECTORY')
            print('UPDATE TOOK:', round(t_calc, 2))
            print('TIME TRAJ:  ', round(resp.time, 2))
            print('Dt acado', dt_acado, 'nb_control', nb_control, 'SIZE X',
                  X.shape[0])
            print(self.t_idx)

        else:
            print()
            print('FAILURE OF CONTROL!!!!')
            print()

    def call_update_trajectory_action(self):
        """
        Call ACADO, warm-started by the estimator. The result is handled
        by the update_trajectory callback.
        """
        self.tstart = time.time()
        if ESTIMATOR_INIT:
            Xe, Ue, Ve = self.estimator.trajectories(self.current_state,
                                                     self.end_state)
            Xe = Xe.flatten()
            Ue = Ue.flatten()
        else:
            Xe, Ue, Ve = [], [], 0

        goal = OptControlGoal(list(self.current_state), list(self.end_state),
                              Xe, Ue, Ve, NX, NU)
        self.ocp_client.send_goal(goal, self.update_trajectory)

    def start_control(self):
        """
        Start the control loop at rate CPS
        """
        print('Control started')
        rate = rospy.Rate(CPS)
        i = 0
        while not rospy.is_shutdown():
            i += 1
            if i % self.update_times == 0:
                if not self.stop_update:
                    self.call_update_trajectory_action()
                i = 0
            self.next_control()
            rate.sleep()
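
# Minimal launch sketch (assumptions: the module-level constants used above
# are defined, a ROS master is running, and the node name is illustrative).
if __name__ == '__main__':
    rospy.init_node('irepa_controller')
    controller = Controller()
    controller.start_control()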
Exemple #17
class Core:
	def __init__(self):
		user_dir = "%s%s.kismon%s" % (os.path.expanduser("~"), os.sep, os.sep)
		if not os.path.isdir(user_dir):
			print("Creating Kismon user directory %s" % user_dir)
			os.mkdir(user_dir)
		config_file = "%skismon.conf" % user_dir
		self.config_handler = Config(config_file)
		self.config_handler.read()
		self.config = self.config_handler.config
		
		self.marker_text = """Encryption: %s
MAC: %s
Manuf: %s
Type: %s
Channel: %s
First seen: %s
Last seen: %s"""
		
		self.sources = {}
		self.crypt_cache = {}
		self.networks = Networks(self.config)
		
		self.init_client_thread()
		if self.config["kismet"]["connect"] is True:
			self.client_start()
		
		if "--disable-map" in sys.argv:
			self.map_error = "--disable-map used"
		else:
			self.map_error = check_osmgpsmap()
		
		if self.map_error is not None:
			self.map_error = "%s\nMap disabled" % self.map_error
			print(self.map_error, "\n")
		
		self.init_map()
		
		self.main_window = MainWindow(self.config,
			self.client_start,
			self.client_stop,
			self.map,
			self.networks,
			self.sources,
			self.client_thread.client)
		self.main_window.log_list.add("Kismon started")
		if self.map_error is not None:
			self.main_window.log_list.add(self.map_error)
		
		self.networks_file = "%snetworks.json" % user_dir
		if os.path.isfile(self.networks_file):
			try:
				self.networks.load(self.networks_file)
			except Exception:
				error = sys.exc_info()[1]
				print(error)
				dialog_message = "Could not read the networks file '%s':\n%s\n\nDo you want to continue?" % (self.networks_file, error)
				dialog = Gtk.MessageDialog(self.main_window.gtkwin, Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.ERROR, Gtk.ButtonsType.YES_NO, dialog_message)
				def dialog_response(dialog, response_id):
					self.dialog_response = response_id
				dialog.connect("response", dialog_response)
				dialog.run()
				dialog.destroy()
				if self.dialog_response == -9:  # Gtk.ResponseType.NO
					print("exit")
					self.client_thread.stop()
					self.main_window.gtkwin = None
					return
		self.networks.set_autosave(self.config["networks"]["autosave"], self.networks_file, self.main_window.log_list.add)
		
		if self.map is not None:
			self.networks.notify_add_list["map"] = self.add_network_to_map
			self.networks.notify_remove_list["map"] = self.map.remove_marker
		
		self.main_window.network_list.crypt_cache = self.crypt_cache
		
		self.battery_max = None
		self.battery = None
		path = "/proc/acpi/battery/"
		if os.path.exists(path):
			for name in os.listdir(path):
				self.battery = name
				f = open("%s%s/info" % (path, name))
				for line in f.readlines():
					if line.startswith("last full capacity:"):
						max = line.split(":")[1].strip()
						self.battery_max = int(max.split()[0])
						break
				GObject.timeout_add(30000, self.update_battery_bar)
				break
		self.update_battery_bar()
		
		GLib.timeout_add(500, self.queue_handler)
		GLib.timeout_add(300, self.queue_handler_networks)
		GLib.idle_add(self.networks.apply_filters)
		
	def init_map(self):
		if self.map_error is not None:
			self.map = None
		else:
			try:
				from .map import Map
			except SystemError:
				from map import Map
			self.map = Map(self.config["map"])
			self.map.set_zoom(16)
			pos = self.config["map"]["last_position"].split("/")
			self.map.set_position(float(pos[0]), float(pos[1]), True)
		
	def init_client_thread(self):
		self.client_thread = ClientThread(self.config["kismet"]["server"])
		self.client_thread.client.set_capabilities(
			('status', 'source', 'info', 'gps', 'bssid', 'bssidsrc', 'ssid'))
		if "--create-kismet-dump" in sys.argv:
			self.client_thread.client.enable_dump()
		
	def client_start(self):
		if self.client_thread.is_running is True:
			self.client_stop()
		self.sources = {}
		self.init_client_thread()
		if "--load-kismet-dump" in sys.argv:
			self.client_thread.client.load_dump(sys.argv[2])
		self.client_thread.start()
		
	def client_stop(self):
		self.client_thread.stop()
		
	def queue_handler(self):
		if self.main_window.gtkwin is None:
			return False
			
		if len(self.client_thread.client.error) > 0:
			for error in self.client_thread.client.error:
				self.main_window.log_list.add(error)
			self.client_thread.client.error = []
		
		#gps
		gps = None
		fix = None
		gps_queue = self.client_thread.get_queue("gps")
		while True:
			try:
				data = gps_queue.pop()
				if gps is None:
					gps = data
				if data["fix"] > 1:
					fix = (data["lat"], data["lon"])
					break
			except IndexError:
				break
		if gps is not None:
			self.main_window.update_gps_table(gps)
			if fix is not None and self.map is not None:
				self.map.set_position(fix[0], fix[1])
		
		#status
		for data in self.client_thread.get_queue("status"):
			self.main_window.log_list.add(data["text"])
		
		#info
		info_queue = self.client_thread.get_queue("info")
		try:
			data = info_queue.pop()
			self.main_window.update_info_table(data)
		except IndexError:
			pass
			
		#source
		update = False
		for data in self.client_thread.get_queue("source"):
			uuid = data["uuid"]
			if uuid == "00000000-0000-0000-0000-000000000000":
				continue
			self.sources[uuid] = data
			
			update = True
		if update is True:
			self.main_window.update_sources_table(self.sources)
		
		return True
		
	def queue_handler_networks(self):
		#ssid
		for data in self.client_thread.get_queue("ssid"):
			self.networks.add_ssid_data(data)
		
		#bssid
		bssids = {}
		for data in self.client_thread.get_queue("bssid"):
			mac = data["bssid"]
			self.networks.add_bssid_data(data)
			if mac in self.main_window.signal_graphs and "signal_dbm" not in self.client_thread.client.capabilities["bssidsrc"]:
				self.main_window.signal_graphs[mac].add_value(None, None, data["signal_dbm"])
			
			bssids[mac] = True
			
		#bssidsrc
		for data in self.client_thread.get_queue("bssidsrc"):
			if "signal_dbm" not in data or data["uuid"] not in self.sources:
				continue
			
			mac = data["bssid"]
			if mac in self.main_window.signal_graphs:
				self.main_window.signal_graphs[mac].add_value(self.sources[data["uuid"]], data, data["signal_dbm"])
		
		if len(self.networks.notify_add_queue) > 0:
			self.networks.start_queue()
			if len(self.networks.notify_add_queue) > 500:
				self.networks.disable_refresh()
				self.main_window.networks_queue_progress()
		
		self.main_window.update_statusbar()
		return True
		
	def quit(self):
		self.client_thread.stop()
		self.config_handler.write()
		self.networks.save(self.networks_file)
		
	def get_battery_capacity(self):
		filename = "/proc/acpi/battery/%s/state" % self.battery
		if not os.path.isfile(filename):
			return False
		with open(filename) as f:
			for line in f:
				if line.startswith("remaining capacity:"):
					current = line.split(":")[1].strip()
					current = int(current.split()[0])
					return round(100.0 / self.battery_max * current, 1)
		return False
		
	def update_battery_bar(self):
		battery = self.get_battery_capacity()
		self.main_window.set_battery_bar(battery)
		return True
		
	def add_network_to_map(self, mac):
		network = self.networks.get_network(mac)
		
		try:
			crypt = self.crypt_cache[network["cryptset"]]
		except KeyError:
			crypt = decode_cryptset(network["cryptset"], True)
			self.crypt_cache[network["cryptset"]] = crypt
		
		if "WPA" in crypt:
			color = "red"
		elif "WEP" in crypt:
			color = "orange"
		else:
			color = "green"
		
		ssid = network["ssid"]
		if ssid == "":
			ssid = "<no ssid>"
		evils = (("&", "&amp;"),("<", "&lt;"),(">", "&gt;"))
		for evil, good in evils:
			ssid = ssid.replace(evil, good)
		
		time_format = "%d.%m.%Y %H:%M:%S"
		
		text = self.marker_text % (crypt, mac, network["manuf"],
			network["type"], network["channel"],
			time.strftime(time_format, time.localtime(network["firsttime"])),
			time.strftime(time_format, time.localtime(network["lasttime"]))
			)
		text = text.replace("&", "&amp;")
		
		self.map.add_marker(mac, color, network["lat"], network["lon"])
Exemple #18
# iterate through all the years
print("Loading team databases ...")
all_teams = pd.read_csv(
    'http://igem.org/Team_List.cgi?year=all&team_list_download=1')
all_teams = all_teams.rename(
    columns={c: c.replace(' ', '')
             for c in all_teams.columns})

if YEARS:
    years = [int(i) for i in YEARS.split(',')]
else:
    years = all_teams['Year'].unique()

if NETWORKS:
    networks = Networks(all_teams)  # lowercase name avoids shadowing the Networks class
# print(YEARS)
# print(years)

for y in years:
    if y not in all_teams['Year'].unique():
        continue
    if y < 2008:
        continue
    # We parse the data
    # y = year.split('.')[0]
    print(("Grabbing team list for year: %s" % y))
    # df = pd.read_csv('Teams/%s' % year)
    df = all_teams[(all_teams['Year'] == y)
                   & (all_teams['Status'] == 'Accepted')]
    teams = df['Team'].values
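
# Illustration (made-up column names): the rename above strips spaces from the
# CSV headers, so a column like 'Team Name' becomes 'TeamName' and lookups no
# longer depend on igem.org's exact header spacing.
demo = pd.DataFrame(columns=['Team Name', 'Team List Year'])
demo = demo.rename(columns={c: c.replace(' ', '') for c in demo.columns})
print(list(demo.columns))  # ['TeamName', 'TeamListYear']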
Exemple #19
def main():
    env = make(game='SonicTheHedgehog2-Genesis', state='EmeraldHillZone.Act1')

    # Parameters for observation image size processing.
    img_rows = 128
    img_cols = 128
    img_stack = 4

    action_size = 8  # 8 valid button combinations

    # Inputs to the agent's prediction network will have the following shape.
    input_size = (img_rows, img_cols, img_stack)

    # File paths
    stat_path = '../statistics/dqn_n-step'
    model_path = '../models/dqn_n-step'

    # Prioritized Experience Replay.
    if (PER_AGENT):
        print('PER agent')
        stat_path += '_PER'
        model_path += '_PER'
        dqn_agent = DQN_PER_Agent(input_size, action_size)
    else:
        dqn_agent = DQN_Agent(input_size, action_size)

    # Use the Noisy Dueling Network.
    if (NOISY):
        stat_path += '_noisy_dueling'
        model_path += '_noisy_dueling'
        print('NOISY Dueling agent')
        dqn_agent.main_model = Networks.noisy_dueling_dqn(
            input_size, action_size, dqn_agent.main_lr)
        dqn_agent.target_model = Networks.noisy_dueling_dqn(
            input_size, action_size, dqn_agent.target_lr)
        dqn_agent.noisy = True
    # Use the normal dueling network.
    elif (DUELING):
        stat_path += '_dueling'
        model_path += '_dueling'
        print('Dueling agent')
        dqn_agent.main_model = Networks.dueling_dqn(input_size, action_size,
                                                    dqn_agent.main_lr)
        dqn_agent.target_model = Networks.dueling_dqn(input_size, action_size,
                                                      dqn_agent.target_lr)
    # Normal DQN.
    else:
        dqn_agent.main_model = Networks.dqn(input_size, action_size,
                                            dqn_agent.main_lr)
        dqn_agent.target_model = Networks.dqn(input_size, action_size,
                                              dqn_agent.target_lr)

    # Append correct suffix and filetype to paths.
    stat_path += '_stats.csv'
    main_model_path = model_path + '_main.h5'
    target_model_path = model_path + '_target.h5'

    # Load previous models, or instantiate new networks.
    if (LOAD_MODELS):
        dqn_agent.load_models(main_model_path, target_model_path)

    # Modify starting epsilon value
    if (EPSILON == START):
        dqn_agent.epsilon = dqn_agent.initial_epsilon
    elif (EPSILON == MIDDLE):
        dqn_agent.epsilon = (
            (dqn_agent.initial_epsilon - dqn_agent.final_epsilon) / 2)
    else:
        dqn_agent.epsilon = dqn_agent.final_epsilon

    # Store rewards and states from the previous n state, action pairs to
    # create experiences.
    prev_n_rewards = deque(maxlen=dqn_agent.n_step)
    prev_n_exp = deque(maxlen=dqn_agent.n_step)

    # One episode is 4500 steps if not completed:
    # 5 minutes of play with one step every 1/15th of a second (4 frames at 60 Hz)
    total_timestep = 0  # Total number of timesteps over all episodes.
    for episode in range(EPISODES):
        done = False
        reward_sum = 0  # Cumulative reward within the episode.
        timestep = 0  # Track timesteps within the episode.

        # Rewards and states must be consecutive to improve temporal awareness.
        # Reset at the start of each episode to compensate for sudden scene change.
        prev_n_rewards.clear()
        prev_n_exp.clear()

        # Experiences are a stack of the img_stack most recent frames to
        # provide temporal information. Initialize this sequence to the first
        # observation stacked 4 times.
        first_obs = env.reset()
        processed = preprocess_obs(first_obs, size=(img_rows, img_cols))
        # (img_rows, img_cols, img_stack)
        exp_stack = np.stack(([processed] * img_stack), axis=2)
        # Expand dimensions to stack and submit multiple exp_stacks in a batch
        # (1, img_rows, img_cols, img_stack).
        exp_stack = np.expand_dims(exp_stack, axis=0)  # 1x128x128x4

        # Continue until the end of the zone is reached or 4500 timesteps have
        # passed.
        while not done:
            # Predict an action to take based on the most recent
            # experience.
            #
            # Note that the first dimension
            # (1, img_rows, img_cols, img_stack) is ignored by the
            # network here as it represents a batch size of 1.
            act_idx, action = dqn_agent.act(exp_stack)
            obs, reward, done, info = env.step(action)
            # env.render()

            timestep += 1
            total_timestep += 1
            reward_sum += reward

            # Create a 1st dimension for stacking experiences and a 4th for
            # stacking img_stack frames.
            obs = preprocess_obs(obs, size=(img_rows, img_cols))
            obs = np.reshape(obs, (1, img_rows, img_cols, 1))

            # Append the new observation to the front of the stack and remove
            # the oldest (4th) frame.
            exp_stack_new = np.append(obs, exp_stack[:, :, :, :3], axis=3)

            # Save the previous state, selected action, and resulting reward
            prev_n_rewards.appendleft(reward)
            prev_n_exp.append((exp_stack, act_idx, done))
            exp_stack = exp_stack_new

            # Once sufficient steps have been taken, discount rewards and
            # save the nth previous experience.
            if (len(prev_n_rewards) >= dqn_agent.n_step):
                # Compute discounted reward
                discounted_reward = 0
                for idx in range(len(prev_n_rewards)):
                    prev_reward = prev_n_rewards[idx]
                    # Rewards are appended left so that the most recent
                    # rewards are discounted the least.
                    discounted_reward += ((dqn_agent.gamma**idx) * prev_reward)

                # Experiences are pushed forward into the deque as more are
                # appended. The nth previous experience is at the last index.
                original_state, original_act, _ = prev_n_exp[-1]
                nth_state, _, nth_done = prev_n_exp[0]

                # Save the nth previous state and predicted action, the
                # discounted sum of rewards, and the final state over the
                # next n steps.
                dqn_agent.save_memory(original_state, original_act,
                                      discounted_reward, nth_state, nth_done)
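                # Worked example (illustrative numbers): with n_step = 3 and
                # gamma = 0.9, rewards appended left as [r_t, r_t-1, r_t-2]
                # give discounted_reward = r_t + 0.9*r_t-1 + 0.81*r_t-2, i.e.
                # the most recent reward is discounted the least.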

            # In the observation phase, skip training updates and decrementing epsilon.
            if (total_timestep >= dqn_agent.observation_timesteps):

                # Update the target model with the main model's weights.
                if ((total_timestep % dqn_agent.update_target_freq) == 0):
                    dqn_agent.update_target_model()

                # Train the agent on saved experiences.
                if ((total_timestep % dqn_agent.timestep_per_train) == 0):
                    dqn_agent.replay_update()
                    dqn_agent.save_models(main_model_path, target_model_path)

                if (dqn_agent.epsilon > dqn_agent.final_epsilon):
                    # Decrease epsilon by a fraction of the range such that epsilon decreases
                    # for "exploration_timesteps".
                    dec = (
                        (dqn_agent.initial_epsilon - dqn_agent.final_epsilon) /
                        dqn_agent.exploration_timesteps)
                    dqn_agent.epsilon -= dec

            # print(info)
            print("Epsisode:", episode, " Timestep:", timestep, " Action:",
                  act_idx, " Episode Reward Sum:", reward_sum, " Epsilon:",
                  dqn_agent.epsilon)

        # Save the episode's cumulative reward and timestep count - append to stats file
        with open(stat_path, "a") as stats_fd:
            reward_str = "Epsiode Cummulative Reward: " + str(
                reward_sum) + ", Episode Timestpes: " + str(timestep) + ",\n"
            stats_fd.write(str(reward_str))
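
# `preprocess_obs` is imported from a helper module this snippet does not
# show. A minimal stand-in consistent with its usage above (returns one 2-D
# frame of shape `size`, stackable along axis 2) might look like this; the
# real helper may crop or normalize differently.
import cv2
import numpy as np

def preprocess_obs(obs, size=(128, 128)):
    """Hypothetical stand-in: grayscale, resize, and scale to [0, 1]."""
    gray = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
    return cv2.resize(gray, size).astype(np.float32) / 255.0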
Exemple #20
import argparse
import asyncio
import os
import json
import indy_vdr
from util import (enable_verbose, log, create_did)
from fetch_ledger_tx import FetchLedgerTX
from pool import PoolCollection
from networks import Networks
from plugin_collection import PluginCollection

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Fetch transaction related details from indy based ledgers"
    )
    parser.add_argument("--net",
                        choices=Networks.get_ids(),
                        help="Connect to a known network using an ID.")
    parser.add_argument("--list-nets",
                        action="store_true",
                        help="List known networks.")
    parser.add_argument(
        "--genesis-url",
        default=os.environ.get('GENESIS_URL'),
        help=
        "The url to the genesis file describing the ledger pool.  Can be specified using the 'GENESIS_URL' environment variable."
    )
    parser.add_argument(
        "--genesis-path",
        default=os.getenv("GENESIS_PATH"),
        help=
        "The path to the genesis file describing the ledger pool.  Can be specified using the 'GENESIS_PATH' environment variable."
Exemple #21
class GMVAE:
    def __init__(self, params):
        self.batch_size = params.batch_size
        self.batch_size_val = params.batch_size_val
        self.initial_temperature = params.temperature
        self.decay_temperature = params.decay_temperature
        self.num_epochs = params.num_epochs
        self.loss_type = params.loss_type
        self.num_classes = params.num_classes
        self.w_gauss = params.w_gaussian
        self.w_categ = params.w_categorical
        self.w_recon = params.w_reconstruction
        self.decay_temp_rate = params.decay_temp_rate
        self.gaussian_size = params.gaussian_size
        self.min_temperature = params.min_temperature
        self.temperature = params.temperature  # current temperature
        self.verbose = params.verbose

        self.sess = tf.Session()
        self.network = Networks(params)
        self.losses = LossFunctions()

        self.learning_rate = tf.placeholder(tf.float32, [])
        self.lr = params.learning_rate
        self.decay_epoch = params.decay_epoch
        self.lr_decay = params.lr_decay

        self.dataset = params.dataset
        self.metrics = Metrics()

    def create_dataset(self, is_training, data, labels, batch_size):
        """Create dataset given input data

      Args:
          is_training: (bool) whether to use the train or test pipeline.
                       At training, we shuffle the data and have multiple epochs
          data: (array) corresponding array containing the input data
          labels: (array) corresponding array containing the labels of the input data
          batch_size: (int) size of each batch to consider from the data
 
      Returns:
          output: (dict) contains what will be the input of the tensorflow graph
      """
        num_samples = data.shape[0]

        # create dataset object
        if labels is None:
            dataset = tf.data.Dataset.from_tensor_slices(data)
        else:
            dataset = tf.data.Dataset.from_tensor_slices((data, labels))

        # shuffle data in training phase
        if is_training:
            dataset = dataset.shuffle(num_samples).repeat()

        dataset = dataset.batch(batch_size)
        dataset = dataset.prefetch(1)

        # create reinitializable iterator from dataset
        iterator = dataset.make_initializable_iterator()

        if labels is None:
            data = iterator.get_next()
        else:
            data, labels = iterator.get_next()

        iterator_init = iterator.initializer
        output = {
            'data': data,
            'labels': labels,
            'iterator_init': iterator_init
        }
        return output
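
    # Usage sketch (illustrative): with `params` carrying the fields read in
    # __init__ and MNIST-like arrays, something like
    #     inputs = gmvae.create_dataset(True, x_train, y_train, params.batch_size)
    # returns the dict consumed by create_model(); run 'iterator_init' once
    # before pulling batches from 'data'.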

    def unlabeled_loss(self, data, latent_spec, output_size, is_training=True):
        """Model function defining the loss functions derived from the variational lower bound

      Args:
          data: (array) corresponding array containing the input data
          latent_spec: (dict) contains the graph operations or nodes of the latent variables
          output_size: (int) size of the output layer
          is_training: (bool) whether we are in training phase or not

      Returns:
          loss_dic: (dict) contains the values of each loss function and predictions
      """
        gaussian, mean, var = latent_spec['gaussian'], latent_spec[
            'mean'], latent_spec['var']
        categorical, prob, log_prob = latent_spec['categorical'], latent_spec[
            'prob_cat'], latent_spec['log_prob_cat']
        _logits, features = latent_spec['logits'], latent_spec['features']

        output, y_mean, y_var = latent_spec['output'], latent_spec[
            'y_mean'], latent_spec['y_var']

        # reconstruction loss
        if self.loss_type == 'bce':
            loss_rec = self.w_recon * self.losses.binary_cross_entropy(
                data, output)
        elif self.loss_type == 'mse':
            loss_rec = self.w_recon * tf.losses.mean_squared_error(
                data, output)
        else:
            raise "invalid loss function... try bce or mse..."

        # gaussian loss
        loss_gaussian = self.w_gauss * self.losses.labeled_loss(
            gaussian, mean, var, y_mean, y_var)

        # categorical loss
        loss_categorical = self.w_categ * -self.losses.entropy(_logits, prob)

        # obtain predictions
        predicted_labels = tf.argmax(_logits, axis=1)

        # total_loss
        loss_total = loss_rec + loss_gaussian + loss_categorical

        loss_dic = {
            'total': loss_total,
            'predicted_labels': predicted_labels,
            'reconstruction': loss_rec,
            'gaussian': loss_gaussian,
            'categorical': loss_categorical
        }
        return loss_dic

    def create_model(self, is_training, inputs, output_size):
        """Model function defining the graph operations.

      Args:
          is_training: (bool) whether we are in training phase or not
          inputs: (dict) contains the inputs of the graph (features, labels...)
                  this can be `tf.placeholder` or outputs of `tf.data`
          output_size: (int) size of the output layer

      Returns:
          model_spec: (dict) contains the graph operations or nodes needed for training / evaluation
      """
        data, _labels = inputs['data'], inputs['labels']
        latent_spec = self.network.encoder(data, self.num_classes, is_training)

        out_logits, y_mean, y_var, output = self.network.decoder(
            latent_spec['gaussian'], latent_spec['categorical'], output_size,
            is_training)

        latent_spec['output'] = out_logits
        latent_spec['y_mean'] = y_mean
        latent_spec['y_var'] = y_var

        # unlabeled losses
        unlabeled_loss_dic = self.unlabeled_loss(data, latent_spec,
                                                 output_size, is_training)

        loss_total = unlabeled_loss_dic['total']

        if is_training:
            # use adam for optimization
            optimizer = tf.train.AdamOptimizer(self.learning_rate)

            # needed for batch normalization layer
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(loss_total)

        # create model specification
        model_spec = inputs
        model_spec['variable_init_op'] = tf.global_variables_initializer()

        # optimizers are only available in training phase
        if is_training:
            model_spec['train_op'] = train_op
        else:
            model_spec['output'] = output

        model_spec['loss_total'] = loss_total
        model_spec['loss_rec_ul'] = unlabeled_loss_dic['reconstruction']
        model_spec['loss_gauss_ul'] = unlabeled_loss_dic['gaussian']
        model_spec['loss_categ_ul'] = unlabeled_loss_dic['categorical']
        model_spec['true_labels'] = _labels
        model_spec['predicted'] = unlabeled_loss_dic['predicted_labels']

        return model_spec

    def evaluate_dataset(self, is_training, num_batches, model_spec):
        """Evaluate the model

      Args:
          is_training: (bool) whether we are training or not
          num_batches: (int) number of batches to train/test
          model_spec: (dict) contains the graph operations or nodes needed for evaluation

      Returns:
          (dic) average of loss functions and metrics for the given number of batches
      """
        avg_accuracy = 0.0
        avg_nmi = 0.0
        avg_loss_cat = 0.0
        avg_loss_total = 0.0
        avg_loss_rec = 0.0
        avg_loss_gauss = 0.0

        list_predicted_labels = []
        list_true_labels = []

        # initialize dataset iterator
        self.sess.run(model_spec['iterator_init'])

        if is_training:

            train_optimizer = model_spec['train_op']

            # training phase
            for j in range(num_batches):
                _, loss_total, loss_cat_ul, loss_rec_ul, loss_gauss_ul, true_labels, predicted_labels = self.sess.run(
                    [
                        train_optimizer, model_spec['loss_total'],
                        model_spec['loss_categ_ul'], model_spec['loss_rec_ul'],
                        model_spec['loss_gauss_ul'], model_spec['true_labels'],
                        model_spec['predicted']
                    ],
                    feed_dict={
                        self.network.temperature: self.temperature,
                        self.learning_rate: self.lr
                    })

                # save values
                list_predicted_labels.append(predicted_labels)
                list_true_labels.append(true_labels)
                avg_loss_rec += loss_rec_ul
                avg_loss_gauss += loss_gauss_ul
                avg_loss_cat += loss_cat_ul
                avg_loss_total += loss_total
        else:
            # validation phase
            for j in range(num_batches):
                # run the tensorflow flow graph
                loss_rec_ul, loss_gauss_ul, loss_cat_ul, loss_total, true_labels, predicted_labels = self.sess.run(
                    [
                        model_spec['loss_rec_ul'], model_spec['loss_gauss_ul'],
                        model_spec['loss_categ_ul'], model_spec['loss_total'],
                        model_spec['true_labels'], model_spec['predicted']
                    ],
                    feed_dict={
                        self.network.temperature: self.temperature,
                        self.learning_rate: self.lr
                    })

                # save values
                list_predicted_labels.append(predicted_labels)
                list_true_labels.append(true_labels)
                avg_loss_rec += loss_rec_ul
                avg_loss_gauss += loss_gauss_ul
                avg_loss_cat += loss_cat_ul
                avg_loss_total += loss_total

        # average values by the given number of batches
        avg_loss_rec /= num_batches
        avg_loss_gauss /= num_batches
        avg_loss_cat /= num_batches
        avg_loss_total /= num_batches

        # average accuracy and nmi of all the data
        predicted_labels = np.hstack(list_predicted_labels)
        true_labels = np.hstack(list_true_labels)
        avg_nmi = self.metrics.nmi(predicted_labels, true_labels)
        avg_accuracy = self.metrics.cluster_acc(predicted_labels, true_labels)

        return {
            'loss_rec': avg_loss_rec,
            'loss_gauss': avg_loss_gauss,
            'loss_cat': avg_loss_cat,
            'loss_total': avg_loss_total,
            'accuracy': avg_accuracy,
            'nmi': avg_nmi
        }

    def train(self, train_data, train_labels, val_data, val_labels):
        """Train the model

      Args:
          train_data: (array) corresponding array containing the training data
          train_labels: (array) corresponding array containing the labels of the training data
          val_data: (array) corresponding array containing the validation data
          val_labels: (array) corresponding array containing the labels of the validation data

      Returns:
          output: (dict) contains the history of train/val loss
      """
        train_history_loss, val_history_loss = [], []
        train_history_acc, val_history_acc = [], []
        train_history_nmi, val_history_nmi = [], []

        # create training and validation dataset
        train_dataset = self.create_dataset(True, train_data, train_labels,
                                            self.batch_size)
        val_dataset = self.create_dataset(False, val_data, val_labels,
                                          self.batch_size_val)

        self.output_size = train_data.shape[1]

        # create train and validation models
        train_model = self.create_model(True, train_dataset, self.output_size)
        val_model = self.create_model(False, val_dataset, self.output_size)

        # set number of batches
        num_train_batches = int(
            np.ceil(train_data.shape[0] / (1.0 * self.batch_size)))
        num_val_batches = int(
            np.ceil(val_data.shape[0] / (1.0 * self.batch_size_val)))

        # initialize global variables
        self.sess.run(train_model['variable_init_op'])

        # training and validation phases
        print('Training phase...')
        for i in range(self.num_epochs):

            # decay learning rate according to decay_epoch parameter
            if self.decay_epoch > 0 and (i + 1) % self.decay_epoch == 0:
                self.lr = self.lr * self.lr_decay
                print('Decaying learning rate: %lf' % self.lr)

            # evaluate train and validation datasets
            train_loss = self.evaluate_dataset(True, num_train_batches,
                                               train_model)
            val_loss = self.evaluate_dataset(False, num_val_batches, val_model)

            # get training results for printing
            train_loss_rec = train_loss['loss_rec']
            train_loss_gauss = train_loss['loss_gauss']
            train_loss_cat = train_loss['loss_cat']
            train_accuracy = train_loss['accuracy']
            train_nmi = train_loss['nmi']
            train_total_loss = train_loss['loss_total']

            # get validation results for printing
            val_loss_rec = val_loss['loss_rec']
            val_loss_gauss = val_loss['loss_gauss']
            val_loss_cat = val_loss['loss_cat']
            val_accuracy = val_loss['accuracy']
            val_nmi = val_loss['nmi']
            val_total_loss = val_loss['loss_total']

            # if verbose then print specific information about training
            if self.verbose == 1:
                print("(Epoch %d / %d)" % (i + 1, self.num_epochs))
                print("Train - REC: %.5lf;  Gauss: %.5lf;  Cat: %.5lf;" % \
                      (train_loss_rec, train_loss_gauss, train_loss_cat))
                print("Valid - REC: %.5lf;  Gauss: %.5lf;  Cat: %.5lf;" % \
                      (val_loss_rec, val_loss_gauss, val_loss_cat))
                print("Accuracy=Train: %.5lf; Val: %.5lf   NMI=Train: %.5lf; Val: %.5lf   Total Loss=Train: %.5lf; Val: %.5lf" % \
                     (train_accuracy, val_accuracy, train_nmi, val_nmi, train_total_loss, val_total_loss))
            else:
                print("(Epoch %d / %d) Train Loss: %.5lf; Val Loss: %.5lf   Train ACC: %.5lf; Val ACC: %.5lf   Train NMI: %.5lf; Val NMI: %.5lf" % \
                      (i + 1, self.num_epochs, train_total_loss, val_total_loss, train_accuracy, val_accuracy, train_nmi, val_nmi))

            # save loss and accuracy of each epoch
            train_history_loss.append(train_total_loss)
            val_history_loss.append(val_total_loss)
            train_history_acc.append(train_accuracy)
            val_history_acc.append(val_accuracy)

            if self.decay_temperature == 1:
                # decay temperature of gumbel-softmax
                self.temperature = np.maximum(
                    self.initial_temperature * np.exp(-self.decay_temp_rate *
                                                      (i + 1)),
                    self.min_temperature)
                if self.verbose == 1:
                    print("Gumbel Temperature: %.5lf" % self.temperature)

        return {
            'train_history_loss': train_history_loss,
            'val_history_loss': val_history_loss,
            'train_history_acc': train_history_acc,
            'val_history_acc': val_history_acc
        }

    def test(self, test_data, test_labels, batch_size=-1):
        """Test the model with new data

      Args:
          test_data: (array) corresponding array containing the testing data
          test_labels: (array) corresponding array containing the labels of the testing data
          batch_size: (int) batch size used to run the model
          
      Return:
          accuracy for the given test data

      """
        # if batch_size is not specified then use all data
        if batch_size == -1:
            batch_size = test_data.shape[0]

        # create dataset
        test_dataset = self.create_dataset(False, test_data, test_labels,
                                           batch_size)
        true_labels = test_dataset['labels']

        # perform a forward call on the encoder to obtain predicted labels
        latent = self.network.encoder(test_dataset['data'], self.num_classes)
        logits = latent['logits']
        predicted_labels = tf.argmax(logits, axis=1)

        # initialize dataset iterator
        self.sess.run(test_dataset['iterator_init'])

        # calculate number of batches given batch size
        num_batches = int(np.ceil(test_data.shape[0] / (1.0 * batch_size)))

        # evaluate the model
        list_predicted_labels = []
        list_true_labels = []

        for j in range(num_batches):
            _predicted_labels, _true_labels = self.sess.run(
                [predicted_labels, true_labels],
                feed_dict={
                    self.network.temperature: self.temperature,
                    self.learning_rate: self.lr
                })

            # save values
            list_predicted_labels.append(_predicted_labels)
            list_true_labels.append(_true_labels)

        # average accuracy and nmi of all the data
        predicted_labels = np.hstack(list_predicted_labels)
        true_labels = np.hstack(list_true_labels)
        avg_nmi = self.metrics.nmi(predicted_labels, true_labels)
        avg_accuracy = self.metrics.cluster_acc(predicted_labels, true_labels)

        return avg_accuracy, avg_nmi
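
    # `cluster_acc` above is the standard unsupervised clustering accuracy:
    # predicted cluster ids are matched to the true labels with the
    # Hungarian algorithm before counting hits. A minimal sketch of that
    # metric (assuming scipy is available; not necessarily how
    # `self.metrics` implements it):
    #
    #   from scipy.optimize import linear_sum_assignment
    #   import numpy as np
    #
    #   def cluster_acc(pred, true):
    #       k = max(pred.max(), true.max()) + 1
    #       cost = np.zeros((k, k), dtype=np.int64)
    #       for p, t in zip(pred, true):
    #           cost[p, t] += 1
    #       row, col = linear_sum_assignment(-cost)  # maximize matches
    #       return cost[row, col].sum() / pred.size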

    def latent_features(self, data, batch_size=-1):
        """Obtain latent features learnt by the model

      Args:
          data: (array) corresponding array containing the data
          batch_size: (int) size of each batch to consider from the data

      Returns:
          features: (array) array containing the features from the data
      """
        # if batch_size is not specified then use all data
        if batch_size == -1:
            batch_size = data.shape[0]

        # create dataset
        dataset = self.create_dataset(False, data, None, batch_size)

        # we will use only the encoder network
        latent = self.network.encoder(dataset['data'], self.num_classes)
        encoder = latent['features']

        # obtain the features from the input data
        self.sess.run(dataset['iterator_init'])
        # use ceil so that a trailing partial batch is not silently dropped
        # (assumes the dataset does not drop the remainder)
        num_batches = int(np.ceil(data.shape[0] / (1.0 * batch_size)))

        features = np.zeros((data.shape[0], self.gaussian_size))
        pos = 0
        for j in range(num_batches):
            batch_features = self.sess.run(encoder,
                                           feed_dict={
                                               self.network.temperature:
                                               self.temperature,
                                               self.learning_rate: self.lr
                                           })
            features[pos:pos + batch_features.shape[0]] = batch_features
            pos += batch_features.shape[0]
        return features

    def reconstruct_data(self, data, batch_size=-1):
        """Reconstruct Data

      Args:
          data: (array) corresponding array containing the data
          batch_size: (int) size of each batch to consider from the data

      Returns:
          reconstructed: (array) array containing the reconstructed data
      """
        # if batch_size is not specified then use all data
        if batch_size == -1:
            batch_size = data.shape[0]

        # create dataset
        dataset = self.create_dataset(False, data, None, batch_size)

        # reuse model used in training
        model_spec = self.create_model(False, dataset, data.shape[1])

        # obtain the reconstructed data
        self.sess.run(model_spec['iterator_init'])
        # use ceil so that a trailing partial batch is not silently dropped
        # (assumes the dataset does not drop the remainder)
        num_batches = int(np.ceil(data.shape[0] / (1.0 * batch_size)))
        reconstructed = np.zeros(data.shape)
        pos = 0
        for j in range(num_batches):
            batch_output = self.sess.run(
                model_spec['output'],
                feed_dict={
                    self.network.temperature: self.temperature,
                    self.learning_rate: self.lr
                })
            reconstructed[pos:pos + batch_output.shape[0]] = batch_output
            pos += batch_output.shape[0]
        return reconstructed

    def plot_latent_space(self, data, labels, save=False):
        """Plot the latent space learnt by the model

      Args:
          data: (array) corresponding array containing the data
          labels: (array) corresponding array containing the labels
          save: (bool) whether to save the latent space plot

      Returns:
          fig: (figure) plot of the latent space
      """
        # obtain the latent features
        features = self.latent_features(data)

        # plot only the first 2 dimensions
        fig = plt.figure(figsize=(8, 6))
        plt.scatter(features[:, 0],
                    features[:, 1],
                    c=labels,
                    marker='o',
                    edgecolor='none',
                    cmap=plt.cm.get_cmap('jet', 10),
                    s=10)
        plt.colorbar()
        if save:
            fig.savefig('latent_space.png')
        return fig
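
    # Note: only the first two latent dimensions are plotted; when
    # self.gaussian_size > 2, projecting first usually gives a more
    # faithful picture, e.g. (sketch, assuming scikit-learn is installed):
    #
    #   from sklearn.decomposition import PCA
    #   features_2d = PCA(n_components=2).fit_transform(features)
    #   plt.scatter(features_2d[:, 0], features_2d[:, 1], c=labels, s=10)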

    def generate_data(self, num_elements=1, category=0):
        """Generate data for a specified category

      Args:
          num_elements: (int) number of elements to generate
          category: (int) category from which we will generate data

      Returns:
          generated data according to num_elements
      """
        indices = [category] * num_elements

        # category is specified with a one-hot array
        categorical = tf.one_hot(indices, self.num_classes)

        # infer the gaussian distribution according to the category
        mean, var = self.network.gaussian_from_categorical(categorical)

        # gaussian random sample by using the mean and variance
        gaussian = tf.random_normal(tf.shape(mean), mean, tf.sqrt(var))

        # generate new samples with the given gaussian
        _, out = self.network.output_from_gaussian(gaussian, self.output_size)

        return self.sess.run(out,
                             feed_dict={
                                 self.network.temperature: self.temperature,
                                 self.learning_rate: self.lr
                             })

    def random_generation(self, num_elements=1):
        """Random generation for each category

      Args:
          num_elements: (int) number of elements to generate

      Returns:
          generated data according to num_elements
      """
        # num_elements category indices for each class id, in order
        indices = np.repeat(np.arange(self.num_classes),
                            num_elements).tolist()
        categorical = tf.one_hot(indices, self.num_classes)

        # infer the gaussian distribution according to the category
        mean, var = self.network.gaussian_from_categorical(categorical)

        # gaussian random sample by using the mean and variance
        gaussian = tf.random_normal(tf.shape(mean), mean, tf.sqrt(var))

        # generate new samples with the given gaussian
        _, out = self.network.output_from_gaussian(gaussian, self.output_size)

        return self.sess.run(out,
                             feed_dict={
                                 self.network.temperature: self.temperature,
                                 self.learning_rate: self.lr
                             })
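
    # Usage sketch (illustrative; assumes a trained instance `model` on
    # flattened 28x28 images such as MNIST):
    #
    #   samples = model.random_generation(num_elements=5)
    #   # shape: (5 * num_classes, output_size); reshape to inspect
    #   images = samples.reshape(-1, 28, 28)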
Example #22
	def __init__(self):
		user_dir = "%s%s.kismon%s" % (os.path.expanduser("~"), os.sep, os.sep)
		if not os.path.isdir(user_dir):
			print("Creating Kismon user directory %s" % user_dir)
			os.mkdir(user_dir)
		config_file = "%skismon.conf" % user_dir
		self.config_handler = Config(config_file)
		self.config_handler.read()
		self.config = self.config_handler.config
		
		self.marker_text = """Encryption: %s
MAC: %s
Manuf: %s
Type: %s
Channel: %s
First seen: %s
Last seen: %s"""
		
		self.sources = {}
		self.crypt_cache = {}
		self.networks = Networks(self.config)
		
		self.init_client_thread()
		if self.config["kismet"]["connect"] is True:
			self.client_start()
		
		if "--disable-map" in sys.argv:
			self.map_error = "--disable-map used"
		else:
			self.map_error = check_osmgpsmap()
		
		if self.map_error is not None:
			self.map_error = "%s\nMap disabled" % self.map_error
			print(self.map_error, "\n")
		
		self.init_map()
		
		self.main_window = MainWindow(self.config,
			self.client_start,
			self.client_stop,
			self.map,
			self.networks,
			self.sources,
			self.client_thread.client)
		self.main_window.log_list.add("Kismon started")
		if self.map_error is not None:
			self.main_window.log_list.add(self.map_error)
		
		self.networks_file = "%snetworks.json" % user_dir
		if os.path.isfile(self.networks_file):
			try:
				self.networks.load(self.networks_file)
			except Exception as error:
				print(error)
				dialog_message = "Could not read the networks file '%s':\n%s\n\nDo you want to continue?" % (self.networks_file, error)
				dialog = Gtk.MessageDialog(self.main_window.gtkwin, Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.ERROR, Gtk.ButtonsType.YES_NO, dialog_message)
				def dialog_response(dialog, response_id):
					self.dialog_response = response_id
				dialog.connect("response", dialog_response)
				dialog.run()
				dialog.destroy()
				if self.dialog_response == -9:  # Gtk.ResponseType.NO
					print("exit")
					self.client_thread.stop()
					self.main_window.gtkwin = None
					return
		self.networks.set_autosave(self.config["networks"]["autosave"], self.networks_file, self.main_window.log_list.add)
		
		if self.map is not None:
			self.networks.notify_add_list["map"] = self.add_network_to_map
			self.networks.notify_remove_list["map"] = self.map.remove_marker
		
		self.main_window.network_list.crypt_cache = self.crypt_cache
		
		self.battery_max = None
		self.battery = None
		path = "/proc/acpi/battery/"
		if os.path.exists(path):
			for name in os.listdir(path):
				self.battery = name
				f = open("%s%s/info" % (path, name))
				for line in f.readlines():
					if line.startswith("last full capacity:"):
						max = line.split(":")[1].strip()
						self.battery_max = int(max.split()[0])
						break
				GObject.timeout_add(30000, self.update_battery_bar)
				break
		self.update_battery_bar()
		
		GLib.timeout_add(500, self.queue_handler)
		GLib.timeout_add(300, self.queue_handler_networks)
		GLib.idle_add(self.networks.apply_filters)
Example #23
class Core:
	def __init__(self):
		user_dir = "%s%s.kismon%s" % (os.path.expanduser("~"), os.sep, os.sep)
		if not os.path.isdir(user_dir):
			print("Creating Kismon user directory %s" % user_dir)
			os.mkdir(user_dir)
		config_file = "%skismon.conf" % user_dir
		self.config_handler = Config(config_file)
		self.config_handler.read()
		self.config = self.config_handler.config
		
		self.sources = {}
		self.crypt_cache = {}
		self.networks = Networks(self.config)
		self.client_threads = {}
		self.init_client_threads()
		
		if "--disable-map" in sys.argv:
			self.map_error = "--disable-map used"
		else:
			self.map_error = check_osmgpsmap()
		
		if self.map_error is not None:
			self.map_error = "%s\nMap disabled" % self.map_error
			print(self.map_error, "\n")
		
		self.init_map()
		
		self.main_window = MainWindow(self.config,
			self.client_start,
			self.client_stop,
			self.map,
			self.networks,
			self.sources,
			self.client_threads)
		self.main_window.log_list.add("Kismon", "started")
		if self.map_error is not None:
			self.main_window.log_list.add("Kismon", self.map_error)
		
		self.networks_file = "%snetworks.json" % user_dir
		if os.path.isfile(self.networks_file):
			try:
				self.networks.load(self.networks_file)
			except Exception as error:
				print(error)
				dialog_message = "Could not read the networks file '%s':\n%s\n\nDo you want to continue?" % (self.networks_file, error)
				dialog = Gtk.MessageDialog(self.main_window.gtkwin, Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.ERROR, Gtk.ButtonsType.YES_NO, dialog_message)
				def dialog_response(dialog, response_id):
					self.dialog_response = response_id
				dialog.connect("response", dialog_response)
				dialog.run()
				dialog.destroy()
				if self.dialog_response == -9:  # Gtk.ResponseType.NO
					print("exit")
					self.clients_stop()
					self.main_window.gtkwin = None
					return
		self.networks.set_autosave(self.config["networks"]["autosave"], self.networks_file, self.main_window.log_list.add)
		
		if self.map is not None:
			self.networks.notify_add_list["map"] = self.add_network_to_map
			self.networks.notify_remove_list["map"] = self.map.remove_marker
			GLib.timeout_add(100, self.map.set_last_from_config)
		
		self.main_window.network_list.crypt_cache = self.crypt_cache
		
		GLib.timeout_add(500, self.queues_handler)
		GLib.timeout_add(300, self.queues_handler_networks)
		GLib.idle_add(self.networks.apply_filters)
		
	def init_map(self):
		if self.map_error is not None:
			self.map = None
		else:
			try:
				from .map import Map
			except SystemError:
				from map import Map
			user_agent = 'kismon/%s' % utils.get_version()
			self.map = Map(self.config["map"], user_agent=user_agent)
			self.map.set_last_from_config()
		
	def init_client_thread(self, server_id):
		server = self.config["kismet"]["servers"][server_id]
		self.client_threads[server_id] = ClientThread(server)
		self.client_threads[server_id].client.set_capabilities(
			('status', 'source', 'info', 'gps', 'bssid', 'bssidsrc', 'ssid'))
		if "--create-kismet-dump" in sys.argv:
			self.client_threads[server_id].client.enable_dump()
		
	def init_client_threads(self):
		for server_id in range(len(self.config["kismet"]["servers"])):
			self.init_client_thread(server_id)
		
	def client_start(self, server_id):
		if server_id in self.client_threads and self.client_threads[server_id].is_running:
			self.client_stop(server_id)
		self.sources[server_id] = {}
		self.init_client_thread(server_id)
		if "--load-kismet-dump" in sys.argv:
			self.client_threads[server_id].client.load_dump(sys.argv[2])
		self.client_threads[server_id].start()
		
	def client_stop(self, server_id):
		if self.client_threads[server_id].client.connecting:
			# kill connecting sockets, don't wait for the timeout
			try:
				self.client_threads[server_id].client.s.shutdown(socket.SHUT_RDWR)
			except OSError:
				pass
		self.client_threads[server_id].stop()
		
	def clients_stop(self):
		for server_id in self.client_threads:
			self.client_stop(server_id)
		return True
		
	def queue_handler(self, server_id):
		server_name = self.config['kismet']['servers'][server_id]
		if self.main_window.gtkwin is None:
			return False
		
		thread = self.client_threads[server_id]
		if len(thread.client.error) > 0:
			for error in thread.client.error:
				self.main_window.log_list.add(server_name, error)
			thread.client.error = []
			self.main_window.server_tabs[server_id].server_switch.set_active(False)
			page_num = self.main_window.notebook.page_num(self.main_window.log_list.widget)
			self.main_window.notebook.set_current_page(page_num)
		
		#gps
		gps = None
		fix = None
		gps_queue = thread.get_queue("gps")
		while True:
			try:
				data = gps_queue.pop()
				if gps is None:
					gps = data
				if data["fix"] > 1:
					fix = (data["lat"], data["lon"])
					break
			except IndexError:
				break
		if gps is not None:
			self.main_window.server_tabs[server_id].update_gps_table(gps)
			if fix is not None and self.map is not None:
				server = "server%s" % (server_id + 1)
				if server_id == 0:
					self.map.set_position(fix[0], fix[1])
				else:
					self.map.add_marker(server, server, fix[0], fix[1])
				self.map.add_track(fix[0], fix[1], server_id)
		
		#status
		for data in thread.get_queue("status"):
			self.main_window.log_list.add(server_name, data["text"])
		
		#info
		info_queue = thread.get_queue("info")
		try:
			data = info_queue.pop()
			self.main_window.server_tabs[server_id].update_info_table(data)
		except IndexError:
			pass
			
		#source
		update = False
		for data in thread.get_queue("source"):
			uuid = data["uuid"]
			if uuid == "00000000-0000-0000-0000-000000000000":
				continue
			self.sources[server_id][uuid] = data
			
			update = True
		if update is True:
			self.main_window.server_tabs[server_id].update_sources_table(self.sources[server_id])
		
	def queues_handler(self):
		for server_id in self.client_threads:
			self.queue_handler(server_id)
		return True
		
	def queue_handler_networks(self, server_id):
		thread = self.client_threads[server_id]
		
		#ssid
		for data in thread.get_queue("ssid"):
			self.networks.add_ssid_data(data)
		
		#bssid
		bssids = {}
		for data in thread.get_queue("bssid"):
			mac = data["bssid"]
			self.networks.add_bssid_data(data, server_id)
			if mac in self.main_window.signal_graphs and "signal_dbm" not in thread.client.capabilities["bssidsrc"]:
				self.main_window.signal_graphs[mac].add_value(None, None, data["signal_dbm"], server_id)
			
			bssids[mac] = True
			
		#bssidsrc
		for data in thread.get_queue("bssidsrc"):
			if "signal_dbm" not in data or data["uuid"] not in self.sources[server_id]:
				continue
			
			mac = data["bssid"]
			if mac in self.main_window.signal_graphs:
				self.main_window.signal_graphs[mac].add_value(self.sources[server_id][data["uuid"]], data, data["signal_dbm"], server_id)
		
		if len(self.networks.notify_add_queue) > 0:
			self.networks.start_queue()
			if len(self.networks.notify_add_queue) > 500:
				self.networks.disable_refresh()
				self.main_window.networks_queue_progress()
		
		self.main_window.update_statusbar()
		
	def queues_handler_networks(self):
		for server_id in self.client_threads:
			self.queue_handler_networks(server_id)
		return True
		
	def quit(self):
		self.clients_stop()

		lat = self.map.osm.get_property("latitude")
		lon = self.map.osm.get_property("longitude")
		self.config["map"]["last_position"] = "%.6f/%.6f" % (lat, lon)

		while None in self.config['kismet']['servers']:
			self.config['kismet']['servers'].remove(None)
		self.config_handler.write()
		self.networks.save(self.networks_file, force=True)
		
	def add_network_to_map(self, mac):
		network = self.networks.get_network(mac)
		
		try:
			crypt = self.crypt_cache[network["cryptset"]]
		except KeyError:
			crypt = decode_cryptset(network["cryptset"], True)
			self.crypt_cache[network["cryptset"]] = crypt
		
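		# marker color encodes the encryption type: red for AES/CCMP-class
		# ciphers, orange for WPA, yellow for WEP and green for open
		# networks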
		if "AES_CCM" in crypt or "AES_OCB" in crypt:
			color = "red"
		elif "WPA" in crypt:
			color = "orange"
		elif "WEP" in crypt:
			color = "yellow"
		else:
			color = "green"
		
		self.map.add_marker(mac, color, network["lat"], network["lon"])
Example #24
def networks():
	try:
		from .config import Config
		from .networks import Networks
	except SystemError:
		from config import Config
		from networks import Networks

	def dummy(bla):
		return
	test_data = get_client_test_data()[2]
	test_config = Config(None).default_config
	
	networks = Networks(test_config)
	networks.notify_add_list["map"] = dummy
	networks.notify_add_list["network_list"] = dummy
	networks.notify_remove_list["map"] = dummy
	networks.notify_remove_list["network_list"] = dummy
	for x in range(2):
		for data in test_data:
			if data is not None and data[0] == "bssid":
				networks.add_bssid_data(data[1], 0)
				data[1]["lasttime"] = data[1]["lasttime"] + 1
		for data in test_data:
			if data is not None and data[0] == "ssid":
				networks.add_ssid_data(data[1])
				data[1]["lasttime"] = data[1]["lasttime"] + 1
	
	tmp_csv_file = "%s%stest-%s.csv" % (tempfile.gettempdir(), os.sep, int(time.time()))
	tmp_csv = open(tmp_csv_file, "w")
	tmp_csv.write("""Network;NetType;ESSID;BSSID;Info;Channel;Cloaked;Encryption;Decrypted;MaxRate;MaxSeenRate;Beacon;LLC;Data;Crypt;Weak;Total;Carrier;Encoding;FirstTime;LastTime;BestQuality;BestSignal;BestNoise;GPSMinLat;GPSMinLon;GPSMinAlt;GPSMinSpd;GPSMaxLat;GPSMaxLon;GPSMaxAlt;GPSMaxSpd;GPSBestLat;GPSBestLon;GPSBestAlt;DataSize;IPType;IP;
1;infrastructure;asd;11:22:33:44:55:66;;3;No;WEP,WPA,PSK,AES-CCM;No;18.0;1000;25600;148;0;0;0;148;IEEE 802.11g;;Thu Jan 22 05:48:23 2009;Thu Jan 22 05:51:46 2009;0;65;-98;52.123456;13.123456;120.120003;0.000000;52.123456;13.123456;120.120003;2.934490;0.000000;0.000000;0.000000;0;None;0.0.0.0;""")
	tmp_csv.close()
	for x in range(2):
		networks.import_networks("csv", tmp_csv_file)
	networks.import_networks("netxml", "")
	
	networks_file = "%s%snetworks-%s.json" % (tempfile.gettempdir(), os.sep, int(time.time()))
	networks.save(networks_file)
	networks.load(networks_file)
	networks.import_networks("networks",networks_file)
	networks.apply_filters()
	networks.save(networks_file)
	networks.export_networks_netxml(tempfile.gettempdir() + os.sep + "test.netxml", networks.networks)
	networks.import_networks("netxml", tempfile.gettempdir() + os.sep + "test.netxml")
	networks.export_networks_kmz(tempfile.gettempdir() + os.sep + "test.kmz", networks.networks)
	
	return networks
Example #25
    game.new_episode()
    game_state = game.get_state()
    misc = game_state.game_variables  # [KILLCOUNT, AMMO, HEALTH]
    prev_misc = misc

    action_size = game.get_available_buttons_size()

    img_rows, img_cols = 64, 64
    # Convert image into Black and white
    img_channels = 4  # We stack 4 frames

    state_size = (img_rows, img_cols, img_channels)
    agent = REINFORCEAgent(state_size, action_size)

    agent.model = Networks.policy_reinforce(state_size, action_size,
                                            agent.learning_rate)
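
    # REINFORCE is a Monte-Carlo policy gradient method: the policy network
    # is updated once per finished episode with gradients weighted by the
    # discounted return, so no replay buffer or target network is needed.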

    # Start training
    GAME = 0
    t = 0
    max_life = 0  # Maximum episode life (Proxy for agent performance)

    # Buffer to compute rolling statistics
    life_buffer, ammo_buffer, kills_buffer = [], [], []

    for i in range(max_episodes):

        game.new_episode()
        game_state = game.get_state()
        misc = game_state.game_variables
        prev_misc = misc
Example #26
    game.new_episode()
    game_state = game.get_state()
    misc = game_state.game_variables  # [KILLCOUNT, AMMO, HEALTH]
    prev_misc = misc

    action_size = game.get_available_buttons_size()

    img_rows, img_cols = 64, 64
    img_channels = 3  # Color channel
    trace_length = 4  # Temporal Dimension

    state_size = (trace_length, img_rows, img_cols, img_channels)
    agent = DoubleDQNAgent(state_size, action_size, trace_length)

    agent.model = Networks.drqn(state_size, action_size, agent.learning_rate)
    agent.target_model = Networks.drqn(state_size, action_size,
                                       agent.learning_rate)
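
    # DRQN replaces a dense layer of DQN with a recurrent (LSTM) layer so
    # the agent can integrate information over trace_length consecutive
    # frames, which helps under the partial observability of ViZDoom.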

    s_t = game_state.screen_buffer  # 480 x 640
    s_t = preprocessImg(s_t, size=(img_rows, img_cols))

    is_terminated = game.is_episode_finished()

    # Start training
    epsilon = agent.initial_epsilon
    GAME = 0
    t = 0
    max_life = 0  # Maximum episode life (Proxy for agent performance)
    life = 0
    episode_buf = []  # Save entire episode
Example #27
    def __init__(self):
        user_dir = "%s%s.kismon%s" % (os.path.expanduser("~"), os.sep, os.sep)
        if not os.path.isdir(user_dir):
            print("Creating Kismon user directory %s" % user_dir)
            os.mkdir(user_dir)
        config_file = "%skismon.conf" % user_dir
        self.config_handler = Config(config_file)
        self.config_handler.read()
        self.config = self.config_handler.config

        self.sources = {}
        self.crypt_cache = {}
        self.networks = Networks(self.config)
        self.client_threads = {}
        self.init_client_threads()
        self.tracks = Tracks("%stracks.json" % user_dir)
        self.tracks.load()

        if "--disable-map" in sys.argv:
            self.map_error = "--disable-map used"
        else:
            self.map_error = check_osmgpsmap()

        if self.map_error is not None:
            self.map_error = "%s\nMap disabled" % self.map_error
            print(self.map_error, "\n")

        self.init_map()

        self.main_window = MainWindow(self.config, self.client_start,
                                      self.client_stop, self.map,
                                      self.networks, self.sources, self.tracks,
                                      self.client_threads)
        self.main_window.log_list.add("Kismon", "started")
        if self.map_error is not None:
            self.main_window.log_list.add("Kismon", self.map_error)

        self.networks_file = "%snetworks.json" % user_dir
        if os.path.isfile(self.networks_file):
            try:
                self.networks.load(self.networks_file)
            except Exception as error:
                print(error)
                dialog_message = "Could not read the networks file '%s':\n%s\n\nDo you want to continue?" % (
                    self.networks_file, error)
                dialog = Gtk.MessageDialog(self.main_window.gtkwin,
                                           Gtk.DialogFlags.DESTROY_WITH_PARENT,
                                           Gtk.MessageType.ERROR,
                                           Gtk.ButtonsType.YES_NO,
                                           dialog_message)

                def dialog_response(dialog, response_id):
                    self.dialog_response = response_id

                dialog.connect("response", dialog_response)
                dialog.run()
                dialog.destroy()
                if self.dialog_response == -9:  # Gtk.ResponseType.NO
                    print("exit")
                    self.clients_stop()
                    self.main_window.gtkwin = None
                    return
        self.networks.set_autosave(self.config["networks"]["autosave"],
                                   self.networks_file,
                                   self.main_window.log_list.add)

        if self.map is not None:
            self.networks.notify_add_list["map"] = self.add_network_to_map
            self.networks.notify_remove_list["map"] = self.map.remove_marker
            GLib.timeout_add(100, self.map.set_last_from_config)

        self.main_window.network_list.crypt_cache = self.crypt_cache

        GLib.timeout_add(500, self.queues_handler)
        GLib.timeout_add(300, self.queues_handler_networks)
        GLib.idle_add(self.networks.apply_filters)
Example #28
    game.new_episode()
    game_state = game.get_state()
    misc = game_state.game_variables  # [KILLCOUNT, AMMO, HEALTH]
    prev_misc = misc

    action_size = game.get_available_buttons_size()

    img_rows, img_cols = 64, 64
    img_channels = 3  # Color Channels
    trace_length = 4  # RNN states (temporal dimension)

    state_size = (trace_length, img_rows, img_cols, img_channels)
    agent = A2CAgent(state_size, action_size, trace_length)
    agent.model = Networks.a2c_lstm(state_size, action_size, agent.value_size,
                                    agent.learning_rate)
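
    # a2c_lstm presumably emits both heads from one recurrent trunk: a
    # policy over action_size actions and a value output of size
    # agent.value_size (typically 1); the exact architecture lives in the
    # Networks module.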

    # Start training
    GAME = 0
    t = 0
    max_life = 0  # Maximum episode life (Proxy for agent performance)

    # Buffer to compute rolling statistics
    life_buffer, ammo_buffer, kills_buffer = [], [], []

    for i in range(max_episodes):

        game.new_episode()
        game_state = game.get_state()
        misc = game_state.game_variables
        prev_misc = misc
Example #29
class Irepa:

    def __init__(self, ocp_client, sampling_client):
        self.ocp_client = ocp_client
        self.sampling_client = sampling_client
        # Define an estimator
        self.estimator = Networks(NX, NU,
                                  x_range=np.array([X_MIN, X_MAX]),
                                  u_range=np.array([U_MIN, U_MAX]))

    def irepa_algo(self):
        tstart = time.time()

        # Initialize PRM with a sampling function,
        # a connect function and an heuristic distance
        prm = PRM(sample_fun=self.sample, connect_fun=self.connect,
                  hdistance=self.euclid)

        # Add NN_SAMPLE random nodes to the PRM
        prm.add_nodes(NB_SAMPLE, verbose=VERBOSE)
        # prm.densify_knn(NB_CONNECT)

        print('PRM initialized,', len(prm.graph.nodes), 'nodes')

        # Try to connect the nearest neighbors in the PRM
        # prm.connexify(None, NB_ATTEMPT_PER_CONNEX_PAIR)
        # prm.densify_longer_traj(NB_ATTEMPS_DENSIFY_LONGER, MIN_PATH_LEN)
        # prm.densify_longer_traj()

        i = 0
        stop = False
        astar_successes = np.zeros(IREPA_ITER)
        est_successes = np.zeros(IREPA_ITER)
        nb_attempts = np.zeros(IREPA_ITER)
        while not stop and i < IREPA_ITER:
            print((('--- IREPA %d ---' % i)+'---'*10+'\n')*3, time.ctime())

            # Expand PRM
            # -----------------
            # Pick a pair of unconnected nearest neighbors
            # if distance > visibility horizon: # equivalent to longer_traj
            #   p* <- shortest path in PRM
            #   E <- ACADO(init = p*)
            # else: # equivalent to densify knn
            #   E <- ACADO(init = 0 or estimator)
            print('\n\n\n######################')
            print('EXPAND')
            nb_astar, nb_est, nb_attempt = prm.expand(self.estimator,
                                                      first=(not bool(i)))
            astar_successes[i] = nb_astar
            est_successes[i] = nb_est
            nb_attempts[i] = nb_attempt
            print()
            print('Edge number:', len(prm.graph.edges))
            print('######################\n\n\n')

            stop = prm.is_fully_connected()

            # Build a dataset of subtrajectories
            # to train the estimator
            dset = Dataset(prm.graph)

            # Train the estimator on the dataset
            self.estimator.train(dset)

            # Test the estimator networks
            metrics = self.estimator.test(dset)
            print('\n##########')
            print('TEST ESTIMATOR')
            print('    value', metrics[0])
            print('    controls', metrics[1])
            print('    value', metrics[2])
            print('##########\n')

            # Improve the PRM where the estimator
            # gives better results
            print('\n\n\n######################')
            print('IMPROVE')
            stop = prm.improve(self.estimator)
            # returns False if estimator did better
            # than PRM

            i += 1

            # test
            print()
            print()
            print("\nEstimations at iteration", i)
            # test_traj_idx = random.sample(range(len(dataset.us)), 1)
            test_traj_idx = 18
            print('Dataset size:', len(dset.x1s), 'trajectories')
            x0 = dset.x1s[test_traj_idx, :].T
            x1 = dset.x2s[test_traj_idx, :].T
            print('x0 x1')
            print(x0)
            print(x1)
            print('Nets trajectories')
            X, U, V = self.estimator.trajectories(x0, x1)
            print('State trajectory')
            print(X)
            print('Control trajectory')
            print(U)
            print('Value')
            print(V)
            print('Euclidean value')
            print(self.euclid(x0, x1))

        plt.plot(np.arange(IREPA_ITER),
                 astar_successes, color='blue', label='astar')
        plt.plot(np.arange(IREPA_ITER),
                 est_successes, color='green', label='estimator')
        plt.plot(np.arange(IREPA_ITER),
                 nb_attempts, color='orange', label='attempts')
        plt.legend()
        plt.show()

        print('Saving estimator weights')
        # self.estimator.save()
        print('Saved')

        tend = time.time()

        print('\n##############')
        print('IREPA was executed in ', (tend-tstart)/60, 'minutes')
        print()

    def connect(self, s1, s2, init=None):
        """Tries to connect 2 sets by calling the Acado optimizer service.
        If init trajectory is passed, warm start of the optimization process"""
        print('Try to connect', s1, s2)

        if init is not None:
            X_init, U_init, V_init = init
            print('Using initialization, value:', V_init,
                  ', length:', X_init.shape[0])
            X_init = X_init.flatten()
            U_init = U_init.flatten()
        else:
            X_init, U_init, V_init = [], [], 0

        # res = opt_control_proxy(s1, s2, states, controls, cost)
        goal = OptControlGoal(
            s1, s2, X_init, U_init, V_init, NX, NU)
        res = self.ocp_client.send_goal(goal)
        self.ocp_client.wait_for_result()
        res = self.ocp_client.get_result()

        if res.success:
            print('  SUCCESS of optimization, time:', res.time,
                  'Path length:', len(res.states)//NX)
            X = np.array(res.states).reshape(len(res.states)//NX, NX)
            U = np.array(res.controls).reshape(len(res.controls)//NU, NU)
            return res.success, X, U, res.time
        else:
            print('  FAILURE of optimization')
            return res.success, [], [], 0

    def connect_test(self, s1, s2, init=None):
        success = random.randint(0, 1)

        trajlength = random.randint(10, 30)

        sxarr = np.array([s1[0], s2[0]])
        syarr = np.array([s1[1], s2[1]])
        sthetaarr = np.array([s1[2], s2[2]])

        Xx = np.linspace(s1[0], s2[0], trajlength)
        Xy = np.interp(Xx, sxarr, syarr)
        Xtheta = np.interp(Xx, sxarr, sthetaarr)

        X = np.vstack([Xx, Xy, Xtheta]).T
        U = X.copy()[:, 0:2]
        V = self.euclid(s1, s2) + 0.02*random.random()

        return success, X, U, V

    def sample(self, n):
        """n: number of samples to be returned"""
        resp = self.sampling_client(n)
        return np.array(resp.samples).reshape(n, int(len(resp.samples)/n))

    def euclid(self, s1, s2):
        #   print(s1, s2)
        return np.sqrt(sum((x1i - x2i)**2 for (x1i, x2i) in zip(s1, s2)))
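
# Minimal usage sketch (hypothetical client setup; the action/service names
# and message types below are illustrative, not taken from this module):
#
#   ocp_client = actionlib.SimpleActionClient('solve_ocp', OptControlAction)
#   sampling_client = rospy.ServiceProxy('create_samples', CreateSamples)
#   irepa = Irepa(ocp_client, sampling_client)
#   irepa.irepa_algo()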
Example #30
    max_episodes = 1000000

    game.new_episode()
    game_state = game.get_state()
    misc = game_state.game_variables  # [KILLCOUNT, AMMO, HEALTH]
    prev_misc = misc

    action_size = game.get_available_buttons_size()

    img_rows, img_cols = 64, 64
    # Convert image into Black and white
    img_channels = 4  # We stack 4 frames

    state_size = (img_rows, img_cols, img_channels)
    agent = A2CAgent(state_size, action_size)
    agent.actor = Networks.actor_network(state_size, action_size,
                                         agent.actor_lr)
    agent.critic = Networks.critic_network(state_size, agent.value_size,
                                           agent.critic_lr)
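
    # unlike an architecture with a shared trunk, this A2C setup keeps
    # separate actor and critic networks, each trained with its own
    # learning rate (agent.actor_lr and agent.critic_lr).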

    # Start training
    GAME = 0
    t = 0
    max_life = 0  # Maximum episode life (Proxy for agent performance)

    # Buffer to compute rolling statistics
    life_buffer, ammo_buffer, kills_buffer = [], [], []

    for i in range(max_episodes):

        game.new_episode()
        game_state = game.get_state()
Example #31
	def __init__(self):
		user_dir = "%s%s.kismon%s" % (os.path.expanduser("~"), os.sep, os.sep)
		if not os.path.isdir(user_dir):
			print("Creating Kismon user directory %s" % user_dir)
			os.mkdir(user_dir)
		config_file = "%skismon.conf" % user_dir
		self.config_handler = Config(config_file)
		self.config_handler.read()
		self.config = self.config_handler.config
		
		self.sources = {}
		self.crypt_cache = {}
		self.networks = Networks(self.config)
		self.client_threads = {}
		self.init_client_threads()
		
		if "--disable-map" in sys.argv:
			self.map_error = "--disable-map used"
		else:
			self.map_error = check_osmgpsmap()
		
		if self.map_error is not None:
			self.map_error = "%s\nMap disabled" % self.map_error
			print(self.map_error, "\n")
		
		self.init_map()
		
		self.main_window = MainWindow(self.config,
			self.client_start,
			self.client_stop,
			self.map,
			self.networks,
			self.sources,
			self.client_threads)
		self.main_window.log_list.add("Kismon", "started")
		if self.map_error is not None:
			self.main_window.log_list.add("Kismon", self.map_error)
		
		self.networks_file = "%snetworks.json" % user_dir
		if os.path.isfile(self.networks_file):
			try:
				self.networks.load(self.networks_file)
			except Exception as error:
				print(error)
				dialog_message = "Could not read the networks file '%s':\n%s\n\nDo you want to continue?" % (self.networks_file, error)
				dialog = Gtk.MessageDialog(self.main_window.gtkwin, Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.ERROR, Gtk.ButtonsType.YES_NO, dialog_message)
				def dialog_response(dialog, response_id):
					self.dialog_response = response_id
				dialog.connect("response", dialog_response)
				dialog.run()
				dialog.destroy()
				if self.dialog_response == -9:  # Gtk.ResponseType.NO
					print("exit")
					self.clients_stop()
					self.main_window.gtkwin = None
					return
		self.networks.set_autosave(self.config["networks"]["autosave"], self.networks_file, self.main_window.log_list.add)
		
		if self.map is not None:
			self.networks.notify_add_list["map"] = self.add_network_to_map
			self.networks.notify_remove_list["map"] = self.map.remove_marker
			GLib.timeout_add(100, self.map.set_last_from_config)
		
		self.main_window.network_list.crypt_cache = self.crypt_cache
		
		GLib.timeout_add(500, self.queues_handler)
		GLib.timeout_add(300, self.queues_handler_networks)
		GLib.idle_add(self.networks.apply_filters)
Example #32
    print("Env has ", action_size, " actions.")
    measurement_size = 3  # [Battery, poison, food]
    timesteps = [1, 2, 4, 8, 16, 32]  # For long horizon: [4,8,16,32,64,128]
    goal_size = measurement_size * len(timesteps)

    img_rows, img_cols = 84, 84  #KOE: Think this is still correct.
    # Convert image into Black and white

    #KOETODO Not quite sure what happens here - I'm making images black/white, so what is the point?
    img_channels = 3  # KOE: If I want to change this, I have to also edit the frame stacking when forming s_t

    state_size = (img_rows, img_cols, img_channels)
    agent = DFPAgent(state_size, measurement_size, action_size, timesteps)

    agent.model = Networks.dfp_network(state_size, measurement_size, goal_size,
                                       action_size, len(timesteps),
                                       agent.learning_rate)

    #x_t = game_state.screen_buffer # 480 x 640
    #x_t = preprocessImg(initial_observation, size=(img_rows, img_cols))

    #np.save("input_output_examples/initial_obs.npy", initial_observation)
    #np.save("input_output_examples/preprocessed_obs.npy", x_t)
    #KOE: Preprocessing to get black and white.

    #KOE: Not sure what is going on here. 4 images in a row?
    #s_t = np.stack(([x_t]*4), axis=2) # It becomes 64x64x4
    s_t = initial_observation
    s_t = np.expand_dims(s_t, axis=0)  # 1x64x64x4

    #np.save("input_output_examples/stacked_obs.npy", s_t)
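
    # In DFP (Dosovitskiy & Koltun, "Learning to Act by Predicting the
    # Future"), the network predicts future changes of each measurement at
    # every horizon in `timesteps`; a goal vector of length
    # measurement_size * len(timesteps) weights those predictions, e.g.
    # np.tile([1., 1., 1.], len(timesteps)) to value battery, poison and
    # food equally at all horizons (an illustrative weighting).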
Example #33
    prev_misc = misc

    action_size = game.get_available_buttons_size()

    img_rows, img_cols = 64, 64
    # Convert image into Black and white
    img_channels = 4  # We stack 4 frames

    # C51
    num_atoms = 51

    state_size = (img_rows, img_cols, img_channels)
    agent = C51Agent(state_size, action_size, num_atoms)

    agent.model = Networks.value_distribution_network(state_size, num_atoms,
                                                      action_size,
                                                      agent.learning_rate)
    agent.target_model = Networks.value_distribution_network(
        state_size, num_atoms, action_size, agent.learning_rate)
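
    # C51 (Bellemare et al., 2017) models the full return distribution over
    # num_atoms fixed support points; Q(s, a) is recovered as the
    # expectation sum_i z_i * p_i(s, a) and used for action selection.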

    x_t = game_state.screen_buffer  # 480 x 640
    x_t = preprocessImg(x_t, size=(img_rows, img_cols))
    s_t = np.stack(([x_t] * 4), axis=2)  # It becomes 64x64x4
    s_t = np.expand_dims(s_t, axis=0)  # 1x64x64x4

    is_terminated = game.is_episode_finished()

    # Start training
    epsilon = agent.initial_epsilon
    GAME = 0
    t = 0