Example #1
    def run(self, agent):
        logger.info("Setup agent")
        global global_agent
        global_agent.set_agent(agent)

        # start by resetting the environment
        self.reset()

    def __init__(self):
        signal.signal(signal.SIGINT, self._handle_exit_signal)
        signal.signal(signal.SIGTERM, self._handle_exit_signal)
        self.base_dir = None  # will be set by _install_dependencies
        self._install_dependencies()
        # pick an available port based on the configured qopt_port:
        self.port = find_available_port(config.qopt_port)

        # start calcite + java server
        logger.info("port = " + str(self.port))
        self._start_java_server()

        context = zmq.Context()
        #  Socket to talk to server
        logger.info("Going to connect to calcite server")
        self.socket = context.socket(zmq.PAIR)
        self.socket.connect("tcp://drl-fw:" + str(self.port))
        self.reward_normalization = config.qopt_reward_normalization

        # TODO: describe spaces
        self.graph = None
        self.action_space = None
        # maps an action index (a position in the edge array) to the edge we
        # tell calcite to choose next; this will be updated every time we use
        # _observe
        self._edge_pos_map = None

        # will store _min_reward / _max_reward for each unique query
        # will map query: (_min_reward, _max_reward)
        self.reward_mapper = {}
        # these values will get updated in reset.
        self._min_reward = None
        self._max_reward = None

        # self.query_set = self._send("getCurQuerySet")
        self.attr_count = int(self._send("getAttrCount"))

        self.current_query = None

        # setup space with the new graph
        self._setup_space()

        # more experimental stuff

        # original graph used to denote state
        self.orig_graph = None
        if config.qopt_viz:
            self.viz_ep = 0
            self.viz_output_dir = "./visualization/"
            self.viz_title_tmp = "query: {query}, ep: {ep}, step: {step}"

        self.queries_initialized = False
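
A minimal sketch of the _send helper used above (e.g. self._send("getAttrCount")),
which does not appear in these examples. It assumes a simple synchronous string
request/reply exchange over the PAIR socket; the actual wire protocol is an
assumption, not something shown in the source.

import zmq

def send_request(socket, msg):
    # Hypothetical helper: send one request string over a PAIR socket and
    # block for the single string reply expected from the Java server.
    socket.send_string(msg)
    return socket.recv_string()

# usage sketch (assumes the server started by _start_java_server is listening):
# context = zmq.Context()
# socket = context.socket(zmq.PAIR)
# socket.connect("tcp://localhost:" + str(port))
# attr_count = int(send_request(socket, "getAttrCount"))
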
Example #3
    def setup_mahimahi(self):
        logger.info("Mahimahi setup")

        env_path = park.__path__[0] + "/envs/congestion_control"
        traces_path = os.path.join(env_path, 'traces/')

        if not os.path.exists(traces_path):
            sh.run("mkdir -p {}".format(traces_path), shell=True)
            wget.download(
                'https://www.dropbox.com/s/qw0tmgayh5d6714/cooked_traces.zip?dl=1',
                out=env_path)
            with zipfile.ZipFile(env_path + '/cooked_traces.zip',
                                 'r') as zip_f:
                zip_f.extractall(traces_path)

            sh.run("rm -f {}".format(env_path + '/cooked_traces.zip'),
                   shell=True)
            sh.run("cp /usr/share/mahimahi/traces/* {}".format(traces_path),
                   shell=True)

            # const traces
            write_const_mm_trace(os.path.join(traces_path, "const12.mahi"), 12)
            write_const_mm_trace(os.path.join(traces_path, "const24.mahi"), 24)
            write_const_mm_trace(os.path.join(traces_path, "const36.mahi"), 36)
            write_const_mm_trace(os.path.join(traces_path, "const48.mahi"), 48)
            write_const_mm_trace(os.path.join(traces_path, "const60.mahi"), 60)
            write_const_mm_trace(os.path.join(traces_path, "const72.mahi"), 72)
            write_const_mm_trace(os.path.join(traces_path, "const84.mahi"), 84)
            write_const_mm_trace(os.path.join(traces_path, "const96.mahi"), 96)

        # Setup link
        logger.debug(park.param.config)
        self.linkDelay = park.param.config.cc_delay
        self.uplinkTraceFile = os.path.join(traces_path,
                                            park.param.config.cc_uplink_trace)
        self.downlinkTraceFile = os.path.join(
            traces_path, park.param.config.cc_downlink_trace)

        # Setup workload generator
        self.workloadGeneratorSender = "iperf -c 100.64.0.1 -Z ccp -P 1 -i 2 -t {}".format(
            park.param.config.cc_duration)
        self.workloadGeneratorReceiver = "iperf -s -w 16m > /dev/null"
        self.workloadGeneratorKiller = "sudo pkill -9 iperf"

        with open("sender.sh", "w") as fout:
            fout.write(self.workloadGeneratorSender + "\n")

        sh.Popen("chmod a+x sender.sh", shell=True).wait()
Example #4
    def setup_ccp_shim(self):
        cong_env_path = park.__path__[0] + "/envs/congestion_control"

        # ccp-kernel
        if not os.path.exists(cong_env_path + "/ccp-kernel"):
            logger.info("Downloading ccp-kernel")
            sh.run(
                "git clone --recursive https://github.com/ccp-project/ccp-kernel.git {}"
                .format(cong_env_path + "/ccp-kernel"),
                shell=True)

        try:
            # check whether the ccp module is already loaded; if so, rebuild,
            # unload, and reload it
            sh.check_call("lsmod | grep ccp", shell=True)
            sh.run(
                "make && sudo ./ccp_kernel_unload && sudo ./ccp_kernel_load ipc=0",
                cwd=cong_env_path + "/ccp-kernel",
                shell=True)
        except sh.CalledProcessError:
            # grep found nothing: the module is not loaded yet, so build and
            # load it for the first time
            logger.info('Loading ccp-kernel')
            sh.run("make && sudo ./ccp_kernel_load ipc=0",
                   cwd=cong_env_path + "/ccp-kernel",
                   shell=True)

        try:
            logger.info("Building ccp shim")
            sh.check_call("cargo build --release",
                          cwd=cong_env_path + "/park",
                          shell=True)
        except sh.CalledProcessError:
            # cargo is missing (or the build failed): install rust, then retry
            # the build with the freshly installed toolchain
            logger.info("Installing rust")
            sh.check_call("sudo bash rust-install.sh",
                          cwd=cong_env_path,
                          shell=True)
            logger.info("Building ccp shim")
            sh.check_call("~/.cargo/bin/cargo build --release",
                          cwd=cong_env_path + "/park",
                          shell=True)

    def _install_dependencies(self):
        """
         - clone OR pull latest version from query-optimizer github repo
            - TODO: auth?
            - set path appropriately to run java stuff
         - use docker to install, and start postgres
        """
        logger.info("installing dependencies for query optimizer")
        self.base_dir = park.__path__[0]
        # self._install_if_needed("docker")
        # self._install_if_needed("mvn")
        # self._install_if_needed("java")

        # # set up the query_optimizer repo
        # try:
        #     qopt_path = os.environ["QUERY_OPT_PATH"]
        # except:
        #     # if it has not been set, then set it based on the base dir
        #     qopt_path = self.base_dir + "/query-optimizer"
        #     # if this doesn't exist, then git clone this
        #     if not os.path.exists(qopt_path):
        #         print("going to clone query-optimizer library")
        #         cmd = "git clone https://github.com/parimarjan/query-optimizer.git"
        #         p = sp.Popen(cmd, shell=True,
        #                      cwd=self.base_dir)
        #         p.wait()
        #         print("cloned query-optimizer library")
        # print("query optimizer path is: ", qopt_path)

        # # TODO: if psql -d imdb already set up locally, then do not use docker
        # # to set up postgres. Is this really useful, or should we just assume
        # # docker is always the way to go?

        # # TODO: print plenty of warning messages: going to start docker,
        # # docker's directory should have enough space - /var/lib/docker OR
        # # change it manually following instructions at >>>> .....

        # docker_dir = qopt_path + "/docker"
        # docker_img_name = "pg"
        container_name = "docker-pg"
        # # docker build
        # docker_bld = "docker build -t {} . ".format(docker_img_name)
        # p = sp.Popen(docker_bld, shell=True, cwd=docker_dir)
        # p.wait()
        # print("building docker image {} successful".format(docker_img_name))
        # time.sleep(2)
        # # start / or create new docker container
        # # Note: we need to start docker in a privileged mode so we can clear
        # # cache later on.
        # docker_run = "docker run --name {} -p \
        # 5432:5432 --privileged -d {}".format(container_name, docker_img_name)
        # docker_start_cmd = "docker start docker-pg || " + docker_run
        # p = sp.Popen(docker_start_cmd, shell=True, cwd=docker_dir)
        # p.wait()
        # print("starting docker container {} successful".format(container_name))
        # time.sleep(2)

        # check_container_cmd = "docker ps | grep {}".format(container_name)
        # process = sp.Popen(check_container_cmd, shell=True)
        # ret_code = process.wait()
        # if ret_code != 0:
        #     print("something bad happened when we tried to start docker container")
        #     print("got ret code: ", ret_code)
        #     env.clean()

        # time.sleep(2)
        # need to ensure that psql has started in the container. If this is
        # the first time it is starting, then pg_restore could take a while.
        import psycopg2
        while True:
            try:
                conn = psycopg2.connect(host="docker-pg",
                                        port=5432,
                                        dbname="imdb",
                                        user="******",
                                        password="******")
                conn.close()
                break
            except psycopg2.OperationalError as ex:
                print("Connection failed: {0}".format(ex))
                print("""If this is the first time you are starting the
                        container, then pg_restore is probably taking its time.
                        Be patient. Will keep checking for psql to be alive.
                        Can take up to a few minutes.
                        """)
                time.sleep(30)

        self.container_name = container_name
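
The loop above retries indefinitely until postgres inside the container accepts
connections. A bounded variant (a hypothetical helper, not part of the original
code) that gives up after a deadline could look like this; credentials are
omitted here, as they are masked in the example above.

import time
import psycopg2

def wait_for_postgres(host="docker-pg", port=5432, dbname="imdb",
                      timeout_s=1800, retry_s=30):
    # Hypothetical sketch: poll until a connection succeeds or the deadline
    # passes, instead of looping forever.
    deadline = time.time() + timeout_s
    while True:
        try:
            psycopg2.connect(host=host, port=port, dbname=dbname).close()
            return
        except psycopg2.OperationalError as ex:
            if time.time() > deadline:
                raise RuntimeError("postgres did not come up in time") from ex
            time.sleep(retry_s)
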
Example #6
    def __init__(self):
        signal.signal(signal.SIGINT, self._handle_exit_signal)
        signal.signal(signal.SIGTERM, self._handle_exit_signal)
        self.base_dir = None  # will be set by _install_dependencies
        self._install_dependencies()
        # pick an available port based on the configured qopt_port:
        self.port = find_available_port(config.qopt_port)

        # start calcite + java server
        logger.info("port = " + str(self.port))
        self._start_java_server()

        context = zmq.Context()
        #  Socket to talk to server
        logger.info("Going to connect to calcite server")
        self.socket = context.socket(zmq.PAIR)
        self.socket.connect("tcp://localhost:" + str(self.port))
        self.reward_normalization = config.qopt_reward_normalization

        # TODO: describe spaces
        self.graph = None
        self.action_space = None
        # maps an action index (a position in the edge array) to the edge we
        # tell calcite to choose next; this will be updated every time we use
        # _observe
        self._edge_pos_map = None

        self.query_set = self._send("getCurQuerySet")
        self.attr_count = int(self._send("getAttrCount"))

        # FIXME: make this nicer?
        # set dependent flags together:
        # if config.qopt_runtime_reward:
        # config.qopt_only_final_reward = 1
        # config.qopt_reward_normalization = ""

        # FIXME: these variables don't necessarily belong here / should be
        # cleaned up
        # TODO: figure this out using the protocol too. Or set it on the java
        # side using some protocol.
        # self.only_final_reward = config.qopt_only_final_reward

        # will store _min_reward / _max_reward for each unique query
        # will map query: (_min_reward, _max_reward)
        self.reward_mapper = {}
        # these values will get updated in reset.
        self._min_reward = None
        self._max_reward = None
        self.current_query = None

        # setup space with the new graph
        self._setup_space()

        # more experimental stuff

        # original graph used to denote state
        self.orig_graph = None
        if config.qopt_viz:
            self.viz_ep = 0
            self.viz_output_dir = "./visualization/"
            self.viz_title_tmp = "query: {query}, ep: {ep}, step: {step}"
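
The comments above say reward_mapper stores a (_min_reward, _max_reward) pair
per query and that config.qopt_reward_normalization selects a normalization
scheme. Below is a minimal sketch of a per-query min-max scaling that could use
those fields; the helper name and the exact scheme are assumptions, since the
normalization code itself is not shown in these examples.

def min_max_normalize(reward, reward_mapper, query):
    # Hypothetical sketch: scale a reward into [0, 1] using the
    # (_min_reward, _max_reward) pair stored per query.
    bounds = reward_mapper.get(query)
    if bounds is None:
        return reward           # no bounds recorded for this query yet
    lo, hi = bounds
    if hi == lo:
        return 0.0              # degenerate range; avoid division by zero
    return (reward - lo) / (hi - lo)

# e.g. min_max_normalize(-3.2, {"q1": (-10.0, -1.0)}, "q1") is about 0.756
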
Example #7
    def __init__(self):
        # check that we are on linux (the apt-based setup below assumes ubuntu)
        if sys.platform != 'linux' and sys.platform != 'linux2':
            raise OSError(
                'Congestion control environment only tested on Linux.')

        if os.getuid() == 0:
            raise OSError('Please run as non-root')

        logger.info("Install Dependencies")
        sh.run(
            "sudo apt install -y git build-essential autoconf automake capnproto iperf",
            stdout=sh.PIPE,
            stderr=sh.PIPE,
            shell=True)
        sh.run("sudo add-apt-repository -y ppa:keithw/mahimahi",
               stdout=sh.PIPE,
               stderr=sh.PIPE,
               shell=True)
        sh.run("sudo apt-get -y update",
               stdout=sh.PIPE,
               stderr=sh.PIPE,
               shell=True)
        sh.run("sudo apt-get -y install mahimahi",
               stdout=sh.PIPE,
               stderr=sh.PIPE,
               shell=True)
        sh.run("sudo sysctl -w net.ipv4.ip_forward=1",
               stdout=sh.PIPE,
               stderr=sh.PIPE,
               shell=True)

        sh.run("sudo rm -rf /tmp/park-ccp", shell=True)
        self.setup_ccp_shim()
        self.setup_mahimahi()

        # state_space
        #
        # biggest BDP = 1200 packets = 1.8e6 Bytes
        #
        # bytesAcked         UInt64; at most one BDP
        # bytesMisordered    UInt64; at most one BDP
        # ecnBytes           UInt64; at most one BDP
        # packetsAcked       UInt64; at most one BDP / MSS
        # packetsMisordered  UInt64; at most one BDP / MSS
        # ecnPackets         UInt64; at most one BDP / MSS
        # loss               UInt64; at most one BDP / MSS
        # timeout            Bool;
        # bytesInFlight      UInt64; at most one BDP
        # packetsInFlight    UInt64; at most one BDP / MSS
        # bytesPending       UInt64; ignore
        # rtt                UInt64; [0ms, 300ms]
        # rin                UInt64; [0 Byte/s, 1GByte/s]
        # rout               UInt64; [0 Byte/s, 1GByte/s]
        self.observation_space = Box(
            low=np.array([0] * 14),
            high=np.array([
                1.8e6, 1.8e6, 1.8e6, 1200, 1200, 1200, 1200, 1, 1.8e6, 1200, 0,
                300e3, 1e9, 1e9
            ]),
        )

        # action_space
        # cwnd = [0, 4800 = 4BDP]
        # rate = [0, 2e9 = 2 * max rate]
        self.action_space = Box(low=np.array([0, 0]),
                                high=np.array([4800, 2e9]))

        # kill old shim process
        sh.run("sudo pkill -9 park", shell=True)
        sh.Popen(self.workloadGeneratorKiller, shell=True).wait()
        sleep(1.0)  # pkill has delay

        # start rlagent rpc server that ccp talks to
        logger.info("Starting RPC server thread")
        global global_agent
        t = threading.Thread(target=run_forever, args=("*:4539", global_agent))
        t.daemon = True
        t.start()

        # start ccp shim
        logger.info("Starting CCP shim process")
        cong_env_path = park.__path__[0] + "/envs/congestion_control"
        sh.Popen("sudo " + os.path.join(
            cong_env_path, "park/target/release/park 2> /dev/null"),
                 shell=True)
        sleep(1.0)  # spawn has delay

        # start workload generator receiver
        sh.Popen(self.workloadGeneratorReceiver, shell=True)

        logger.info("Done with init")
Example #8
def run_forever(addr, agent):
    server = capnp.TwoPartyServer(addr, bootstrap=CcpRlAgentImpl(agent))
    logger.info("Started RL agent in RPC server thread")
    server.run_forever()
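
run_forever above exposes the agent through a Cap'n Proto two-party RPC server
(the environment side starts it on *:4539 in Example #7). Below is a minimal
sketch of how a client could reach that server with pycapnp; the schema file
name and interface name are assumptions, since the .capnp schema backing
CcpRlAgentImpl is not shown in these examples.

import capnp

# hypothetical schema file defining the agent interface served above
rlagent_capnp = capnp.load("rlagent.capnp")

def connect_agent(addr="localhost:4539"):
    # Hypothetical sketch: open a two-party connection and fetch the
    # bootstrap capability exposed by run_forever().
    client = capnp.TwoPartyClient(addr)
    agent = client.bootstrap().cast_as(rlagent_capnp.RlAgent)
    # keep the client object alive for as long as the capability is in use
    return client, agent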