Example #1
def get_single_task(mongodb, x, y, s, d, t, sorted=True):
    print("get_single_task({}, {}, ({}, {}, {}))".format(x, y, s, d, t))
    start = time_print(0)

    if not os.path.exists(os.path.join(os.getcwd(), 'data')):
        raise OSError("Must first download data, see README.md")
    data_dir = os.path.join(os.getcwd(), 'data')

    file_path = os.path.join(data_dir, '{}-{}-G.pkl'.format(x, y))

    print("CHECK1")
    with open(file_path, 'rb') as handle:
        nx_g = pickle.load(handle)

    print("CHECK2")
    Qdf = generator.generate_single_query(nx_g, s, d, t)
    print(Qdf)

    write_queries_to_mongodb(mongodb, Qdf)
    a = generator.gen_SG(nx_g, Qdf)
    Qdf = Qdf.assign(og=a)

    task_list = generator.generate_tasks(Qdf)

    elapsed = time_print(0) - start
    print("Run time: {} ms".format(elapsed))

    if sorted:
        task_list.sort(key=lambda task: task.step)
    return task_list
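
A minimal driver sketch for get_single_task (the MongoDBConnector class, the 20x20 grid size, and the node/time values are illustrative assumptions, not part of the example above):

# Hypothetical usage: request the subtasks for a single (s, d, t) query.
mongodb = MongoDBConnector()  # assumed wrapper around a pymongo client
tasks = get_single_task(mongodb, 20, 20, 1286, 1471, 22)
for task in tasks:
    print(task.step, task.get_json()['_id'])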
Example #2
    def process_tasks(self):
        while True:
            if len(self._tasks) > 0:
                # Iterate over a copy: remove_task() mutates self._tasks below.
                for task in list(self._tasks):
                    t_dict = task.get_json()

                    if t_dict['next_node'] is None and t_dict['step'] != '000':
                        continue

                    if utils.time_print(0) - t_dict['inquiry_time'] >= GLOBAL_VARS.TIMEOUT:
                        continue

                    route = self._route_extractor.find_route(t_dict)
                    if route is None:
                        continue

                    if all(route):
                        r = route[1]
                        r_int = [int(x) for x in r]
                        t_dict['route'] = r_int
                        t_dict['travel_time'] = route[0]
                        topic = utils.add_destination(
                            GLOBAL_VARS.RESPONSE_TO_BROKER, self._client_id)

                        if DEBUG == 1:
                            utils.print_log("Task {} done with route".format(
                                t_dict['_id']))
                            utils.print_log("Sending {} to {}:rsu-{}".format(
                                t_dict['_id'],
                                topic.split("/")[-1],
                                GLOBAL_VARS.WORKER[topic.split("/")[-1]]))
                            utils.print_log(
                                "Removing {} from task queue and appending to processed_tasks"
                                .format(t_dict['_id']))

                        self._processed_tasks.append(t_dict['_id'])
                        self.send(topic, json.dumps(t_dict))

                        next_rsu = t_dict['next_rsu']
                        if next_rsu:
                            rsu_rsu_dict = {}
                            next_step = str(int(t_dict['step']) + 1).zfill(3)
                            rsu_rsu_dict['_id'] = "{}{}{}".format(
                                t_dict['parsed_id'], next_step,
                                t_dict['steps'])
                            rsu_rsu_dict['next_node'] = t_dict['route'][-1]
                            topic = utils.add_destination(
                                GLOBAL_VARS.RSU_TO_RSU, next_rsu)
                            if DEBUG == 1:
                                utils.print_log(
                                    "Sending {} update to {}:rsu-{}".format(
                                        rsu_rsu_dict['_id'],
                                        topic.split("/")[-1],
                                        utils.get_worker_from_topic(topic)))
                            self.send(topic, json.dumps(rsu_rsu_dict))

                        self.remove_task(t_dict['_id'])

            time.sleep(0.1)
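
process_tasks blocks in a 100 ms polling loop, so it is presumably started on its own thread, mirroring the way logging_task is launched in Example #4; a sketch, assuming it runs from the class's __init__ (the attribute name is made up):

# Hypothetical startup: run the polling loop in the background.
self._process_thread = threading.Thread(target=self.process_tasks, args=())
self._process_thread.daemon = True  # don't block interpreter shutdown
self._process_thread.start()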
Example #3
    def logging_task(self):
        while self._LOG_FLAG:
            log_dict = {}
            log_dict['time'] = utils.time_print(0)
            log_dict['queued_tasks'] = [
                task._id for task in self._tasks
                if task._id not in self._processed_tasks
            ]
            log_dict['total_processed'] = list(set(self._processed_tasks))
            log_dict['timed_out'] = []
            if (len(self._tasks) + len(self._processed_tasks)) != 0:
                utils.write_log(self._log_file, log_dict)

            time.sleep(GLOBAL_VARS.LOG_RATE)
Example #4
    def parse_topic(self, msg):
        t_arr = msg.topic.split("/")

        # For logging only
        if msg.topic == GLOBAL_VARS.START_LOGGING:
            self._processed_tasks = []
            self._logging_task = threading.Thread(target=self.logging_task,
                                                  args=())
            self._logging_task.start()

        if msg.topic == GLOBAL_VARS.STOP_LOGGING:
            utils.print_log("Stopping logging and removing tasks.")
            self._LOG_FLAG = False
            log_dict = {}
            log_dict['time'] = utils.time_print(0)
            log_dict['queued_tasks'] = []
            log_dict['total_processed'] = list(set(self._processed_tasks))
            # Collect timed-out tasks before clearing the queue; clearing
            # first would always leave this list empty.
            log_dict['timed_out'] = [
                task._id for task in self._tasks
                if task._id not in self._processed_tasks
            ]
            self._tasks = []
            utils.write_log(self._log_file, log_dict)

        if GLOBAL_VARS.BROKER_TO_RSU in msg.topic:
            rsu = t_arr[-1]
            if rsu == self._client_id:
                data = json.loads(msg.payload)
                if DEBUG == 1:
                    utils.print_log("RSU receives: {}".format(data['_id']))
                self.verify_append_task(Route_Task(data))

        if GLOBAL_VARS.RSU_TO_RSU in msg.topic:
            rsu = t_arr[-1]
            if rsu == self._client_id:
                data = json.loads(msg.payload)
                self.update_subtask(data)
                if len(self._tasks) > 0:
                    if DEBUG == 1:
                        utils.print_log("Updated tasks:")
                        for t in self._tasks:
                            utils.print_log("\t{}:{}".format(t._id, t.next_node))

        return True
    def generate_mongo_tasks_entry(self, tasks):
        for task in tasks:
            print(task)
            data = task.get_json()
            data['inquiry_time'] = utils.time_print(int)
            data['allocation_time'] = None
            data['processed_time'] = None
            data['state'] = GLOBAL_VARS.TASK_STATES["UNSENT"]
            data['next_node'] = None
            data['route'] = None
            data['travel_time'] = None
            data['rsu_assigned_to'] = None
            data['retry_count'] = 0
            data['next_rsu'] = None
            t_id = data['_id']

            _DB = self._mongodb_c.get_db("admin")
            found = _DB["tasks"].count_documents({"_id": t_id})
            if found == 0:
                t_id = self._mongodb_c.insert("tasks", data)
Example #6
def generate_tasks(Qdf):
    task_list = []
    for _, row in Qdf.iterrows():
        t_id = row.t_id
        og = copy.copy(row['og'])
        s = row.s
        d = row.d
        t = str(row.t) + ':' + str(row.t_m)

        if len(og) >= 2:
            nodes = []
            nodes.append(s)
            nodes.extend([None] * (len(og) - 2))
            nodes.append(d)
            og.append(None)

            # [t] * len(og) assigns the time window to each pair
            ids = [
                "{}{}{}".format(t_id,
                                str(i).zfill(3),
                                str(len(og) - 2).zfill(3))
                for i in range(len(og) - 1)
            ]
            pairs = zip(ids, nodes, og, og[1:], [t] * len(og))
        elif len(og) == 1:
            id_ = "{}{}{}".format(t_id, str(0).zfill(3), str(0).zfill(3))
            pairs = zip([id_], [s], [d], [og[0]], [t])
        else:
            # No grids for this query; nothing to schedule.
            continue

        labels = ["_id", "node", "gridA", "gridB", "time_window"]
        for p in pairs:
            task_json = dict(zip(labels, p))
            task_json['state'] = GLOBAL_VARS.TASK_STATES["UNSENT"]
            task_json['next_node'] = None
            task_json['inquiry_time'] = utils.time_print(0)
            task_json['next_rsu'] = None
            task_json['rsu_assigned_to'] = None
            task_json['route'] = None
            task_list.append(Task(task_json))
    return task_list
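
The subtask _id concatenates the query id with a zero-padded step index and the zero-padded index of the last step; a quick illustration (the query id is made up):

t_id = "b7c475b6"       # hypothetical query id
step, last_step = 2, 5  # third subtask of a six-step route
sub_id = "{}{}{}".format(t_id, str(step).zfill(3), str(last_step).zfill(3))
print(sub_id)           # b7c475b6002005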
Example #7
def send_tasks(i, ts):
    print("Sending chunk: {} of size {}".format(i, len(ts)))
    mqttc = MyMQTTClass()
    mqttc.connect()
    mqttc.open()

    for t in ts:
        payload = {}
        payload['time_sent'] = time_print(0)
        payload['data'] = t.__dict__
        if __debug__:
            print(payload['data'])
        data = json.dumps(payload)
        mqttc.send(GLOBAL_VARS.QUERY_TO_BROKER, data)
        time.sleep(0.02)

    mqttc.close()
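
send_tasks takes a chunk index and a list of tasks; a hedged driver that fans a task list out in fixed-size chunks might look like this (CHUNK_SIZE and the get_tasks arguments are assumptions):

CHUNK_SIZE = 100  # assumed; not defined in these examples
task_list = get_tasks(mongodb, 20, 20, 1000)  # hypothetical call, see Example #10
chunks = [task_list[i:i + CHUNK_SIZE]
          for i in range(0, len(task_list), CHUNK_SIZE)]
for i, chunk in enumerate(chunks):
    send_tasks(i, chunk)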
    def generate_mongo_payload(self, message):
        print("generate_mongo_payload:", len(message))
        _m = utils.decode(message)
        _d = json.loads(_m)
        data = _d['data']
        data['inquiry_time'] = utils.time_print(int)
        data['allocation_time'] = None
        data['processed_time'] = None
        data['state'] = GLOBAL_VARS.TASK_STATES["UNSENT"]
        data['next_node'] = None
        data['route'] = None
        data['travel_time'] = None
        data['rsu_assigned_to'] = None
        data['retry_count'] = 0
        data['next_rsu'] = None
        t_id = data['_id']

        _DB = self._mongodb_c.get_db("admin")
        found = _DB["tasks"].count_documents({"_id": t_id})
        if found == 0:
            t_id = self._mongodb_c.insert("tasks", data)
Example #9
def write_queries_to_mongodb(mongodb, query_df):
    print("write_queries_to_mongodb()")
    # print(query_df.head())
    # df = query_df[['t_id', 's', 'd', 't', 'r']].copy()
    query_df = query_df.rename(columns={"t_id": "_id", "r": "initial_route"})
    query_df['query_time'] = time_print(0)
    query_df['total_processed_time'] = None
    query_df['final_route'] = None
    query_df['total_travel_time'] = None

    # A plain insert fails when the record already exists, so check the _id first.
    records = json.loads(query_df.T.to_json()).values()
    for record in records:
        t_id = record['_id']

        if mongodb._db.queries.count_documents({"_id": t_id}) == 0:
            mongodb._db.queries.insert_one(record)
        else:
            print("{} already exists.".format(t_id))

    print("Finished writing")
Example #10
def get_tasks(mongodb, x, y, queries, sorted=True):
    print("get_tasks({}, {}, {})".format(x, y, queries))
    start = time_print(0)

    if not os.path.exists(os.path.join(os.getcwd(), 'data')):
        raise OSError("Must first download data, see README.md")
    data_dir = os.path.join(os.getcwd(), 'data')

    file_path = os.path.join(data_dir, '{}-{}-G.pkl'.format(x, y))

    with open(file_path, 'rb') as handle:
        nx_g = pickle.load(handle)

    number_of_queries = queries

    file_path = os.path.join(
        data_dir, '{}-queries-for-{}-{}.pkl'.format(number_of_queries, x, y))
    if not os.path.exists(file_path):
        Qdf = generator.generate_query(nx_g, number_of_queries)
        Qdf.to_pickle(file_path)
    else:
        Qdf = pd.read_pickle(file_path)

    write_queries_to_mongodb(mongodb, Qdf)

    # Generate random minute values (0-59)
    random.seed(100)
    minutes = random.choices(range(0, 60), k=number_of_queries)

    # Generate hours based on 'time-zone'
    dead_time = [21, 22, 23, 0, 1, 2, 3, 4, 5]
    morning_rush = [6, 7, 8, 9]
    evening_rush = [15, 16, 17, 18]
    working_hour = [10, 11, 12, 13, 14]
    random_hours = range(0, 24)

    if GLOBAL_VARS.TIME_ZONE == 'DEAD_TIME':
        Qdf['t'] = random.choices(dead_time, k=number_of_queries)
    elif GLOBAL_VARS.TIME_ZONE == 'MORNING_RUSH':
        Qdf['t'] = random.choices(morning_rush, k=number_of_queries)
    elif GLOBAL_VARS.TIME_ZONE == 'EVENING_RUSH':
        Qdf['t'] = random.choices(evening_rush, k=number_of_queries)
    elif GLOBAL_VARS.TIME_ZONE == 'WORKING_HOUR':
        Qdf['t'] = random.choices(working_hour, k=number_of_queries)
    else:
        Qdf['t'] = random.choices(random_hours, k=number_of_queries)

    Qdf.insert(4, "t_m", minutes, True)

    a = generator.gen_SG(nx_g, Qdf)
    Qdf = Qdf.assign(og=a)

    task_list = generator.generate_tasks(Qdf)

    elapsed = time_print(0) - start
    print("Run time: {} ms".format(elapsed))

    if sorted:
        task_list.sort(key=lambda task: task.step)
    return task_list
    def parse_topic(self, msg):
        t_arr = msg.topic.split("/")

        if msg.topic == GLOBAL_VARS.SIMULATED_SINGLE_QUERY_TO_BROKER:
            data = json.loads(utils.decode(msg.payload))
            print("Broker receives : {}".format(data))
            tasks = generator.get_single_task(self._mongodb_c, data['x'],
                                              data['y'], data['s'], data['d'],
                                              data['t'])
            print(tasks)
            print(type(tasks))

            self.generate_mongo_tasks_entry(tasks)

            start = utils.time_print(0)
            utils.print_log("Starting sub-task allocation.")
            for task in tasks:
                self.subtask_allocation(GLOBAL_VARS.NEIGHBOR_LEVEL, task)
            for task in tasks:
                self.assign_next_rsu(task)
            elapsed = utils.time_print(0) - start
            utils.print_log("Total allocation time: {} ms".format(elapsed))

            # HACK: Add sleep so I can get the allocation time data
            # time.sleep(5)

            self._tasks = tasks
            self._log_flag_once = False

            self._task_start_time = utils.time_print(0)
            # The placement here is purely for debugging
            self._collect_tasks = threading.Thread(
                target=self.compile_tasks_by_id, args=())
            self._collect_tasks.start()

            self.send(GLOBAL_VARS.START_LOGGING, utils.encode("START"))

        if msg.topic == GLOBAL_VARS.SIMULATED_QUERY_TO_BROKER:
            data = json.loads(utils.decode(msg.payload))
            print("Broker receives : {}".format(data))

            tasks = generator.get_tasks(self._mongodb_c,
                                        data['x'],
                                        data['y'],
                                        data['number_of_queries'],
                                        sorted=True)

            self.generate_mongo_tasks_entry(tasks)

            start = utils.time_print(0)
            utils.print_log("Starting sub-task allocation.")
            for task in tasks:
                self.subtask_allocation(GLOBAL_VARS.NEIGHBOR_LEVEL, task)
            for task in tasks:
                self.assign_next_rsu(task)
            elapsed = utils.time_print(0) - start
            utils.print_log("Total allocation time: {} ms".format(elapsed))

            # # HACK: Add sleep so I can get the allocation time data
            # time.sleep(5)

            self._tasks = tasks
            self._log_flag_once = False

            self._task_start_time = utils.time_print(0)
            # The placement here is purely for debugging
            self._collect_tasks = threading.Thread(
                target=self.compile_tasks_by_id, args=())
            self._collect_tasks.start()

            self.send(GLOBAL_VARS.START_LOGGING, utils.encode("START"))

        if msg.topic == GLOBAL_VARS.QUERY_TO_BROKER:
            print("Broker receives : {}".format(str(msg.payload)))
            self.generate_mongo_payload(msg.payload)

        if GLOBAL_VARS.RESPONSE_TO_BROKER in msg.topic:
            rsu = t_arr[-1]
            if rsu in list(GLOBAL_VARS.RSUS.values()):
                # update mongodb entry as Responded (2)
                data = json.loads(utils.decode(msg.payload))

                route = data['route']
                travel_time = data['travel_time']
                utils.print_log("worker-{} responded with :{}".format(
                    rsu, data['_id']))
                self._mongodb_c.update_one(
                    "tasks", data['_id'], {
                        "state": GLOBAL_VARS.TASK_STATES["RESPONDED"],
                        "processed_time": utils.time_print(int),
                        "travel_time": travel_time,
                        "route": route,
                        "next_node": route[-1]
                    })
                if DEBUG == 1:
                    utils.print_log("Updated: {}".format(data['_id']))

        self.start_unsent_tasks_thread()
    def compile_tasks_by_id(self):
        while True:
            if utils.time_print(int) - self._task_start_time >= GLOBAL_VARS.TIMEOUT:
                res = self._mongodb_c._db.tasks.find({
                    "state": {"$eq": GLOBAL_VARS.TASK_STATES["SENT"]}
                }).sort([("_id", 1)])

                timedout = []

                for task in res:
                    parsed_id = task['parsed_id']
                    if parsed_id not in timedout:
                        timedout.append(parsed_id)
                    self._mongodb_c._db.tasks.update_many(
                        {"parsed_id": parsed_id}, {
                            '$set': {
                                'state': GLOBAL_VARS.TASK_STATES['TIMEOUT']
                            }
                        })
                    self._mongodb_c._db.queries.update_one(
                        {"_id": parsed_id}, {
                            '$set': {
                                'final_route': "ERROR",
                                'total_travel_time': "ERROR",
                                'total_processed_time': "ERROR"
                            }
                        })

                for t in timedout:
                    utils.print_log("{} timed out.".format(t))
                self.send(GLOBAL_VARS.STOP_LOGGING, utils.encode("STOP"))
                self._mongodb_c.save_collection_to_json('queries')
                self._mongodb_c.save_collection_to_json('tasks')
                break

            count = self._mongodb_c._db.queries.count_documents(
                {"final_route": None})
            if count == 0:
                continue

            res = self._mongodb_c._db.queries.find({"final_route": None})
            for r in res:
                parsed_id = r['_id']
                count = self._mongodb_c._db.tasks.count_documents({
                    "parsed_id": parsed_id,
                    "state": GLOBAL_VARS.TASK_STATES["RESPONDED"]
                })
                temps = self._mongodb_c._db.tasks.find_one({
                    "parsed_id": parsed_id,
                    "state": GLOBAL_VARS.TASK_STATES["RESPONDED"]
                })

                if not temps:
                    continue

                task_steps = int(temps['steps']) + 1
                if task_steps == count:
                    tasks = self._mongodb_c._db.tasks.find({
                        "parsed_id": parsed_id,
                        "state": GLOBAL_VARS.TASK_STATES["RESPONDED"]
                    })
                    route = []
                    total_travel_time = 0
                    utils.print_log("Collecting {}".format(parsed_id))
                    for task in tasks:
                        route.extend(task['route'])
                        last_processing_time = task['processed_time']
                        total_travel_time += task['travel_time']

                    if route and total_travel_time > 0:
                        self._mongodb_c._db.queries.update_one(
                            {'_id': parsed_id}, {
                                '$set': {
                                    'final_route': utils.f7(route),
                                    'total_processed_time':
                                    last_processing_time,
                                    'total_travel_time': total_travel_time
                                }
                            })

                        self._mongodb_c._db.tasks.update_many(
                            {
                                "parsed_id": parsed_id,
                                "state": GLOBAL_VARS.TASK_STATES["RESPONDED"]
                            }, {
                                '$set': {
                                    'state':
                                    GLOBAL_VARS.TASK_STATES['COLLECTED']
                                }
                            })
    def subtask_allocation(self, nlevel, subtask):
        data = subtask.get_json()
        gridA = data['gridA']
        gridB = data['gridB']

        if isinstance(gridA, int):
            optimal_rsu = gridB
        else:
            optimal_rsu = gridA

        if DEBUG == 1:
            print("Checking for: {}".format(optimal_rsu))

        if nlevel == 0:
            self._mongodb_c._db.tasks.update_one({"_id": data['_id']}, {
                '$set': {
                    'rsu_assigned_to': optimal_rsu,
                    'allocation_time': utils.time_print(int)
                }
            })
            return

        if nlevel > 0:
            r = geo_utils.get_rsu_by_grid_id(self._rsu_arr, optimal_rsu)
            nn = geo_utils.get_neighbors_level(self._rsu_arr, r.get_idx(),
                                               nlevel)

            nn.insert(0, r.get_idx())
            found = False
            candidate_rsus = []

            # Arranges the neighbors by distance from the most optimal grid in question
            # DEBUG: This will cause some additional delays
            nn = geo_utils.sort_idx_by_distance(GLOBAL_VARS.X_AXIS,
                                                GLOBAL_VARS.Y_AXIS,
                                                r.get_idx(), nn)
            nn.sort(key=lambda x: x[1])
            nn = [n[0] for n in nn]

            for n in nn:
                rsu = self._rsu_arr[n]
                candidate_rsus.append(rsu)

                res, gid = rsu.add_task(
                    self._nxg,
                    self._rsu_arr,
                    subtask,
                    use_sub_grids=GLOBAL_VARS.USE_SUB_GRIDS)
                if res:
                    found = True
                    self._mongodb_c._db.tasks.update_one(
                        {"_id": data['_id']}, {
                            '$set': {
                                'rsu_assigned_to': gid,
                                'allocation_time': utils.time_print(int)
                            }
                        })
                    # utils.print_log("Assigned to: {}".format(gid))
                    break
                # If the RSU cannot accept the task, move on to the next
                # neighbor (keeping track of which one has the shortest queue).
            # Get RSU with least number of queue
            if not found:
                if DEBUG == 1:
                    print("Not found by looking, must force...")
                # NOTE: Shuffle first so that subtasks tied for the minimum
                # queue length can land on different RSUs instead of always
                # the first one in iteration order.
                # IDEA: Check whether this actually has an effect.
                # As a last resort, the task is forced onto the RSU with the
                # shortest queue; without the shuffle it would always go to
                # the nearest (most optimal) RSU, which may cause delay.
                random.shuffle(candidate_rsus)
                candidate_rsus = sorted(candidate_rsus,
                                        key=lambda rsu: len(rsu.queue),
                                        reverse=False)

                if DEBUG == 1:
                    [print(r) for r in candidate_rsus]
                    if candidate_rsus[0]._sub_grids:
                        [print(r) for r in candidate_rsus[0]._sub_grids]
                        print("\n")

                _, gid = candidate_rsus[0].add_task_forced(
                    subtask, use_sub_grids=GLOBAL_VARS.USE_SUB_GRIDS)
                # utils.print_log("Forced to: {}".format(gid))
                self._mongodb_c._db.tasks.update_one({"_id": data['_id']}, {
                    '$set': {
                        'rsu_assigned_to': gid,
                        'allocation_time': utils.time_print(int)
                    }
                })

            return