Exemple #1
0
def initialize():
    """Build the demo graph, run an A* search from node '15' to '35',
    and render the resulting link categories as a chart."""
    demo_graph = gs.Graph()
    setup.build_graph(demo_graph)

    pathfinder = AStar()
    pathfinder.search(demo_graph, '15', '35')

    # Collect the categorised links produced by the search.
    standard = pathfinder.standard_links
    transitions = pathfinder.transition_links
    roundabouts = pathfinder.roundabout_links
    chart.render_chart(roundabouts, transitions, standard)
Exemple #2
0
def main():
    """Interactively configure and run an A* search, then print the route.

    Reads the map file name, start/goal coordinates and heuristic choice
    from standard input, runs the search, and prints the resulting route
    as a list of [x, y] pairs.
    """
    # Available heuristic functions, selectable by index.
    heuristics_func_list = [
        calc_euclidean_distance, calc_manhattan_distance, all_0
    ]

    # Interactive configuration via standard input/output.
    print("# 読み込むファイル名を入力してください(何も入力しない場合はmap.csvの読み込みを試みます)")
    f_name = input()
    # Empty input falls back to the default map (map.csv).
    if f_name == '':
        a_star = AStar()
    else:
        a_star = AStar(f_name=f_name)

    a_star.print_map()

    print("# スタートの位置を空白区切りで入力してください(X, Yの順)")
    start = tuple(map(int, input().split()))
    print("# ゴールの位置を空白区切りで入力してください(X, Yの順)")
    goal = tuple(map(int, input().split()))
    print("# 使用するヒューリスティクス関数を以下の番号から指定してください")
    # enumerate instead of range(len(...)) — same output, idiomatic iteration.
    for i, func in enumerate(heuristics_func_list):
        print("#     %d: %s" % (i, func.__name__))
    func_index = int(input())

    route = a_star.search(start, goal, heuristics_func_list[func_index])

    print("route: ", end='')
    for x, y in route:
        print("[%s, %s], " % (x, y), end='')
    print("")
Exemple #3
0
class Environment(object):
    """Grid environment for conflict-based multi-agent path planning.

    Stores the grid dimension, obstacle cells and per-agent state records
    (start/pick/drop/end/goal/check), and owns the constraint sets consulted
    by the low-level A* search.
    """

    def __init__(self, dimension, agents, obstacles):
        self.dimension = dimension    # (width, height) of the grid
        self.obstacles = obstacles    # collection of blocked (x, y) cells

        self.agents = agents
        self.agent_dict = {}

        self.make_agent_dict()

        # Constraints active for the agent currently being planned.
        self.constraints = Constraints()
        # Per-agent constraint sets, keyed by agent name.
        self.constraint_dict = {}

        self.a_star = AStar(self)

    def get_neighbors(self, state):
        """Return valid successor states: wait, up, down, left, right."""
        neighbors = []

        # Wait action
        n = State(state.time + 1, state.location)
        if self.state_valid(n):
            neighbors.append(n)
        # Up action
        n = State(state.time + 1, Location(state.location.x, state.location.y+1))
        if self.state_valid(n) and self.transition_valid(state, n):
            neighbors.append(n)
        # Down action
        n = State(state.time + 1, Location(state.location.x, state.location.y-1))
        if self.state_valid(n) and self.transition_valid(state, n):
            neighbors.append(n)
        # Left action
        n = State(state.time + 1, Location(state.location.x-1, state.location.y))
        if self.state_valid(n) and self.transition_valid(state, n):
            neighbors.append(n)
        # Right action
        n = State(state.time + 1, Location(state.location.x+1, state.location.y))
        if self.state_valid(n) and self.transition_valid(state, n):
            neighbors.append(n)
        return neighbors


    def get_first_conflict(self, solution):
        """Return the earliest vertex or edge Conflict in *solution*.

        Returns False when the joint solution is conflict-free.
        """
        max_t = max([len(plan) for plan in solution.values()])
        result = Conflict()
        for t in range(max_t):
            # Vertex conflicts: two agents occupy the same cell at time t.
            for agent_1, agent_2 in combinations(solution.keys(), 2):
                state_1 = self.get_state(agent_1, solution, t)
                state_2 = self.get_state(agent_2, solution, t)
                if state_1.is_equal_except_time(state_2):
                    result.time = t
                    result.type = Conflict.VERTEX
                    result.location_1 = state_1.location
                    result.agent_1 = agent_1
                    result.agent_2 = agent_2
                    return result

            # Edge conflicts: two agents swap cells between t and t+1.
            for agent_1, agent_2 in combinations(solution.keys(), 2):
                state_1a = self.get_state(agent_1, solution, t)
                state_1b = self.get_state(agent_1, solution, t+1)

                state_2a = self.get_state(agent_2, solution, t)
                state_2b = self.get_state(agent_2, solution, t+1)

                if state_1a.is_equal_except_time(state_2b) and state_1b.is_equal_except_time(state_2a):
                    result.time = t
                    result.type = Conflict.EDGE
                    result.agent_1 = agent_1
                    result.agent_2 = agent_2
                    result.location_1 = state_1a.location
                    result.location_2 = state_1b.location
                    return result
        return False

    def create_constraints_from_conflict(self, conflict):
        """Translate a Conflict into per-agent Constraints (CBS branching)."""
        constraint_dict = {}
        if conflict.type == Conflict.VERTEX:
            v_constraint = VertexConstraint(conflict.time, conflict.location_1)
            constraint = Constraints()
            constraint.vertex_constraints |= {v_constraint}
            # Both agents receive the same vertex constraint.
            constraint_dict[conflict.agent_1] = constraint
            constraint_dict[conflict.agent_2] = constraint

        elif conflict.type == Conflict.EDGE:
            constraint1 = Constraints()
            constraint2 = Constraints()

            # Forbid each agent from traversing the edge in its own direction.
            e_constraint1 = EdgeConstraint(conflict.time, conflict.location_1, conflict.location_2)
            e_constraint2 = EdgeConstraint(conflict.time, conflict.location_2, conflict.location_1)

            constraint1.edge_constraints |= {e_constraint1}
            constraint2.edge_constraints |= {e_constraint2}

            constraint_dict[conflict.agent_1] = constraint1
            constraint_dict[conflict.agent_2] = constraint2

        return constraint_dict

    def get_state(self, agent_name, solution, t):
        """State of *agent_name* at time *t*; agents wait at their last state."""
        if t < len(solution[agent_name]):
            return solution[agent_name][t]
        else:
            return solution[agent_name][-1]

    def state_valid(self, state):
        """True if *state* is inside the grid, unobstructed and unconstrained."""
        return state.location.x >= 0 and state.location.x < self.dimension[0] \
            and state.location.y >= 0 and state.location.y < self.dimension[1] \
            and VertexConstraint(state.time, state.location) not in self.constraints.vertex_constraints \
            and (state.location.x, state.location.y) not in self.obstacles

    def transition_valid(self, state_1, state_2):
        """True if moving state_1 -> state_2 violates no edge constraint."""
        return EdgeConstraint(state_1.time, state_1.location, state_2.location) not in self.constraints.edge_constraints

    def is_solution(self, agent_name):
        # Not implemented; kept for interface compatibility with the planner.
        pass

    def admissible_heuristic(self, state, agent_name):
        """Heuristic cost-to-go used by the low-level A* search.

        The Manhattan-distance variants that previously lived here were
        commented out, leaving the first ``if`` with an empty body — a
        SyntaxError at import time. This restores a valid, trivially
        admissible zero heuristic (A* degrades to uniform-cost search);
        the discarded distance computation had no observable effect since
        the function always returned 0.
        """
        return 0

    # check if at goal
    def is_at_goal(self, state, agent_name):
        """True when *state* matches the agent's current goal (time ignored)."""
        goal_state = self.agent_dict[agent_name]["goal"]
        return state.is_equal_except_time(goal_state)

    def make_agent_dict(self):
        """Build agent_dict from the raw agent specs.

        Each entry records start/pick/drop/end/check states; the initial
        goal is set to the pick location.
        """
        for agent in self.agents:
            start_state = State(0, Location(agent['start'][0], agent['start'][1]))
            pick_state = State(0, Location(agent['pick'][0], agent['pick'][1]))
            drop_state = State(0, Location(agent['drop'][0], agent['drop'][1]))
            end_state = State(0, Location(agent['end'][0], agent['end'][1]))
            # The first goal an agent pursues is its pick location.
            goal_state = State(0, Location(agent['pick'][0], agent['pick'][1]))
            check = State(0, Location(agent['check'][0], agent['check'][1]))
            self.agent_dict.update({agent['name']:{'start':start_state, 'pick':pick_state, 'drop':drop_state, 'end':end_state, 'goal':goal_state, "check":check}})

    def compute_solution(self):
        """Run the low-level search per agent; False if any agent fails."""
        solution = {}
        for agent in self.agent_dict.keys():
            # Activate this agent's constraints before its individual search.
            self.constraints = self.constraint_dict.setdefault(agent, Constraints())
            local_solution = self.a_star.search(agent)
            if not local_solution:
                return False
            solution.update({agent:local_solution})
        return solution

    def compute_solution_cost(self, solution):
        """Sum-of-path-lengths cost of a joint solution."""
        return sum([len(path) for path in solution.values()])
Exemple #4
0
class VolumeEnvironment(object):
    """Graph-based CBS environment with vertex, edge and volume conflicts.

    A minimum-cost path table (all-pairs Dijkstra) backs the admissible
    heuristic; it is loaded from ``<model_name>.mct`` when present, otherwise
    computed once and cached to that file.
    """

    def __init__(self,
                 graph,
                 agents,
                 model_name,
                 volume_conflict_table,
                 min_cost_table=0):
        self.graph = graph
        self.volume_conflict_table = volume_conflict_table
        if min_cost_table != 0:
            # Caller supplied a precomputed min-cost table.
            self.min_cost_path_table = min_cost_table
        else:
            try:
                with open(model_name + '.mct', 'r') as f:
                    data = f.read()
                    if data != '':
                        self.min_cost_path_table = ast.literal_eval(data)
            except FileNotFoundError:
                # First run: compute the table with Dijkstra and cache it.
                dj = Dijkstra(graph)
                dj.traverse()
                self.min_cost_path_table = dj.paths
                with open(model_name + '.mct', 'w') as f:
                    f.write(str(self.min_cost_path_table))

        self.agents = agents
        self.agent_dict = {}
        self.make_agent_dict()

        # Constraints active for the agent currently being planned.
        self.constraints = Constraints()
        self.constraint_dict = {}

        self.a_star = AStar(self)

    def get_neighbors(self, state):
        """Return valid successors of *state*: wait, or follow a graph edge."""
        neighbors = []

        # TODO: decide how a time step should be modelled — derived from the
        # route length (length / speed, which raises the question of how long
        # a "wait" should last: the longest route's travel time? the average
        # route length / speed?) or one unit per move. Deriving the duration
        # could be a future optimisation; for now every action advances time
        # by exactly 1.

        # Wait
        n = State(state.time + 1, state.location)
        if self.state_valid(n):
            neighbors.append(n)

        # neighbors in the graph
        for neighbor in self.graph.neighbors(state.location.name):
            # neighbor is actually a path; its end point is the next node
            n = State(state.time + 1, Location(neighbor.end))
            if self.state_valid(n) and self.transition_valid(
                    state, n) and self.volume_valid(n):
                neighbors.append(n)
        return neighbors

    def cost(self, start, end):
        """Edge cost between two locations, delegated to the graph."""
        return self.graph.cost(start, end)

    def get_first_conflict(self, solution):
        """Return the earliest vertex, volume or edge Conflict, else False."""
        max_t = max([len(plan) for plan in solution.values()])
        result = Conflict()
        for t in range(max_t):
            # Vertex conflicts: two agents at the same node at time t.
            for agent_1, agent_2 in combinations(solution.keys(), 2):
                state_1 = self.get_state(agent_1, solution, t)
                state_2 = self.get_state(agent_2, solution, t)
                if state_1.is_equal_except_time(state_2):
                    result.time = t
                    result.type = Conflict.VERTEX
                    result.location_1 = state_1.location
                    result.agent_1 = agent_1
                    result.agent_2 = agent_2
                    return result

            # Volume conflicts: one agent's location lies inside the
            # conflict volume of the other's. NOTE: `and` binds tighter
            # than `or`, so this is (A and B) or (C and D).
            for agent_1, agent_2 in combinations(solution.keys(), 2):
                state_1 = self.get_state(agent_1, solution, t)
                state_2 = self.get_state(agent_2, solution, t)
                location1 = state_1.location
                location2 = state_2.location

                if location1 in self.volume_conflict_table.keys() and \
                    self.volume_conflict_table[location1].contains(location2) or \
                    location2 in self.volume_conflict_table.keys() and \
                    self.volume_conflict_table[location2].contains(location1):
                    result.time = t
                    result.type = Conflict.VOLUME
                    result.location_1 = location1
                    result.location_2 = location2
                    result.agent_1 = agent_1
                    result.agent_2 = agent_2
                    return result

            # Edge conflicts: two agents swap nodes between t and t+1.
            for agent_1, agent_2 in combinations(solution.keys(), 2):
                state_1a = self.get_state(agent_1, solution, t)
                state_1b = self.get_state(agent_1, solution, t + 1)

                state_2a = self.get_state(agent_2, solution, t)
                state_2b = self.get_state(agent_2, solution, t + 1)

                if state_1a.is_equal_except_time(
                        state_2b) and state_1b.is_equal_except_time(state_2a):
                    result.time = t
                    result.type = Conflict.EDGE
                    result.agent_1 = agent_1
                    result.agent_2 = agent_2
                    result.location_1 = state_1a.location
                    result.location_2 = state_1b.location
                    return result
        return False

    def create_constraints_from_conflict(self, conflict):
        """Translate a Conflict into per-agent Constraints (CBS branching)."""
        constraint_dict = {}
        if conflict.type == Conflict.VERTEX:
            v_constraint = VertexConstraint(conflict.time, conflict.location_1)
            constraint = Constraints()
            constraint.vertex_constraints |= {v_constraint}
            # Both agents receive the same vertex constraint.
            constraint_dict[conflict.agent_1] = constraint
            constraint_dict[conflict.agent_2] = constraint

        elif conflict.type == Conflict.EDGE:
            constraint1 = Constraints()
            constraint2 = Constraints()

            # Forbid each agent from traversing the edge in its own direction.
            e_constraint1 = EdgeConstraint(conflict.time, conflict.location_1,
                                           conflict.location_2)
            e_constraint2 = EdgeConstraint(conflict.time, conflict.location_2,
                                           conflict.location_1)

            constraint1.edge_constraints |= {e_constraint1}
            constraint2.edge_constraints |= {e_constraint2}

            constraint_dict[conflict.agent_1] = constraint1
            constraint_dict[conflict.agent_2] = constraint2

        elif conflict.type == Conflict.VOLUME:
            constraint1 = Constraints()
            constraint2 = Constraints()

            # Each agent is constrained away from its own conflicting location.
            v_constraint1 = VolumeConstraint(conflict.time,
                                             conflict.location_1)
            v_constraint2 = VolumeConstraint(conflict.time,
                                             conflict.location_2)

            constraint1.volume_constraints |= {v_constraint1}
            constraint2.volume_constraints |= {v_constraint2}

            constraint_dict[conflict.agent_1] = constraint1
            constraint_dict[conflict.agent_2] = constraint2

        return constraint_dict

    def get_state(self, agent_name, solution, t):
        """State of *agent_name* at time *t*; agents wait at their last state."""
        if t < len(solution[agent_name]):
            return solution[agent_name][t]
        else:
            return solution[agent_name][-1]

    def state_valid(self, state):
        """True if the state's node exists in the graph and is unconstrained."""
        return str(state.location.name) in self.graph.points.keys() \
            and VertexConstraint(state.time, state.location) not in self.constraints.vertex_constraints

    def transition_valid(self, state_1, state_2):
        """True if moving state_1 -> state_2 violates no edge constraint."""
        return EdgeConstraint(
            state_1.time, state_1.location,
            state_2.location) not in self.constraints.edge_constraints

    def volume_valid(self, state):
        """True if *state* violates no volume constraint."""
        return VolumeConstraint(
            state.time,
            state.location) not in self.constraints.volume_constraints

    def is_solution(self, agent_name):
        # Not implemented; kept for interface compatibility with the planner.
        pass

    def admissible_heuristic(self, state, agent_name):
        """Minimum-cost estimate from *state* to the agent's goal.

        Looks the (state, goal) pair up in the precomputed min-cost table.
        When the pair is missing, the original code printed a warning and
        fell through, implicitly returning None — which breaks the A* cost
        comparison. Fixed to warn and return 0, which is trivially
        admissible.
        """
        goal = self.agent_dict[agent_name]['goal']
        if state.is_equal_except_time(goal):
            return 0
        if state.location.name in self.min_cost_path_table.keys() and \
            goal.location.name in self.min_cost_path_table[state.location.name].keys():
            return self.min_cost_path_table[state.location.name][
                goal.location.name]['cost']
        else:
            # Missing table entry: warn and fall back to an admissible 0
            # instead of returning None.
            print('some thing goes wrong')
            return 0

    def is_at_goal(self, state, agent_name):
        """True when *state* matches the agent's goal (time ignored)."""
        goal_state = self.agent_dict[agent_name]['goal']
        return state.is_equal_except_time(goal_state)

    def make_agent_dict(self):
        """Build agent_dict with start/goal states from the raw agent specs."""
        for agent in self.agents:
            start_state = State(0, Location(agent['start']))
            goal_state = State(0, Location(agent['goal']))

            self.agent_dict.update(
                {agent['name']: {
                     'start': start_state,
                     'goal': goal_state
                 }})

    def compute_solution(self):
        """Run the low-level search per agent; False if any agent fails."""
        solution = {}
        for agent in self.agent_dict.keys():
            # Activate this agent's constraints before its individual search.
            self.constraints = self.constraint_dict.setdefault(
                agent, Constraints())
            local_solution = self.a_star.search(agent)
            if not local_solution:
                return False
            solution.update({agent: local_solution})
        return solution

    def compute_schedule(self, agent):
        """Plan a single agent under its constraints; False on failure."""
        solution = {}
        self.constraints = self.constraint_dict.setdefault(
            agent, Constraints())
        local_solution = self.a_star.search(agent)
        if not local_solution:
            return False
        solution.update({agent: local_solution})
        return solution

    def compute_solution_cost(self, solution):
        """Sum-of-path-lengths cost of a joint solution."""
        return sum([len(path) for path in solution.values()])
class Environment(object):
    """Grid environment for conflict-based multi-agent path planning.

    Stores the grid dimension, obstacle cells and per-agent start/goal
    states, and owns the constraint sets consulted by the low-level A*.
    """

    def __init__(self, dimension, agents, obstacles):
        self.dimension = dimension    # (width, height) of the grid
        self.obstacles = obstacles    # collection of blocked (x, y) cells

        self.agents = agents
        self.agent_dict = {}

        self.make_agent_dict()

        # Constraints active for the agent currently being planned.
        self.constraints = Constraints()
        # Per-agent constraint sets, keyed by agent name.
        self.constraint_dict = {}

        self.a_star = AStar(self)

    def get_neighbors(self, state):
        """Return valid successor states: wait, up, down, left, right."""
        neighbors = []

        # Wait action
        n = State(state.time + 1, state.location)
        if self.state_valid(n):
            neighbors.append(n)
        # Up action
        n = State(state.time + 1,
                  Location(state.location.x, state.location.y + 1))
        if self.state_valid(n) and self.transition_valid(state, n):
            neighbors.append(n)
        # Down action
        n = State(state.time + 1,
                  Location(state.location.x, state.location.y - 1))
        if self.state_valid(n) and self.transition_valid(state, n):
            neighbors.append(n)
        # Left action
        n = State(state.time + 1,
                  Location(state.location.x - 1, state.location.y))
        if self.state_valid(n) and self.transition_valid(state, n):
            neighbors.append(n)
        # Right action
        n = State(state.time + 1,
                  Location(state.location.x + 1, state.location.y))
        if self.state_valid(n) and self.transition_valid(state, n):
            neighbors.append(n)
        return neighbors

    def get_first_conflict(self, solution):
        """Return the earliest vertex or edge Conflict in *solution*.

        Returns False when the joint solution is conflict-free.
        """
        max_t = max([len(plan) for plan in solution.values()])
        result = Conflict()
        for t in range(max_t):
            # Vertex conflicts: two agents occupy the same cell at time t.
            for agent_1, agent_2 in combinations(solution.keys(), 2):
                state_1 = self.get_state(agent_1, solution, t)
                state_2 = self.get_state(agent_2, solution, t)
                if state_1.is_equal_except_time(state_2):
                    result.time = t
                    result.type = Conflict.VERTEX
                    result.location_1 = state_1.location
                    result.agent_1 = agent_1
                    result.agent_2 = agent_2
                    return result

            # Edge conflicts: two agents swap cells between t and t+1.
            for agent_1, agent_2 in combinations(solution.keys(), 2):
                state_1a = self.get_state(agent_1, solution, t)
                state_1b = self.get_state(agent_1, solution, t + 1)

                state_2a = self.get_state(agent_2, solution, t)
                state_2b = self.get_state(agent_2, solution, t + 1)

                if state_1a.is_equal_except_time(
                        state_2b) and state_1b.is_equal_except_time(state_2a):
                    result.time = t
                    result.type = Conflict.EDGE
                    result.agent_1 = agent_1
                    result.agent_2 = agent_2
                    result.location_1 = state_1a.location
                    result.location_2 = state_1b.location
                    return result
        return False

    def create_constraints_from_conflict(self, conflict):
        """Translate a Conflict into per-agent Constraints (CBS branching)."""
        constraint_dict = {}
        if conflict.type == Conflict.VERTEX:
            v_constraint = VertexConstraint(conflict.time, conflict.location_1)
            constraint = Constraints()
            constraint.vertex_constraints |= {v_constraint}
            # Both agents receive the same vertex constraint.
            constraint_dict[conflict.agent_1] = constraint
            constraint_dict[conflict.agent_2] = constraint

        elif conflict.type == Conflict.EDGE:
            constraint1 = Constraints()
            constraint2 = Constraints()

            # Forbid each agent from traversing the edge in its own direction.
            e_constraint1 = EdgeConstraint(conflict.time, conflict.location_1,
                                           conflict.location_2)
            e_constraint2 = EdgeConstraint(conflict.time, conflict.location_2,
                                           conflict.location_1)

            constraint1.edge_constraints |= {e_constraint1}
            constraint2.edge_constraints |= {e_constraint2}

            constraint_dict[conflict.agent_1] = constraint1
            constraint_dict[conflict.agent_2] = constraint2

        return constraint_dict

    def get_state(self, agent_name, solution, t):
        """State of *agent_name* at time *t*; agents wait at their last state."""
        if t < len(solution[agent_name]):
            return solution[agent_name][t]
        else:
            return solution[agent_name][-1]

    def state_valid(self, state):
        """True if *state* is inside the grid, unobstructed and unconstrained.

        Fixes two bounds-check bugs: the upper bounds used ``<=`` (allowing
        the out-of-grid coordinate ``x == dimension[0]``), and the y-axis was
        compared against ``dimension[0]`` instead of ``dimension[1]``. The
        other Environment implementation in this file uses the correct
        strict ``<`` against the matching axis.
        """
        return 0 <= state.location.x < self.dimension[0] \
            and 0 <= state.location.y < self.dimension[1] \
            and VertexConstraint(state.time, state.location) not in self.constraints.vertex_constraints \
            and (state.location.x, state.location.y) not in self.obstacles

    def transition_valid(self, state_1, state_2):
        """True if moving state_1 -> state_2 violates no edge constraint."""
        return EdgeConstraint(
            state_1.time, state_1.location,
            state_2.location) not in self.constraints.edge_constraints

    def is_solution(self, agent_name):
        # Not implemented; kept for interface compatibility with the planner.
        pass

    def admissible_heuristic(self, state, agent_name):
        """Manhattan distance from *state* to the agent's goal."""
        goal = self.agent_dict[agent_name]["goal"]
        return fabs(state.location.x -
                    goal.location.x) + fabs(state.location.y - goal.location.y)

    def is_at_goal(self, state, agent_name):
        """True when *state* matches the agent's goal (time ignored)."""
        goal_state = self.agent_dict[agent_name]["goal"]
        return state.is_equal_except_time(goal_state)

    def make_agent_dict(self):
        """Build agent_dict with start/goal states from the raw agent specs."""
        for agent in self.agents:
            start_state = State(0,
                                Location(agent['start'][0], agent['start'][1]))
            goal_state = State(0, Location(agent['goal'][0], agent['goal'][1]))

            self.agent_dict.update(
                {agent['name']: {
                     'start': start_state,
                     'goal': goal_state
                 }})

    def compute_solution(self):
        """Run the low-level search per agent; False if any agent fails."""
        solution = {}
        for agent in self.agent_dict.keys():
            # Activate this agent's constraints before its individual search.
            self.constraints = self.constraint_dict.setdefault(
                agent, Constraints())
            local_solution = self.a_star.search(agent)
            if not local_solution:
                return False
            solution.update({agent: local_solution})
        return solution

    def compute_solution_cost(self, solution):
        """Sum-of-path-lengths cost of a joint solution."""
        return sum([len(path) for path in solution.values()])
        # Create and store the results of Greedy Best First with H2
        gbfs_h2 = GreedyBestFirst(puzzle, heuristic="h2")
        gbfs_h2.search()

        if gbfs_h2.solution_found:
            results['gbfs_h2']['solution_path_list'].append(len(gbfs_h2.solution_path))
            results['gbfs_h2']['cost_list'].append(gbfs_h2.total_cost)
            results['gbfs_h2']['exec_time_list'].append(gbfs_h2.exec_time)
        else:
            results['gbfs_h2']['no_solution_count'] += 1
        results['gbfs_h2']['search_path_list'].append(len(gbfs_h2.search_path))


        # Create and store the results of A* with H1
        astar_h1 = AStar(puzzle, heuristic="h1")
        astar_h1.search()

        if astar_h1.solution_found:
            results['astar_h1']['solution_path_list'].append(len(astar_h1.solution_path))
            results['astar_h1']['cost_list'].append(astar_h1.total_cost)
            results['astar_h1']['exec_time_list'].append(astar_h1.exec_time)
        else:
            results['astar_h1']['no_solution_count'] += 1
        results['astar_h1']['search_path_list'].append(len(astar_h1.search_path))


        # Create and store the results of A* with H2
        astar_h2 = AStar(puzzle, heuristic="h2")
        astar_h2.search()

        if astar_h2.solution_found:
Exemple #7
0
import unittest
import graph_structure as gs
from a_star import AStar
import setup_nodes_and_links as setup

# Necessary module-level fixtures: a demo graph populated by the setup
# helper and a shared AStar instance exercised by the tests below.
new_graph = gs.Graph()
a_star = AStar()
setup.build_graph(new_graph)

# Precomputed search results used by TestAStar: the '3' -> '40' search is
# expected to be truthy, the '3' -> '70' search falsy.
result1 = a_star.search(new_graph, '3', '40')
result2 = a_star.search(new_graph, '3', '70')


class TestAStar(unittest.TestCase):
    """Unit tests for AStar.search on the prebuilt demo graph."""

    def test_search(self):
        """Reachable/unreachable results and argument validation."""
        self.assertTrue(result1)
        self.assertFalse(result2)
        # Invalid arguments must raise ValueError.
        with self.assertRaises(ValueError):
            a_star.search(new_graph, None, '40')
        with self.assertRaises(ValueError):
            a_star.search(new_graph, 3, '40')
        with self.assertRaises(ValueError):
            a_star.search('new_graph', '3', '40')


if __name__ == '__main__':
    unittest.main()