/
taskalloc.py
executable file
·121 lines (107 loc) · 3.59 KB
/
taskalloc.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
#! /usr/bin/env python2
# coding=utf8
from pyddl import Action, Domain, Problem, planner, neg
# from planner import *
import re
import numpy as np
def problem(distancemat, agentcount, verbose=True):
    """Build and solve the trash-collection planning problem with pyddl.

    distancemat -- square cost matrix over all nodes; the first
                   `agentcount` rows/columns are agent starting positions,
                   the remaining ones are trash cans.
    agentcount  -- number of agents; agent i starts at node i.
    verbose     -- forwarded to pyddl's planner().
    Returns the plan produced by pyddl's planner() (or None if no plan).
    """
    agents = list(range(agentcount))
    trashcount = len(distancemat) - agentcount
    trash_cans = [agentcount + k for k in range(trashcount)]
    # Zero-initialized per-agent cost table; copied on every heuristic call.
    agentDict = dict((a, 0) for a in agents)

    # Single action schema: an agent moves from one node to another and
    # marks the destination as checked.
    check = Action(
        "Check",
        # Send agent A1 from T1 to pick up trash at T2
        parameters=(
            ("agent", "A1"),
            ("trash_can", "T1"),
            ("trash_can", "T2"),
        ),
        preconditions=(
            ("at", "A1", "T1"),
            ("unchecked", "T2"),
        ),
        effects=(
            # A1 is no longer at T1
            neg(("at", "A1", "T1")),
            # A1 is now at T2
            ("at", "A1", "T2"),
            # T2 is checked.
            ("checked", "T2"),
            neg(("unchecked", "T2")),
        ),
    )
    domain = Domain((check,))

    problem = Problem(
        domain,
        {
            # List of all agents
            "agent": agents,
            # list of trash cans. Note: Starting positions are
            # treated as trash cans.
            "trash_can": list(range(len(distancemat))),
        },
        # Every agent starts at its own node; every real trash can is unchecked.
        init=[("at", a, a) for a in agents]
             + [("unchecked", t) for t in trash_cans],
        goal=[("checked", t) for t in trash_cans],
    )

    def heuristic(state):
        """Makespan estimate: travel cost of the busiest agent so far."""
        per_agent = agentDict.copy()
        # action.sig is (name, agent, from, to); accumulate per-agent travel.
        for sig in (action.sig for action in state.plan()):
            per_agent[sig[1]] += distancemat[sig[2]][sig[3]]
        return max(per_agent.values())

    return planner(problem,
                   heuristic=heuristic,
                   verbose=verbose)
def get_plan(distancemat, agentcount, verbose=False, pop=False):
    """
    Returns a dictionary with allocated trash cans for every agent.

    distancemat -- square cost matrix; first `agentcount` indices are agent
                   starting positions, the rest are trash cans.
    agentcount  -- number of agents; must be positive.
    verbose     -- if True, print the plan and its cost summary.
    pop         -- sets ordering of targets. True -> reversed order, for
                   pop()-support.

    The returned dict maps each agent index to its ordered list of target
    nodes, plus two string keys: "time" (makespan, i.e. the largest
    per-agent travel cost) and "distance" (total travel cost of all agents).
    Returns an empty dict when no plan exists.

    Raises ValueError if agentcount is not positive.
    """
    # Validate explicitly: `assert` would be stripped under `python -O`.
    if agentcount <= 0:
        raise ValueError("agentcount must be positive, got %r" % (agentcount,))
    plan = problem(distancemat, agentcount, verbose)
    if plan is None:
        # No feasible allocation.
        return dict()
    # Each action signature is a tuple: (action_name, agent, from, to),
    # e.g. ('Check', 'a', 1, 3).
    tupledPlan = [act.sig for act in plan]
    agents = list(range(agentcount))
    # Per-agent target lists and travel-cost accumulators.
    alloc_dict = dict((a, []) for a in agents)
    agent2time = dict((a, 0) for a in agents)
    # Populate allocations and accumulate each agent's travelled distance.
    for act in tupledPlan:
        agent2time[act[1]] += distancemat[act[2]][act[3]]
        if pop:
            # Reversed order so callers can consume targets via list.pop().
            alloc_dict[act[1]].insert(0, act[3])
        else:
            alloc_dict[act[1]].append(act[3])
    time = max(agent2time.values())
    totalDistance = sum(agent2time.values())
    alloc_dict["time"] = time
    alloc_dict["distance"] = totalDistance
    if verbose:
        print("time: %d,\tdistance: %d" % (time, totalDistance))
        for act in plan:
            print(act)
    return alloc_dict
if __name__ == "__main__":
    # Demo: 2 agents, 4 trash cans. Rows/columns 0-1 are the agents'
    # starting positions; 2-5 are trash cans. Matrix is symmetric.
    distancemat = [
        [0, 10, 2, 4, 5, 7],
        [10, 0, 5, 6, 6, 3],
        [2, 5, 0, 4, 4, 5],
        [4, 6, 4, 0, 2, 3],
        [5, 6, 4, 2, 0, 2],
        [7, 3, 5, 3, 2, 0],
    ]
    plan = get_plan(distancemat, 2, True)
    print("-------")
    print(plan)