Example #1
    def __init__(self, robot):
        self.body = planning_program.JointGroup()
        self.body.angular_weight = 0.5
        self.body.linear_weight = 50.0
        self.joint_group = robot.get('whole_body', robot.Items.JOINT_GROUP)
        self.buf = tf2_ros.Buffer()
        self._lis = tf2_ros.TransformListener(self.buf)
        self._broadcaster = tf2_ros.TransformBroadcaster()
        self.planner = planner.Planner(TEST_FOLDA, CAT)
        self._tf = Tf()

        try:
            self.srv_record_start = rospy.ServiceProxy("rosbag_record",
                                                       RosbagRecord)
            self.srv_record_stop = rospy.ServiceProxy("rosbag_record_stop",
                                                      RosbagStop)
            self.srv_marker = rospy.ServiceProxy("marker_data_saver", Empty)
            self.jbr = JointBagReader()
        except Exception:
            # Recording services may be unavailable; continue without them.
            pass
        self.rosbag_number = 0
        self.record_flag = False
        self.rosbag_time = rospy.Time(0)
        self.record_number = 0
        self.number = 0
        self.date = "act_bag_{}"
        self.image_number = 0
        self.image_time = 0
Example #2
    def __init__(self):
        self.rightArm = arm.Arm('right')
        self.leftArm = arm.Arm('left')
        self.rightArm.sim.env.SetViewer('qtcoin')
        self.valid = True
        self.graspPlanner = planner.Planner('right')
Example #3
def main():
    '''Run the planner and write matching .log and .gpx output files.'''
    import os.path
    import planner as pln
    plan = pln.Planner(params())
    plan.log(os.path.splitext(os.path.basename(__file__))[0] + '.log')
    plan.gpx(os.path.splitext(os.path.basename(__file__))[0] + '.gpx')
Example #4
    def __init__(self):
        self.rightArm = arm.Arm('right')
        self.leftArm = arm.Arm('left')
        self.rightArm.sim.env.SetViewer('qtcoin')
        self.valid = True
        self.graspPlanner = planner.Planner('right')
        rightArmPose = self.rightArm.get_pose()
        # Default the goal to the arm's current starting position.
        self.currentGoal = rightArmPose.position.array
Example #5
    def __init__(self):
        self.set_param(None)
        self.planner = planner.Planner(self._learned_folda,
                                       self._learned_category)
        rospy.Service("/gp_hsmm/trajectory/make_trajectory", TrajectoryOrder,
                      self.make_trajectory)
        self.buf = tf2_ros.Buffer()
        self._lis = tf2_ros.TransformListener(self.buf)
        print("#########")
        rospy.loginfo("set up")
        print("#########")
Example #6
    def __init__(self, graph, **kwargs):
        self.graph = graph
        self.horizon = kwargs["horizon"]
        self.speed = kwargs["speed"]
        self.rate_funcs = kwargs["rate_funcs"]
        self.entropies = kwargs["entropies"]
        self.eps_time = kwargs["eps_time"]
        self.visualizer = kwargs.get("visualizer", None)
        self.agent_heap = self.init_agent_heap(kwargs["agents"])
        self.times = self.init_times()
        self.costs = self.init_costs()
        self.pl = planner.Planner(self.graph,
                                  times=self.times,
                                  rate_funcs=self.rate_funcs,
                                  entropies=self.entropies,
                                  costs=self.costs,
                                  horizon=self.horizon,
                                  eps_time=self.eps_time)
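The constructor above is driven entirely by keyword arguments. A minimal sketch of the expected shape follows; every value below is invented for illustration and does not come from the original project.

# Illustrative only: plausible kwargs for the constructor above.
kwargs = dict(
    horizon=10.0,      # planning horizon forwarded to planner.Planner
    speed=1.0,         # agent movement speed
    rate_funcs={},     # per-node rate functions
    entropies={},      # per-node entropy estimates
    eps_time=1e-3,     # time-step tolerance for the planner
    agents=[],         # seeds init_agent_heap()
    visualizer=None,   # optional; defaults to None
)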
Example #7
    def __init__(self, which='webcam'):
        self.which = which
        if which == 'planner':
            self.network = self.video = planner.Planner()
            self.displayer = None

        elif which == 'webcam':
            self.network = NoNetwork()
            self.video = Webcam()
            self.displayer = VideoDisplayer(self.video)
            
        elif which == 'drone':
            self.network = Network()
            self.video = Video()
            self.displayer = VideoDisplayer(self.video)
            
        else:
            raise ValueError("Unrecognized option: {}".format(which))

        self.stop()
        self.network.sendrecv('streamon')
Example #8
# Imports assumed by this excerpt; the snippet begins mid-script, and
# `hw_spec` is defined earlier in the original file (see Example #9 for
# a similar hw.HardwareSpec setup).
import time

import torch
import torch.nn as nn

import planner as pln

data_7_2 = torch.randn(1, 64, 27, 27)
conv_7_2 = nn.Conv2d(64, 256, 1, padding=0)
conv_7_3 = nn.Conv2d(64, 256, 3, padding=1)

data_8_1 = torch.randn(1, 512, 13, 13)
conv_8_1 = nn.Conv2d(512, 64, 1, padding=0)

data_8_2 = torch.randn(1, 64, 13, 13)
conv_8_2 = nn.Conv2d(64, 256, 1, padding=0)
conv_8_3 = nn.Conv2d(64, 256, 3, padding=1)

data_9 = torch.randn(1, 512, 13, 13)
conv_9 = nn.Conv2d(512, 1000, 1, padding=0)

pnn = pln.Planner(writeup=True, codegen=True, realsim=True)


def task(name, pnn, data, module):
    global hw_spec
    print('[Front-end] \x1b[32m' + str(name) + ' start...\x1b[0m\n')
    pnn.set_data(data=data,
                 module=module,
                 hw_spec=hw_spec,
                 writeup_file='vgg19.csv',
                 testcase=name)
    pnn.run('../../build/cnn_planner')
    print('[Front-end] \x1b[32m' + str(name) + ' finish!\x1b[0m\n')


start_time = time.time()
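A plausible driver for the task() helper, using the tensors and layers already defined above; the original script's actual call sequence is elided from this excerpt.

# Illustrative calls only; the original loop/sequence is not shown.
task('conv_7_2', pnn, data_7_2, conv_7_2)
task('conv_7_3', pnn, data_7_2, conv_7_3)
task('conv_8_1', pnn, data_8_1, conv_8_1)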
Example #9
import planner as pln
import hardware as hw
import dataset
import models

import torch.nn
import torch

import time

simd_cfg_path = '../../hwcfg/simd.json'
hw_spec = hw.HardwareSpec(simd_cfg_path)

data = torch.Tensor(1, 3, 256, 256)
resnet50 = models.resnet50()

pnn = pln.Planner()

start_time = time.time()

for name, module in resnet50.named_modules():
    if isinstance(module, torch.nn.Sequential):
        continue
    pnn.set_data(data=data, module=module, hw_spec=hw_spec, layer_name=name)
    data = pnn.run('../../build')

elapsed_time = time.time() - start_time
print('[Front-end] Elapsed time: ' +
      time.strftime('%H hours %M min %S sec', time.gmtime(elapsed_time)))
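The loop above skips nn.Sequential containers so that only concrete layers reach the planner. An equivalent, slightly more general filter (a sketch, not from the original script) would keep only leaf modules:

# Sketch: pass only leaf modules (ones with no children) to the planner.
for name, module in resnet50.named_modules():
    if list(module.children()):
        continue  # skip containers such as Sequential or Bottleneck blocks
    pnn.set_data(data=data, module=module, hw_spec=hw_spec, layer_name=name)
    data = pnn.run('../../build')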
Example #10
import commands
import planner

# Imports assumed by this excerpt (project-local modules):
import frame_loader
import mine_detection
import robot_detection

record = None
# source = "table2.avi"
source = 1

if type(source) == int:
    fl = frame_loader.CameraFrameLoader(source, record)
else:
    fl = frame_loader.VideoFrameLoader(source, record)

md = mine_detection.MineDetection()
rd = robot_detection.RobotDetection()
c = commands.Commander()
p = planner.Planner(md, rd, c)

# Block in the planner's wait loop until interrupted with Ctrl-C.
try:
    while True:
        p.wait()
except KeyboardInterrupt:
    print("great")

# keep looping
while True:
    try:
        frame = fl.get_frame_cropped().copy()
        frame, robot_mask, cleared_mask, rd_mask = rd.find_circles(frame)
        mine_mask = md.get_mines_mask(frame, robot_mask * cleared_mask)
        frame = md.get_mine_positions(frame)
        frame = p.run(frame)
Example #11
            merged_dict[key] = d1[key]
        else:
            merged_dict[key] = d2[key]
    return merged_dict


#################################################################
if __name__ == '__main__':

    import planner
    import dot_plan

    args = parse()

    # make a policy given domain and problem
    policy = planner.Planner(args.domain, args.problem, args.planners,
                             args.rank, args.verbose)

    # transform the produced policy into a contingency plan and print it
    plan = policy.plan()
    path = policy.get_paths(policy.plan())[0]
    policy.print_plan(plan=path)

    #############################
    # get possible concurrent and joint executions
    single_executions, joint_executions = concurrent_executions(
        policy, path, args.agents)

    if args.verbose:
        print(color.fg_yellow('----------------------------------'))
        print(color.fg_yellow('-- possible concurrent executions'))
        print(color.fg_yellow('----------------------------------'))
Example #12
def make_plan(domain,
              problem,
              planners=['optic-clp'],
              agents=[],
              temporal_actions=[],
              rank=False,
              verbose=False):

    import planner
    import dot_plan
    import dot_ma_plan

    # make a policy given domain and problem
    policy = planner.Planner(domain=domain,
                             problem=problem,
                             planners=planners,
                             rank=rank,
                             verbose=verbose)

    # transform the produced policy into a contingency plan and print it
    plan = policy.plan()
    policy.print_plan(plan=plan)

    #############################
    # get possible concurrent and joint executions
    single_executions, joint_executions = concurrent_executions(
        policy, plan, agents)

    if verbose:
        print(color.fg_yellow('----------------------------------'))
        print(color.fg_yellow('-- possible concurrent executions'))
        print(color.fg_yellow('----------------------------------'))
        for i, single_execution in enumerate(single_executions):
            print(color.fg_yellow('-- execution_{}'.format(str(i))))
            for level, (actions, outcomes) in sorted(single_execution.items()):
                # for level, (actions, outcomes) in sorted(merge_dict(single_execution,joint_executions).items()):
                print('{} : {} {}'.format(str(level),
                                          ' '.join(map(str,
                                                       actions)), outcomes))

        print(color.fg_yellow('-- joint executions'))
        for level, (actions, outcomes) in joint_executions.items():
            print('{} : {} {}'.format(str(level), ' '.join(map(str, actions)),
                                      outcomes))

    #############################
    # refine and separate the concurrent executions into concurrent clusters
    main_list = concurrent_subplans(policy, plan, agents)

    if verbose:
        print(color.fg_yellow('\n----------------------------------'))
        print(color.fg_yellow('-- actual multi-agent plan'))
        print(color.fg_yellow('----------------------------------'))

        for i, (key, subplans) in enumerate(main_list.items()):
            print(
                color.fg_yellow(
                    '---------------------------------- block_{}'.format(
                        str(i))))
            for j, subplan in enumerate(subplans):
                if (len(subplans)) > 1:
                    print(color.fg_beige('-- subplan_{}'.format(str(j))))
                for k, (actions, outcomes) in subplan.items():
                    print('{} -- {} {}'.format(k, ' '.join(map(str, actions)),
                                               outcomes))

    #############################
    # convert the plan into a concurrent plan in json files
    plan_json_file, actions_json_file = json_ma_plan(policy, agents,
                                                     temporal_actions)

    print(color.fg_yellow('-- plan_json_file:') + plan_json_file)
    print(color.fg_yellow('-- actions_json_file:') + actions_json_file)
    print(
        color.fg_yellow('-- graphical plan_json_file:') + plan_json_file +
        '.dot')
    os.system('lua lua/json_multiagent_plan.lua %s &' % plan_json_file)
    if verbose: os.system('xdot %s.dot &' % plan_json_file)

    # generate a graph of the policy as a dot file in graphviz
    dot_file = dot_plan.gen_dot_plan(plan=plan,
                                     domain_file=domain,
                                     problem_file=problem)
    print(color.fg_yellow('-- plan in dot file: ') + dot_file)
    # transform the plan into a parallel plan
    dot_file, tred_dot_file = dot_ma_plan.parallel_plan(policy,
                                                        verbose=verbose)
    print(color.fg_yellow('-- precedence graph: ') + dot_file)
    print(color.fg_yellow('-- transitive reduction: ') + tred_dot_file)
    if verbose: os.system('xdot %s &' % tred_dot_file)

    # print out resulting info
    print('\nPlanning domain: %s' % policy.domain_file)
    print('Planning problem: %s' % policy.problem_file)
    print('Arguments: %s' % ' '.join(sys.argv[3:]))
    print('Policy length: %i' % len(policy.policy))
    print('Plan length: %i' % (len(plan) - 1))
    print('Compilation time: %.3f s' % policy.compilation_time)
    print('Planning time: %.3f s' % policy.planning_time)
    print('Planning iterations (all-outcome): %i' %
          policy.alloutcome_planning_call)
    print('Total number of replannings (single-outcome): %i' %
          policy.singleoutcome_planning_call)
    print('Total number of unsolvable states: %i' %
          len(policy.unsolvable_states))

    return (policy, plan, plan_json_file, actions_json_file)
Example #13
def getPlan(initial_state, actions, positive_goals, negative_goals):
    import planner
    p = planner.Planner()
    return p.getPlan(initial_state, actions, positive_goals, negative_goals)
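A hedged usage sketch for the wrapper above. The state, action, and goal encodings here are invented for illustration; what this particular planner.Planner.getPlan actually expects is defined by the surrounding project and not shown in this excerpt.

# Hypothetical call; all encodings below are illustrative only.
initial_state = {('at', 'robot', 'room1')}
actions = []            # would hold the project's action objects
positive_goals = {('at', 'robot', 'room2')}
negative_goals = set()

plan = getPlan(initial_state, actions, positive_goals, negative_goals)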
Example #14
def main():
    ## parse arguments
    parser = parse()
    args = parser.parse_args()
    if args.domain is None:
        parser.print_help()
        sys.exit()

    ## make a policy given domain and problem
    policy = planner.Planner(args.domain, args.problem, args.planners,
                             args.safe_planner, args.rank, args.all_outcome,
                             args.verbose)

    ## transform the produced policy into a contingency plan
    plan = policy.plan()

    ## print out the plan in a readable form
    policy.print_plan(del_effect_inc=True, det_effect_inc=False)

    ## print out sub-paths in the plan
    if args.path:
        paths = policy.get_paths(plan)
        policy.print_paths(paths=paths, del_effect_inc=True)
        # for path in paths:
        #     policy.print_plan(plan=path, del_effect_inc=True)
        ## generate graphs of sub-paths too
        if args.dot:
            for i, path in enumerate(paths):
                dot_file = dot_plan.gen_dot_plan(plan=path)
                print(
                    color.fg_yellow('-- path{} dot file: ').format(str(i +
                                                                       1)) +
                    dot_file)
                # os.system('xdot %s &' % dot_file)
            dot_file = dot_plan.gen_dot_plan(plan=paths[0])
            # os.system('xdot %s &' % dot_file)
            print('')

    ## generate a graph of the policy as a dot file in graphviz
    if args.dot:
        plan = policy.plan(tree=True)
        dot_file = dot_plan.gen_dot_plan(plan=plan,
                                         del_effect=True,
                                         domain_file=args.domain,
                                         problem_file=args.problem)
        print(color.fg_yellow('-- dot file: ') + dot_file + '\n')
        # os.system('xdot %s &' % dot_file)
        # os.system('dot -T pdf %s > %s.pdf &' % (dot_file, dot_file))
        # os.system('evince %s.pdf &' % dot_file)

    ## transform the policy into a multi-agent json plan
    if args.json:
        import json_ma_plan
        import dot_ma_plan
        json_output = json_ma_plan.json_ma_plan(policy, verbose=args.verbose)
        if json_output is not None:
            plan_json_file, actions_json_file = json_output
            print(
                color.fg_yellow('-- plan_json_file:') + plan_json_file +
                color.fg_red(' [EXPERIMENTAL!]'))
            print(
                color.fg_yellow('-- actions_json_file:') + actions_json_file +
                color.fg_red(' [EXPERIMENTAL!]'))
            os.system('cd lua && lua json_multiagent_plan.lua ../%s &' %
                      plan_json_file)
            print(
                color.fg_yellow('-- plan_json_dot_file:') +
                ('%s.dot' % plan_json_file) + color.fg_red(' [EXPERIMENTAL!]'))
            # transform the plan into a parallel plan
            dot_file, tred_dot_file = dot_ma_plan.parallel_plan(
                policy, verbose=args.verbose)
            print(color.fg_yellow('-- graphviz file: ') + dot_file)
            print(color.fg_yellow('-- transitive reduction: ') + tred_dot_file)
            # os.system('xdot %s.dot &' % plan_json_file)

    ## also transform the plan itself into a json file
    if args.json:
        import json_plan
        plan = policy.plan(tree=False)
        json_file, plan_json = json_plan.json_plan(policy)
        print(
            color.fg_yellow('\n-- json file: ') + json_file +
            color.fg_red(' [EXPERIMENTAL!]'))
        print(
            color.fg_yellow('-- try: ') + 'lua json_plan.lua ' + json_file +
            color.fg_red(' [EXPERIMENTAL!]\n'))

    if args.store:
        stat_file = policy.log_performance(plan)
        print(color.fg_yellow('-- planner performance: ') + stat_file)

    # print out resulting info
    if args.problem is not None:
        print('\nPlanning domain: %s' % policy.domain_file)
        print('Planning problem: %s' % policy.problem_file)
        print('Arguments: %s' % ' '.join(sys.argv[3:]))
    else:
        print('Planning problem: %s' % policy.domain_file)
        print('Arguments: %s' % ' '.join(sys.argv[2:]))
    print('Policy length: %i' % len(policy.policy))
    print('Plan length: %i' % (len(plan) - 1))
    print('Compilation time: %.3f s' % policy.compilation_time)
    print('Planning time: %.3f s' % policy.planning_time)
    print('Planning iterations (all-outcome): %i' %
          policy.alloutcome_planning_call)
    print('Total number of replannings (single-outcome): %i' %
          policy.singleoutcome_planning_call)
    print('Total number of unsolvable states: %i' %
          len(policy.unsolvable_states))
Example #15
    print "new start?: " + str(start_q)

    print "target_xy: " + str(target_xy)
    print "target_q: " + str(target_q)

    # setup controller
    kp = 40
    ki = 0
    kd = 5
    print "kp: " + str(kp) + ", ki: " + str(ki) + ", kd: " + str(kd)

    start_q = arm.q
    grav_comp = True
    alpha = 1.0

    controller = control.PID(kp,
                             ki,
                             kd,
                             start_q,
                             target_q,
                             grav_comp,
                             alpha=alpha)
    trajplanner = planner.Planner(start_q, target_q, alpha=alpha)

    # start simulator
    sim_flag = sys.argv[1]
    iactive = True
    sim = Simulator(dt=dt, sim_type=sim_flag)
    sim.run(arm, controller, trajplanner, target_xy, target_q)
    sim.show()