Example #1
 def __init__(self, request):
     super().__init__(request)
     if profile is not None:
         self.profiler = profile.Profile()
Example #2
 def setUpClass(cls):
     DOM.use(DOM.HTML5LIB)
     cls.pr = cProfile.Profile()
     cls.pr.enable()
     print("\nhtml5lib")
Example #3
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00) 
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: c:\Jenkins\live\output\Live\win_64_static\Release\python-bundle\MIDI Remote Scripts\ableton\v2\control_surface\profile.py
# Compiled at: 2020-05-05 13:23:28
"""
Provides facilities to ease the profiling of Control Surfaces.
"""
from __future__ import absolute_import, print_function, unicode_literals
from functools import wraps, partial
ENABLE_PROFILING = False
if ENABLE_PROFILING:
    import cProfile
    PROFILER = cProfile.Profile()

def profile(fn):
    """
    Decorator to mark a function to be profiled.
    """
    if not ENABLE_PROFILING:
        return fn

    @wraps(fn)
    def wrapper(self, *a, **k):
        if PROFILER:
            return PROFILER.runcall(partial(fn, self, *a, **k))
        else:
            print('Cannot profile (%s), it was probably reloaded' % fn.__name__)
            return fn(self, *a, **k)

    return wrapper
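
A usage sketch (not from the decompiled module): with ENABLE_PROFILING switched on, a method marked with @profile runs through the shared PROFILER, whose stats can be inspected afterwards. The class and method names here are hypothetical:

# Hypothetical usage of the @profile decorator above.
class Surface(object):

    @profile
    def build_midi_map(self):
        pass  # placeholder body for illustration

if ENABLE_PROFILING:
    import pstats
    Surface().build_midi_map()
    pstats.Stats(PROFILER).sort_stats('cumulative').print_stats(10)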
Example #4
def profileStart():
    import cProfile
    global profile
    profile = cProfile.Profile()
    profile.enable()
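
The matching stop helper is not shown; a minimal sketch, assuming the global profile object created by profileStart() is reused and the output filename is arbitrary:

def profileStop(filename='run.prof'):
    # Hypothetical counterpart to profileStart(): stop the global profiler
    # and dump its stats for later inspection with pstats.
    profile.disable()
    profile.dump_stats(filename)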
Example #5
 def setUpClass(cls):
     DOM.use(DOM.LXML)
     cls.pr = cProfile.Profile()
     cls.pr.enable()
     print("\nlxml")
Example #6
def picknplace():
    # Define positions.
    rpos = geometry_msgs.msg.Pose()
    rpos.position.x = 0.555
    rpos.position.y = 0.0
    rpos.position.z = 0.206
    rpos.orientation.x = 1.0
    rpos.orientation.y = 0.0
    rpos.orientation.z = 0.0
    rpos.orientation.w = 0.0

    lpos = geometry_msgs.msg.Pose()
    lpos.position.x = 0.65
    lpos.position.y = 0.6
    lpos.position.z = 0.206
    lpos.orientation.x = 1.0
    lpos.orientation.y = 0.0
    lpos.orientation.z = 0.0
    lpos.orientation.w = 0.0

    placegoal = geometry_msgs.msg.Pose()
    placegoal.position.x = 0.55
    placegoal.position.y = 0.28
    placegoal.position.z = 0
    placegoal.orientation.x = 1.0
    placegoal.orientation.y = 0.0
    placegoal.orientation.z = 0.0
    placegoal.orientation.w = 0.0

    # Define variables.
    offset_zero_point = 0.903
    table_size_x = 0.714655654394
    table_size_y = 1.05043717328
    table_size_z = 0.729766045265
    center_x = 0.457327827197
    center_y = 0.145765166941
    center_z = -0.538116977368
    # The distance from the zero point in MoveIt to the ground is 0.903 m.
    # The value is not always the same (see the README).
    center_z_cube = -offset_zero_point + table_size_z + 0.0275/2
    pressure_ok = 0
    j = 0
    old_k = 0
    k = 0
    start = 1
    u = 0
    locs_x = []
    # Initialize a list for the objects and the stacked cubes.
    objlist = ['obj01', 'obj02', 'obj03', 'obj04', 'obj05', 'obj06', 'obj07', 'obj08', 'obj09', 'obj10', 'obj11']
    boxlist = ['box01', 'box02', 'box03', 'box04', 'box05', 'box06', 'box07', 'box08', 'box09', 'box10', 'box11']
    # Clear planning scene.
    p.clear()
    # Add table as attached object.
    p.attachBox('table', table_size_x, table_size_y, table_size_z, center_x, center_y, center_z, 'base', touch_links=['pedestal'])
    p.waitForSync()
    # Move left arm to start state. 
    left_arm.set_pose_target(lpos)
    left_arm.plan()
    left_arm.go(wait=True)
    # cProfile to measure the performance (time) of the task.
    pr = cProfile.Profile()
    pr.enable()
    # Loop to continue pick and place until all objects are cleared from table.
    while start:
        if start:
            start = 0
            old_k += k
            k = 0
            u = 0
            #locs=[]
            # Move right arm to start state. 
            right_arm.set_pose_target(rpos)
            right_arm.plan()
            right_arm.go(wait=True)
            time.sleep(2)
            # Receive the data from all objects from the topic "detected_objects".
            temp = rospy.wait_for_message("detected_objects", PoseArray) 
            locs = temp.poses 
            locs_x = []
            locs_y = []
            orien = []
            size = []

            # Add the data from the objects.
            for i in range(len(locs)):
                locs_x.append(locs[i].position.x) 
                locs_y.append(locs[i].position.y) 
                orien.append(locs[i].position.z*pi/180)
                size.append(locs[i].orientation.x)

            # Filter objects list to remove multiple detected locations for same objects.
            ind_rmv = []
            for i in range(0,len(locs)):
                if (locs_y[i] > 0.24 or locs_x[i] > 0.75):
                    ind_rmv.append(i)
                    continue
                for j in range(i,len(locs)):
                    if not (i == j):
                        if sqrt((locs_x[i] - locs_x[j])**2 + (locs_y[i] - locs_y[j])**2)<0.018:
                            ind_rmv.append(i)
        
            locs_x = del_meth(locs_x, ind_rmv)
            locs_y = del_meth(locs_y, ind_rmv)
            orien = del_meth(orien, ind_rmv) 
            size = del_meth(size, ind_rmv)		

        # Do the task only if there are still objects on the table.
        while u<len(locs_x):
            # Sort objects based on size (largest first to smallest last). This was done to enable stacking large cubes.
            ig0 = itemgetter(0)
            sorted_lists = zip(*sorted(zip(size,locs_x,locs_y,orien), reverse=True, key=ig0))
            locs_x = list(sorted_lists[1])
            locs_y = list(sorted_lists[2])
            orien = list(sorted_lists[3])
            size = list(sorted_lists[0])
            # Initialize the data of the biggest object on the table.
            xn = locs_x[u]
            yn = locs_y[u]
            # -0.16 is the z position to grip the objects on the table.
            zn = -0.16
            thn = orien[u]
            sz = size[u]
            if thn > pi/4:
                thn = -1*(thn%(pi/4))
            # Clear planning scene.
            p.clear()
            # Add table as attached object.
            p.attachBox('table', table_size_x, table_size_y, table_size_z, center_x, center_y, center_z, 'base', touch_links=['pedestal'])
            # Add the detected objects into the planning scene.
            #for i in range(1,len(locs_x)):
            #    p.addBox(objlist[i], 0.05, 0.05, 0.0275, locs_x[i], locs_y[i], center_z_cube)
            # Add the stacked objects as collision objects into the planning scene to avoid moving against them.
            #for e in range(0, old_k+k):
            #    p.attachBox(boxlist[e], 0.07, 0.07, 0.0275, placegoal.position.x, placegoal.position.y, center_z_cube+0.0275*(e-1), 'base', touch_links=['cubes'])
            if k > 0:
                p.attachBox(boxlist[0], 0.07, 0.07, 0.0275*k, placegoal.position.x, placegoal.position.y, center_z_cube, 'base', touch_links=['cubes'])
            p.waitForSync()
            # Initialize the approach pickgoal (5 cm to pickgoal).
            approach_pickgoal = geometry_msgs.msg.Pose()
            approach_pickgoal.position.x = xn
            approach_pickgoal.position.y = yn
            approach_pickgoal.position.z = zn+0.05

            approach_pickgoal_dummy = PoseStamped()
            approach_pickgoal_dummy.header.frame_id = "base"
            approach_pickgoal_dummy.header.stamp = rospy.Time.now()
            approach_pickgoal_dummy.pose.position.x = xn
            approach_pickgoal_dummy.pose.position.y = yn
            approach_pickgoal_dummy.pose.position.z = zn+0.05
            approach_pickgoal_dummy.pose.orientation.x = 1.0
            approach_pickgoal_dummy.pose.orientation.y = 0.0
            approach_pickgoal_dummy.pose.orientation.z = 0.0
            approach_pickgoal_dummy.pose.orientation.w = 0.0

            # Orientate the gripper --> uses function from geometry.py (by Mike Ferguson) to 'rotate a pose' given rpy angles.
            approach_pickgoal_dummy.pose = rotate_pose_msg_by_euler_angles(approach_pickgoal_dummy.pose, 0.0, 0.0, thn)
            approach_pickgoal.orientation.x = approach_pickgoal_dummy.pose.orientation.x
            approach_pickgoal.orientation.y = approach_pickgoal_dummy.pose.orientation.y
            approach_pickgoal.orientation.z = approach_pickgoal_dummy.pose.orientation.z
            approach_pickgoal.orientation.w = approach_pickgoal_dummy.pose.orientation.w
            # Move to the approach goal and the pickgoal.
            right_arm.set_pose_target(approach_pickgoal)
            right_arm.plan()
            right_arm.go(wait=True)
            pickgoal = deepcopy(approach_pickgoal)
            pickgoal.position.z = zn
            right_arm.set_pose_target(pickgoal)
            right_arm.plan()
            right_arm.go(wait=True)
            time.sleep(0.5)
            # Read the force in z direction.
            b = rightarm.endpoint_effort()
            z_ = b['force']
            z_force = z_.z
            # Continue with other objects if the gripper isn't at the right position and presses on an object.
            #print("----->force in z direction:", z_force)
            if z_force > -4:
                rightgripper.close()
                attempts = 0
                pressure_ok = 1
                # If the gripper hasn't built up enough pressure after 2 seconds, it opens and we continue with other objects.
                while(rightgripper.force() < 25 and pressure_ok == 1):
                    time.sleep(0.04)
                    attempts += 1
                    if(attempts > 50):
                        rightgripper.open()
                        pressure_ok = 0
                        print("----->pressure is too low<-----")
            else:
                print("----->gripper presses on an object<-----")

            # Move back to the approach pickgoal.
            right_arm.set_pose_target(approach_pickgoal)
            right_arm.plan()
            right_arm.go(wait=True)

            if pressure_ok and z_force > -4:
                # Define the approach placegoal.
                # Increase the height of the tower every time by 2.75 cm.
                approached_placegoal = deepcopy(placegoal)
                approached_placegoal.position.z = -0.155+((old_k+k)*0.0275)+0.08
                # Define the placegoal.
                placegoal.position.z = -0.155+((old_k+k)*0.0275)
                right_arm.set_pose_target(approached_placegoal)
                right_arm.plan()
                right_arm.go(wait=True)
                right_arm.set_pose_target(placegoal)
                right_arm.plan()
                right_arm.go(wait=True)
                rightgripper.open()
                while(rightgripper.force() > 10):
                    time.sleep(0.01)
                # Move to the approach placegoal.
                right_arm.set_pose_target(approached_placegoal)
                right_arm.plan()
                right_arm.go(wait=True)
                k += 1
            u += 1
        if u != k:
            start = 1
        print "\nBaxter picked", (old_k+k),"objects of a total of",(old_k+len(locs_x)),"succesful\n!"

    right_arm.set_pose_target(rpos)
    right_arm.plan()
    right_arm.go(wait=True)
    pr.disable()
    sortby = 'cumulative'
    ps = pstats.Stats(pr).sort_stats(sortby).print_stats(0.0)
    p.clear()
    moveit_commander.roscpp_shutdown()
    # Exit MoveIt.
    moveit_commander.os._exit(0)
Example #7
 def __init__(self, filepath='last-run.prof'):
     self.filepath = filepath
     self._profile = cProfile.Profile() if filepath else None
Example #8
 def __init__(self, dataDir):
     super(CpuProfilerBackend, self).__init__(dataDir)
     self.profiler = cProfile.Profile()
Example #9
 def command_loop(self):
     # Cache the thread setting.
     useBgThread = self.prefs.editor['useBgThread']
     cmdCount = 0
     # Track the time needed to handle commands and render the UI.
     # (A performance measurement).
     self.mainLoopTime = 0
     self.mainLoopTimePeak = 0
     self.cursesWindowGetCh = app.window.mainCursesWindow.getch
     if self.prefs.startup['timeStartup']:
         # When running a timing of the application startup, push a CTRL_Q
         # onto the curses event messages to simulate a full startup with a
         # GUI render.
         curses.ungetch(17)
     start = time.time()
     # The first render, to get something on the screen.
     if useBgThread:
         self.bg.put(u"cmdList", [])
     else:
         self.programWindow.short_time_slice()
         self.programWindow.render()
         self.backgroundFrame.set_cmd_count(0)
     # This is the 'main loop'. Execution doesn't leave this loop until the
     # application is closing down.
     while not self.exiting:
         if 0:
             profile = cProfile.Profile()
             profile.enable()
             self.refresh(drawList, cursor, cmdCount)
             profile.disable()
             output = io.StringIO()
             stats = pstats.Stats(profile,
                                  stream=output).sort_stats('cumulative')
             stats.print_stats()
             app.log.info(output.getvalue())
         self.mainLoopTime = time.time() - start
         if self.mainLoopTime > self.mainLoopTimePeak:
             self.mainLoopTimePeak = self.mainLoopTime
         # Gather several commands into a batch before doing a redraw.
         # (A performance optimization).
         cmdList = []
         while not len(cmdList):
             if not useBgThread:
                 (drawList, cursor,
                  frameCmdCount) = self.backgroundFrame.grab_frame()
                 if frameCmdCount is not None:
                     self.frontFrame = (drawList, cursor, frameCmdCount)
             if self.frontFrame is not None:
                 drawList, cursor, frameCmdCount = self.frontFrame
                 self.refresh(drawList, cursor, frameCmdCount)
                 self.frontFrame = None
             for _ in range(5):
                 eventInfo = None
                 if self.exiting:
                     return
                 ch = self.get_ch()
                 # assert isinstance(ch, int), type(ch)
                 if ch == curses.ascii.ESC:
                     # Some keys are sent from the terminal as a sequence of
                     # bytes beginning with an Escape character. To help
                     # reason about these events (and apply event handler
                     # callback functions) the sequence is converted into
                     # tuple.
                     keySequence = []
                     n = self.get_ch()
                     while n != curses.ERR:
                         keySequence.append(n)
                         n = self.get_ch()
                     #app.log.info('sequence\n', keySequence)
                     # Check for Bracketed Paste Mode begin.
                     paste_begin = app.curses_util.BRACKETED_PASTE_BEGIN
                     if tuple(
                             keySequence[:len(paste_begin)]) == paste_begin:
                         ch = app.curses_util.BRACKETED_PASTE
                         keySequence = keySequence[len(paste_begin):]
                         paste_end = (
                             curses.ascii.ESC,
                         ) + app.curses_util.BRACKETED_PASTE_END
                         while tuple(keySequence[-len(paste_end):]
                                     ) != paste_end:
                             #app.log.info('waiting in paste mode')
                             n = self.get_ch()
                             if n != curses.ERR:
                                 keySequence.append(n)
                         keySequence = keySequence[:-(len(paste_end))]
                         eventInfo = struct.pack(
                             'B' * len(keySequence),
                             *keySequence).decode(u"utf-8")
                     else:
                         ch = tuple(keySequence)
                     if not ch:
                         # The sequence was empty, so it looks like this
                         # Escape wasn't really the start of a sequence and
                         # is instead a stand-alone Escape. Just forward the
                         # esc.
                         ch = curses.ascii.ESC
                 elif type(ch) is int and 160 <= ch < 257:
                     # Start of utf-8 character.
                     u = None
                     if (ch & 0xe0) == 0xc0:
                         # Two byte utf-8.
                         b = self.get_ch()
                         u = bytes_to_unicode((ch, b))
                     elif (ch & 0xf0) == 0xe0:
                         # Three byte utf-8.
                         b = self.get_ch()
                         c = self.get_ch()
                         u = bytes_to_unicode((ch, b, c))
                     elif (ch & 0xf8) == 0xf0:
                         # Four byte utf-8.
                         b = self.get_ch()
                         c = self.get_ch()
                         d = self.get_ch()
                         u = bytes_to_unicode((ch, b, c, d))
                     assert u is not None
                     eventInfo = u
                     ch = app.curses_util.UNICODE_INPUT
                 if ch != curses.ERR:
                     self.ch = ch
                     if ch == curses.KEY_MOUSE:
                         # On Ubuntu, Gnome terminal, curses.getmouse() may
                         # only be called once for each KEY_MOUSE. Subsequent
                         # calls will throw an exception. So getmouse is
                         # (only) called here and other parts of the code use
                         # the eventInfo list instead of calling getmouse.
                         self.debugMouseEvent = curses.getmouse()
                         eventInfo = (self.debugMouseEvent, time.time())
                     cmdList.append((ch, eventInfo))
         start = time.time()
         if len(cmdList):
             if useBgThread:
                 self.bg.put(u"cmdList", cmdList)
             else:
                 self.programWindow.execute_command_list(cmdList)
                 self.programWindow.short_time_slice()
                 self.programWindow.render()
                 cmdCount += len(cmdList)
                 self.backgroundFrame.set_cmd_count(cmdCount)
Example #10
def bayesopt():

    # The caller is responsible for updating comparisons.
    # They should be able to upload x and f exactly as they were provided by
    # the last call to this URL.
    f = np.array(loads(request.args.get('f', '[]')), dtype=np.float)
    x = np.array(loads(request.args.get('x', '[]')), dtype=np.float)
    comparisons = np.array(loads(request.args.get('comparisons', '[]')),
                           dtype=np.int)

    SIGMA = 10.0
    BOUNDS = np.array([
        [0.0, 4.0],
        [0.0, 4.0],
        [0.0, 4.0],
    ])
    KERNELFUNC = partial(default_kernel, a=-.25)

    # Create extra bounds to rule out configurations that burned
    within_bounds = {}
    BURNING_CONFIGURATIONS = [
        (4, 0, 0),
        (4, 1, 0),
        (4, 0, 1),
        (4, 1, 1),
        (4, 1, 1),
        (3, 0, 2),
        (4, 0, 2),
        (4, 1, 2),
        (3, 0, 3),
        (4, 0, 3),
        (4, 1, 3),
        (3, 0, 4),
        (4, 0, 4),
        (4, 1, 4),
    ]
    keys = [_ for _ in itertools.product(range(5), range(5), range(5))]
    for k in keys:
        within_bounds[k] = (k not in BURNING_CONFIGURATIONS)

    def extrabounds(p):
        rounded = np.round(p).astype('int')
        rounded_key = tuple(rounded)
        return within_bounds[rounded_key]

    # If no parameters are given, initialize the data
    if f.shape == (0, ):
        x = np.array([
            [1.0, 1.0, 1.0],
        ])
        f = np.array([
            [0.0],
        ])
        comparisons = np.array([])
        best_f_i = 0
        xbest = x[0]
        xnew = np.array([3.0, 3.0, 3.0])
    # Predict the next point for comparison
    else:
        prof = cProfile.Profile()
        f, Cmap = prof.runcall(newton_rhapson,
                               x,
                               f,
                               comparisons,
                               KERNELFUNC,
                               compute_H,
                               compute_g,
                               SIGMA,
                               maxiter=10)
        prof.dump_stats('code.profile')
        xnew = acquire(x, f, Cmap, BOUNDS, KERNELFUNC, extrabounds)
        best_f_i = np.argmax(f)
        xbest = x[best_f_i]

    # Add newest points to x and f
    x = np.array(x.tolist() + [xnew])
    f = np.array(f.tolist() + [[0.0]])

    return jsonify(
        **{
            'x': x.tolist(),
            'f': f.tolist(),
            'xbest': {
                'value': xbest.tolist(),
                'img': get_cut_image_name(*(xbest.tolist())),
                'index': best_f_i,
            },
            'xnew': {
                'value': xnew.tolist(),
                'img': get_cut_image_name(*(xnew.tolist())),
                'index': len(f) - 1,
            },
            'comparisons': comparisons.tolist()
        })
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p',
                        '--problem',
                        default='mirror',
                        help='The name of the problem to solve')
    parser.add_argument('-a',
                        '--algorithm',
                        default='incremental',
                        help='Specifies the algorithm')
    parser.add_argument('-c',
                        '--cfree',
                        action='store_true',
                        help='Disables collisions')
    parser.add_argument('-d',
                        '--deterministic',
                        action='store_true',
                        help='Uses a deterministic sampler')
    parser.add_argument('-g',
                        '--gurobi',
                        action='store_true',
                        help='Uses gurobi')
    parser.add_argument('-n',
                        '--number',
                        default=1,
                        type=int,
                        help='The number of blocks')
    parser.add_argument('-o',
                        '--optimal',
                        action='store_true',
                        help='Runs in an anytime mode')
    parser.add_argument('-s',
                        '--skeleton',
                        action='store_true',
                        help='Enforces skeleton plan constraints')
    parser.add_argument('-t',
                        '--max_time',
                        default=30,
                        type=int,
                        help='The max time')
    parser.add_argument('-u',
                        '--unit',
                        action='store_true',
                        help='Uses unit costs')
    parser.add_argument('-v',
                        '--visualize',
                        action='store_true',
                        help='Visualizes graphs')
    args = parser.parse_args()
    print('Arguments:', args)

    np.set_printoptions(precision=2)
    if args.deterministic:
        random.seed(0)
        np.random.seed(seed=0)
    print('Random seed:', get_random_seed())

    problem_from_name = {fn.__name__: fn for fn in PROBLEMS}
    if args.problem not in problem_from_name:
        raise ValueError(args.problem)
    print('Problem:', args.problem)
    problem_fn = problem_from_name[args.problem]
    tamp_problem = problem_fn(args.number)
    print(tamp_problem)

    pddlstream_problem = pddlstream_from_tamp(tamp_problem,
                                              collisions=not args.cfree,
                                              use_stream=not args.gurobi,
                                              use_optimizer=args.gurobi)
    print('Constants:', str_from_object(pddlstream_problem.constant_map))
    print('Initial:', sorted_str_from_list(pddlstream_problem.init))
    print('Goal:', str_from_object(pddlstream_problem.goal))
    pr = cProfile.Profile()
    pr.enable()
    success_cost = 0 if args.optimal else INF
    planner = 'max-astar'
    #planner = 'ff-wastar1'
    if args.algorithm == 'incremental':
        solution = solve_incremental(pddlstream_problem,
                                     complexity_step=1,
                                     planner=planner,
                                     unit_costs=args.unit,
                                     success_cost=success_cost,
                                     max_time=args.max_time,
                                     verbose=False)
    else:
        raise ValueError(args.algorithm)

    print_solution(solution)
    plan, cost, evaluations = solution
    pr.disable()
    pstats.Stats(pr).sort_stats('cumtime').print_stats(20)
    if plan is not None:
        display_plan(tamp_problem, plan)
Example #12
  def run(self):
    import datetime
    time_now = datetime.datetime.now()

    self.mpi_logger.log(str(time_now))
    if self.mpi_helper.rank == 0:
      self.mpi_logger.main_log(str(time_now))

    self.mpi_logger.log_step_time("TOTAL")

    self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS")
    self.parse_input()
    self.mpi_logger.log_step_time("PARSE_INPUT_PARAMS", True)

    if self.params.mp.debug.cProfile:
      import cProfile
      pr = cProfile.Profile()
      pr.enable()

    # Create the workers using the factories
    self.mpi_logger.log_step_time("CREATE_WORKERS")
    from xfel.merging import application
    import importlib, copy

    self._resolve_persistent_columns()

    workers = []
    steps = self.params.dispatch.step_list if self.params.dispatch.step_list else default_steps
    for step in steps:
      step_factory_name = step
      step_additional_info = []

      step_info = step.split('_')
      assert len(step_info) > 0
      if len(step_info) > 1:
        step_factory_name = step_info[0]
        step_additional_info = step_info[1:]

      try:
        factory = importlib.import_module('xfel.merging.application.' + step_factory_name + '.factory')
      except ModuleNotFoundError:
        # remember the system path so the custom worker can temporarily modify it
        sys_path = copy.deepcopy(sys.path)
        pathstr = os.path.join(
            '~', '.cctbx.xfel', 'merging', 'application', step_factory_name,
            'factory.py'
        )
        pathstr = os.path.expanduser(pathstr)
        modulename = 'xfel.merging.application.' + step_factory_name + '.factory'
        spec = importlib.util.spec_from_file_location(modulename, pathstr)
        factory = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(factory)
        # reset the path
        sys.path = sys_path

      workers.extend(factory.factory.from_parameters(self.params, step_additional_info, mpi_helper=self.mpi_helper, mpi_logger=self.mpi_logger))

    # Perform phil validation up front
    for worker in workers:
      worker.validate()
    self.mpi_logger.log_step_time("CREATE_WORKERS", True)

    # Do the work
    experiments = reflections = None
    step = 0
    while(workers):
      worker = workers.pop(0)
      self.mpi_logger.log_step_time("STEP_" + worker.__repr__())
      # Log worker name, i.e. execution step name
      step += 1
      if step > 1:
        self.mpi_logger.log('')
      step_desc = "STEP %d: %s"%(step, worker)
      self.mpi_logger.log(step_desc)

      if self.mpi_helper.rank == 0:
        if step > 1:
          self.mpi_logger.main_log('')
        self.mpi_logger.main_log(step_desc)

      # Execute worker
      experiments, reflections = worker.run(experiments, reflections)
      self.mpi_logger.log_step_time("STEP_" + worker.__repr__(), True)
      if experiments:
        self.mpi_logger.log("Ending step with %d experiments"%len(experiments))

    if self.params.output.save_experiments_and_reflections:
      if len(reflections) and 'id' not in reflections:
        from dials.array_family import flex
        id_ = flex.int(len(reflections), -1)
        if experiments:
          for expt_number, expt in enumerate(experiments):
            sel = reflections['exp_id'] == expt.identifier
            id_.set_selected(sel, expt_number)
        else:
          for expt_number, exp_id in enumerate(set(reflections['exp_id'])):
            sel = reflections['exp_id'] == exp_id
            id_.set_selected(sel, expt_number)
        reflections['id'] = id_

        assert (reflections['id'] == -1).count(True) == 0, ((reflections['id'] == -1).count(True), len(reflections))

      if self.mpi_helper.size == 1:
        filename_suffix = ""
      else:
        filename_suffix = "_%06d"%self.mpi_helper.rank

      if len(reflections):
        reflections.as_pickle(os.path.join(self.params.output.output_dir, "%s%s.refl"%(self.params.output.prefix, filename_suffix)))
      if experiments:
        experiments.as_file(os.path.join(self.params.output.output_dir, "%s%s.expt"%(self.params.output.prefix, filename_suffix)))

    self.mpi_logger.log_step_time("TOTAL", True)

    if self.params.mp.debug.cProfile:
      pr.disable()
      pr.dump_stats(os.path.join(self.params.output.output_dir, "cpu_%s_%d.prof"%(self.params.output.prefix, self.mpi_helper.rank)))
Example #13
def main(_):
  args = config_distill.get_args_for_config(FLAGS.config_name)
  args.logdir = FLAGS.logdir
  args.solver.num_workers = FLAGS.num_workers
  args.solver.task = FLAGS.task
  args.solver.ps_tasks = FLAGS.ps_tasks
  args.solver.master = FLAGS.master
  
  args.buildinger.env_class = nav_env.MeshMapper
  fu.makedirs(args.logdir)
  args.buildinger.logdir = args.logdir
  R = nav_env.get_multiplexor_class(args.buildinger, args.solver.task)
  
  if False:
    pr = cProfile.Profile()
    pr.enable()
    rng = np.random.RandomState(0)
    for i in range(1):
      b, instances_perturbs = R.sample_building(rng)
      inputs = b.worker(*(instances_perturbs))
      for j in range(inputs['imgs'].shape[0]):
        p = os.path.join('tmp', '{:d}.png'.format(j))
        img = inputs['imgs'][j,0,:,:,:3]*1
        img = (img).astype(np.uint8)
        fu.write_image(p, img)
      print((inputs['imgs'].shape))
      inputs = R.pre(inputs)
    pr.disable()
    pr.print_stats(2)

  if args.control.train:
    if not gfile.Exists(args.logdir):
      gfile.MakeDirs(args.logdir)
   
    m = utils.Foo()
    m.tf_graph = tf.Graph()
    
    config = tf.ConfigProto()
    config.device_count['GPU'] = 1
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.8
    
    with m.tf_graph.as_default():
      with tf.device(tf.train.replica_device_setter(args.solver.ps_tasks)):
        m = distill.setup_to_run(m, args, is_training=True,
                                batch_norm_is_training=True)

        train_step_kwargs = distill.setup_train_step_kwargs_mesh(
            m, R, os.path.join(args.logdir, 'train'),
            rng_seed=args.solver.task, is_chief=args.solver.task==0, iters=1,
            train_display_interval=args.summary.display_interval)

        final_loss = slim.learning.train(
            train_op=m.train_op,
            logdir=args.logdir,
            master=args.solver.master,
            is_chief=args.solver.task == 0,
            number_of_steps=args.solver.max_steps,
            train_step_fn=tf_utils.train_step_custom,
            train_step_kwargs=train_step_kwargs,
            global_step=m.global_step_op,
            init_op=m.init_op,
            init_fn=m.init_fn,
            sync_optimizer=m.sync_optimizer,
            saver=m.saver_op,
            summary_op=None, session_config=config)
 
  if args.control.test:
    m = utils.Foo()
    m.tf_graph = tf.Graph()
    checkpoint_dir = os.path.join(format(args.logdir))
    with m.tf_graph.as_default():
      m = distill.setup_to_run(m, args, is_training=False,
                              batch_norm_is_training=args.control.force_batchnorm_is_training_at_test)
      
      train_step_kwargs = distill.setup_train_step_kwargs_mesh(
          m, R, os.path.join(args.logdir, args.control.test_name),
          rng_seed=args.solver.task+1, is_chief=args.solver.task==0,
          iters=args.summary.test_iters, train_display_interval=None)
      
      sv = slim.learning.supervisor.Supervisor(
          graph=ops.get_default_graph(), logdir=None, init_op=m.init_op,
          summary_op=None, summary_writer=None, global_step=None, saver=m.saver_op)

      last_checkpoint = None
      while True:
        last_checkpoint = slim.evaluation.wait_for_new_checkpoint(checkpoint_dir, last_checkpoint)
        checkpoint_iter = int(os.path.basename(last_checkpoint).split('-')[1])
        start = time.time()
        logging.info('Starting evaluation at %s using checkpoint %s.', 
                     time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime()),
                     last_checkpoint)
        
        config = tf.ConfigProto()
        config.device_count['GPU'] = 1
        config.gpu_options.allow_growth = True
        config.gpu_options.per_process_gpu_memory_fraction = 0.8
        
        with sv.managed_session(args.solver.master,config=config,
                                start_standard_services=False) as sess:
          sess.run(m.init_op)
          sv.saver.restore(sess, last_checkpoint)
          sv.start_queue_runners(sess)
          vals, _ = tf_utils.train_step_custom(
              sess, None, m.global_step_op, train_step_kwargs, mode='val')
          if checkpoint_iter >= args.solver.max_steps:
            break
Example #14
    def __init__(self, arg=0, testMode=False, **kwargs):
        """With a wx.App some things get done here, before App.__init__
        then some further code is launched in OnInit() which occurs after
        """
        if profiling:
            import cProfile, time
            profile = cProfile.Profile()
            profile.enable()
            t0 = time.time()

        self._appLoaded = False  # set to true when all frames are created
        self.coder = None
        self.runner = None
        self.version = psychopy.__version__
        # set default paths and prefs
        self.prefs = psychopy.prefs
        self._currentThemeSpec = None

        self.keys = self.prefs.keys
        self.prefs.pageCurrent = 0  # track last-viewed page, can return there
        self.IDs = IDStore()
        self.urls = urls.urls
        self.quitting = False
        # check compatibility with last run version (before opening windows)
        self.firstRun = False
        self.testMode = testMode
        self._stdout = sys.stdout
        self._stderr = sys.stderr
        self._stdoutFrame = None
        self.iconCache = themes.IconCache()

        # mdc - removed the following and put it in `app.startApp()` to have
        #       error logging occur sooner.
        #
        # if not self.testMode:
        #     self._lastRunLog = open(os.path.join(
        #             self.prefs.paths['userPrefsDir'], 'last_app_load.log'),
        #             'w')
        #     sys.stderr = sys.stdout = lastLoadErrs = self._lastRunLog
        #     logging.console.setLevel(logging.DEBUG)

        # indicates whether we're running for testing purposes
        self.osfSession = None
        self.pavloviaSession = None

        self.copiedRoutine = None
        self.copiedCompon = None
        self._allFrames = frametracker.openFrames  # ordered; order updated with self.onNewTopWindow

        wx.App.__init__(self, arg)

        # import localization after wx:
        from psychopy import localization  # needed by splash screen
        self.localization = localization
        self.locale = localization.setLocaleWX()
        self.locale.AddCatalog(self.GetAppName())

        logging.flush()
        self.onInit(testMode=testMode, **kwargs)
        if profiling:
            profile.disable()
            print("time to load app = {:.2f}".format(time.time()-t0))
            profile.dump_stats('profileLaunchApp.profile')
        logging.flush()

        # if we're on linux, check if we have the permissions file setup
        from psychopy.app.linuxconfig import (
            LinuxConfigDialog, linuxConfigFileExists)

        if not linuxConfigFileExists():
            linuxConfDlg = LinuxConfigDialog(
                None, timeout=1000 if self.testMode else None)
            linuxConfDlg.ShowModal()
            linuxConfDlg.Destroy()
Example #15
def detail_profile_start():
    profiler = cProfile.Profile()
    profiler.enable()
    return profiler
Example #16
 def wrapper(*args, **kwargs):
     profile = cProfile.Profile()
     result = profile.runcall(f, *args, **kwargs)
     profile.dump_stats(self.fname or ("%s.cprof" % (f.__name__, )))
     return result
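
The reference to self.fname suggests this wrapper is built inside a decorator class. A sketch of what the enclosing class might look like, under that assumption (the class name and constructor are hypothetical):

import cProfile

class ProfileCalls(object):
    # Hypothetical enclosing class: stores an optional stats filename and
    # produces the wrapper above in __call__.
    def __init__(self, fname=None):
        self.fname = fname

    def __call__(self, f):
        def wrapper(*args, **kwargs):
            profile = cProfile.Profile()
            result = profile.runcall(f, *args, **kwargs)
            profile.dump_stats(self.fname or ("%s.cprof" % (f.__name__, )))
            return result
        return wrapper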
Example #17
def main(deterministic=True):
    # TODO: GeometryInstance, InternalGeometry, & GeometryContext to get the shape of objects
    # TODO: cost-sensitive planning to avoid large kuka moves
    # get_contact_results_output_port
    # TODO: gripper closing via collision information

    time_step = 0.0002 # TODO: context.get_continuous_state_vector() fails
    if deterministic:
        random.seed(0)
        np.random.seed(0)

    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--problem', default='load_manipulation', help='The name of the problem to solve.')
    parser.add_argument('-c', '--cfree', action='store_true', help='Disables collisions')
    parser.add_argument('-v', '--visualizer', action='store_true', help='Use the drake visualizer')
    parser.add_argument('-s', '--simulate', action='store_true', help='Simulate')
    args = parser.parse_args()

    problem_fn_from_name = {fn.__name__: fn for fn in PROBLEMS}
    if args.problem not in problem_fn_from_name:
        raise ValueError(args.problem)
    print('Problem:', args.problem)
    problem_fn = problem_fn_from_name[args.problem]

    meshcat_vis = None
    if not args.visualizer:
        import meshcat
        # Important that variable is saved
        meshcat_vis = meshcat.Visualizer()  # vis.set_object
        # http://127.0.0.1:7000/static/

    mbp, scene_graph, task = problem_fn(time_step=time_step)
    #station, mbp, scene_graph = load_station(time_step=time_step)
    #builder.AddSystem(station)
    #dump_plant(mbp)
    #dump_models(mbp)
    print(task)
    #print(sorted(body.name() for body in task.movable_bodies()))
    #print(sorted(body.name() for body in task.fixed_bodies()))

    ##################################################

    builder, _ = build_diagram(mbp, scene_graph, not args.visualizer)
    if args.simulate:
        state_machine = connect_controllers(builder, mbp, task.robot, task.gripper)
    else:
        state_machine = None
    diagram = builder.Build()
    RenderSystemWithGraphviz(diagram) # Useful for getting port names
    diagram_context = diagram.CreateDefaultContext()
    context = diagram.GetMutableSubsystemContext(mbp, diagram_context)
    task.diagram = diagram
    task.diagram_context = diagram_context

    #context = mbp.CreateDefaultContext() # scene_graph.CreateDefaultContext()
    for joint, position in task.initial_positions.items():
        set_joint_position(joint, context, position)
    for model, pose in task.initial_poses.items():
        set_world_pose(mbp, context, model, pose)
    open_wsg50_gripper(mbp, context, task.gripper)
    #close_wsg50_gripper(mbp, context, task.gripper)
    #set_configuration(mbp, context, task.gripper, [-0.05, 0.05])

    # from underactuated.meshcat_visualizer import MeshcatVisualizer
    # #add_meshcat_visualizer(scene_graph)
    # viz = MeshcatVisualizer(scene_graph, draw_timestep=0.033333)
    # viz.load()
    # viz.draw(context)

    diagram.Publish(diagram_context)
    initial_state = get_state(mbp, context)

    ##################################################

    problem = get_pddlstream_problem(mbp, context, scene_graph, task, collisions=not args.cfree)
    pr = cProfile.Profile()
    pr.enable()
    solution = solve_focused(problem, planner='ff-wastar2', max_cost=INF, max_time=120, debug=False)
    pr.disable()
    pstats.Stats(pr).sort_stats('tottime').print_stats(10)
    print_solution(solution)
    plan, cost, evaluations = solution
    if plan is None:
        return
    trajectories = postprocess_plan(mbp, task.gripper, plan)

    ##################################################

    set_state(mbp, context, initial_state)
    if args.simulate:
        splines, gripper_setpoints = convert_splines(mbp, task.robot, task.gripper, context, trajectories)
        sim_duration = compute_duration(splines)
        print('Splines: {}\nDuration: {:.3f} seconds'.format(len(splines), sim_duration))
        set_state(mbp, context, initial_state)

        if True:
            state_machine.Load(splines, gripper_setpoints)
            simulate_splines(diagram, diagram_context, sim_duration)
        else:
            # NOTE: there is a plan that moves home initially for 15 seconds
            from .lab_1.robot_plans import JointSpacePlan
            plan_list = [JointSpacePlan(spline) for spline in splines]
            #meshcat_vis.delete()
            user_input('Simulate?')
            test_manipulation(plan_list, gripper_setpoints)
    else:
        #time_step = None
        #time_step = 0.001
        time_step = 0.02
        step_trajectories(diagram, diagram_context, context, trajectories, time_step=time_step) #, teleport=True)
Example #18
def run_cosipy(cluster, IO, DATA, RESULT, RESTART, futures):

    with Client(cluster) as client:
        print('--------------------------------------------------------------')
        print('\t Starting clients and submit jobs ... \n')
        print(
            '-------------------------------------------------------------- \n'
        )

        print(cluster)
        print(client)

        # Get dimensions of the whole domain
        ny = DATA.dims[northing]
        nx = DATA.dims[easting]

        cp = cProfile.Profile()

        # Get some information about the cluster/nodes
        total_grid_points = DATA.dims[northing] * DATA.dims[easting]
        if slurm_use is True:
            total_cores = processes * nodes
            points_per_core = total_grid_points // total_cores
            print(total_grid_points, total_cores, points_per_core)

        if sge_use is True:
            print('Total grid points: {0}'.format(total_grid_points))

        # Check if evaluation is selected:
        if stake_evaluation is True:
            # Read stake data (data must be given as cumulative changes)
            df_stakes_loc = pd.read_csv(stakes_loc_file,
                                        delimiter='\t',
                                        na_values='-9999')
            df_stakes_data = pd.read_csv(stakes_data_file,
                                         delimiter='\t',
                                         index_col='TIMESTAMP',
                                         na_values='-9999')
            df_stakes_data.index = pd.to_datetime(df_stakes_data.index)

            # Uncomment, if stake data is given as changes between measurements
            # df_stakes_data = df_stakes_data.cumsum(axis=0)

            # Init dataframes to store evaluation statistics
            df_stat = pd.DataFrame()
            df_val = df_stakes_data.copy()

            # reshape and stack coordinates
            coords = np.column_stack(
                (DATA.lat.values.ravel(), DATA.lon.values.ravel()))

            # construct KD-tree, in order to get the closest grid cell
            ground_pixel_tree = scipy.spatial.cKDTree(
                transform_coordinates(coords))

            # Check for stake data
            stakes_list = []
            for index, row in df_stakes_loc.iterrows():
                index = ground_pixel_tree.query(
                    transform_coordinates((row['lat'], row['lon'])))
                index = np.unravel_index(index[1], DATA.lat.shape)
                stakes_list.append((index[0][0], index[1][0], row['id']))

        else:
            stakes_loc = None
            df_stakes_data = None

        # Distribute data and model to workers
        start_res = datetime.now()
        for y, x in product(range(DATA.dims[northing]),
                            range(DATA.dims[easting])):
            if stake_evaluation is True:
                stake_names = []
                # Check if the grid cell contain stakes and store the stake names in a list
                for idx, (stake_loc_y, stake_loc_x,
                          stake_name) in enumerate(stakes_list):
                    if ((y == stake_loc_y) & (x == stake_loc_x)):
                        stake_names.append(stake_name)
            else:
                stake_names = None

            if WRF is True:
                mask = DATA.MASK.sel(south_north=y, west_east=x)
                # Provide restart grid if necessary
                if ((mask == 1) & (restart == False)):
                    if np.isnan(
                            DATA.sel(south_north=y,
                                     west_east=x).to_array()).any():
                        print('ERROR!!!!!!!!!!! There are NaNs in the dataset')
                        sys.exit()
                    futures.append(
                        client.submit(cosipy_core,
                                      DATA.sel(south_north=y, west_east=x),
                                      y,
                                      x,
                                      stake_names=stake_names,
                                      stake_data=df_stakes_data))
                elif ((mask == 1) & (restart == True)):
                    if np.isnan(
                            DATA.sel(south_north=y,
                                     west_east=x).to_array()).any():
                        print('ERROR!!!!!!!!!!! There are NaNs in the dataset')
                        sys.exit()
                    futures.append(
                        client.submit(
                            cosipy_core,
                            DATA.sel(south_north=y, west_east=x),
                            y,
                            x,
                            GRID_RESTART=IO.create_grid_restart().sel(
                                south_north=y, west_east=x),
                            stake_names=stake_names,
                            stake_data=df_stakes_data))
            else:
                mask = DATA.MASK.isel(lat=y, lon=x)
                # Provide restart grid if necessary
                if ((mask == 1) & (restart == False)):
                    if np.isnan(DATA.isel(lat=y, lon=x).to_array()).any():
                        print('ERROR!!!!!!!!!!! There are NaNs in the dataset')
                        sys.exit()
                    futures.append(
                        client.submit(cosipy_core,
                                      DATA.isel(lat=y, lon=x),
                                      y,
                                      x,
                                      stake_names=stake_names,
                                      stake_data=df_stakes_data))
                elif ((mask == 1) & (restart == True)):
                    if np.isnan(DATA.isel(lat=y, lon=x).to_array()).any():
                        print('ERROR!!!!!!!!!!! There are NaNs in the dataset')
                        sys.exit()
                    futures.append(
                        client.submit(
                            cosipy_core,
                            DATA.isel(lat=y, lon=x),
                            y,
                            x,
                            GRID_RESTART=IO.create_grid_restart().isel(lat=y,
                                                                       lon=x),
                            stake_names=stake_names,
                            stake_data=df_stakes_data))
        # Finally, do the calculations and print the progress
        # progress(futures)

        #---------------------------------------
        # Guarantee that restart file is closed
        #---------------------------------------
        if (restart == True):
            IO.get_grid_restart().close()

        # Create numpy arrays which aggregates all local results
        IO.create_global_result_arrays()

        # Create numpy arrays which aggregates all local results
        IO.create_global_restart_arrays()

        #---------------------------------------
        # Assign local results to global
        #---------------------------------------
        for future in as_completed(futures):

            # Get the results from the workers
            indY,indX,local_restart,RAIN,SNOWFALL,LWin,LWout,H,LE,B,MB,surfMB,Q,SNOWHEIGHT,TOTALHEIGHT,TS,ALBEDO,NLAYERS, \
                            ME,intMB,EVAPORATION,SUBLIMATION,CONDENSATION,DEPOSITION,REFREEZE,subM,Z0,surfM, \
                            LAYER_HEIGHT,LAYER_RHO,LAYER_T,LAYER_LWC,LAYER_CC,LAYER_POROSITY,LAYER_ICE_FRACTION, \
                            LAYER_IRREDUCIBLE_WATER,LAYER_REFREEZE,stake_names,stat,df_eval = future.result()

            IO.copy_local_to_global(indY,indX,RAIN,SNOWFALL,LWin,LWout,H,LE,B,MB,surfMB,Q,SNOWHEIGHT,TOTALHEIGHT,TS,ALBEDO,NLAYERS, \
                            ME,intMB,EVAPORATION,SUBLIMATION,CONDENSATION,DEPOSITION,REFREEZE,subM,Z0,surfM,LAYER_HEIGHT,LAYER_RHO, \
                            LAYER_T,LAYER_LWC,LAYER_CC,LAYER_POROSITY,LAYER_ICE_FRACTION,LAYER_IRREDUCIBLE_WATER,LAYER_REFREEZE)

            IO.copy_local_restart_to_global(indY, indX, local_restart)

            # Write results to file
            IO.write_results_to_file()

            # Write restart data to file
            IO.write_restart_to_file()

            if stake_evaluation is True:
                # Store evaluation of stake measurements to dataframe
                stat = stat.rename('rmse')
                df_stat = pd.concat([df_stat, stat])

                for i in stake_names:
                    if (obs_type == 'mb'):
                        df_val[i] = df_eval.mb
                    if (obs_type == 'snowheight'):
                        df_val[i] = df_eval.snowheight

        # Measure time
        end_res = datetime.now() - start_res
        print(
            "\t Time required to do calculations: %4g minutes %2g seconds \n" %
            (end_res.total_seconds() // 60.0, end_res.total_seconds() % 60.0))

        if stake_evaluation is True:
            # Save the statistics and the mass balance simulations at the stakes to files
            df_stat.to_csv(os.path.join(data_path, 'output',
                                        'stake_statistics.csv'),
                           sep='\t',
                           float_format='%.2f')
            df_val.to_csv(os.path.join(data_path, 'output',
                                       'stake_simulations.csv'),
                          sep='\t',
                          float_format='%.2f')
Example #19
    start = ox.get_nearest_node(graph, coords)
    nodes, _ = ox.graph_to_gdfs(graph)

    pivots = get_pivots(graph, nodes, start, dist_goal)
    # pivots = alt_pivots(graph, nodes, start, dist_goal)

    paths = []
    for piv in pivots:
        test = make_loop(graph, nodes, piv, start, dist_goal)
        if test is None:
            print("repeat pivots, skipping")
            continue
        paths.append(test)


        # ox.plot_graph_route(graph, test)
        # print(len(test))

    return paths, nodes

pr = cProfile.Profile()
pr.enable()

loops, nodes = create_route((40.96238, -73.112820), 5)

pr.disable()

with open('profile_log1', 'w+') as f:
    ps = pstats.Stats(pr, stream=f)
    ps.strip_dirs().sort_stats('tottime').print_stats()
Example #20
def enable() -> None:
    global _profile
    import cProfile  # pylint: disable=import-outside-toplevel
    _profile = cProfile.Profile()
    _profile.enable()
    console.verbose("Enabled profiling.\n")
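
A plausible counterpart (an assumption; the original only shows enable()) would stop the global profiler and dump its stats. The filename is hypothetical:

def disable(filename: str = "profile.out") -> None:
    # Hypothetical mirror of enable(): stop the global profiler and save
    # its stats for later inspection with pstats.
    global _profile
    if _profile is not None:
        _profile.disable()
        _profile.dump_stats(filename)
        console.verbose("Disabled profiling.\n")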
Example #21
 def __init__(self, name, args):
     self.name = name
     self.profile = profiler.Profile()
     self.args = args
Example #22
def process_image(base,
                  date,
                  filtf,
                  vers,
                  outfn=None,
                  overwrite=False,
                  outmodel=False,
                  outdirc=None,
                  outdirm=None,
                  verbose=False,
                  resume=False,
                  bmask_deblend=False,
                  maskgal=False,
                  maskdiffuse=True,
                  contmask=False,
                  nproc=numpy.inf,
                  extnamelist=None,
                  plot=False,
                  profile=False,
                  miniter=4,
                  maxiter=10,
                  titer_thresh=2,
                  pixsz=9,
                  wcutoff=0.0,
                  nthreads=1,
                  inject=0,
                  injextnamelist=None,
                  injectfrac=0.1,
                  modsaveonly=False,
                  donefrommod=False,
                  noModsave=False):
    if profile:
        import cProfile
        import pstats
        from guppy import hpy
        hp = hpy()
        before = hp.heap()
        pr = cProfile.Profile()
        pr.enable()

    imfn, ivarfn, dqfn = decaps_filenames(base, date, filtf, vers)
    with fits.open(imfn) as hdulist:
        extnames = [hdu.name for hdu in hdulist]
    if 'PRIMARY' not in extnames:
        raise ValueError('No PRIMARY header in file')
    prihdr = fits.getheader(imfn, extname='PRIMARY')

    bstarfn = os.path.join(os.environ['DECAM_DIR'], 'data',
                           'tyc2brighttrim.fits')
    brightstars = fits.getdata(bstarfn)
    from astropy.coordinates.angle_utilities import angular_separation
    from astropy.coordinates import SkyCoord
    from astropy import units
    coordcen = SkyCoord(ra=prihdr['RA'],
                        dec=prihdr['DEC'],
                        unit=(units.hourangle, units.deg))
    sep = angular_separation(numpy.radians(brightstars['ra']),
                             numpy.radians(brightstars['dec']),
                             coordcen.ra.to(units.radian).value,
                             coordcen.dec.to(units.radian).value)
    sep = numpy.degrees(sep)
    m = sep < 3
    brightstars = brightstars[m]
    dmjd = prihdr['MJD-OBS'] - 51544.5  # J2000 MJD.
    cosd = numpy.cos(
        numpy.radians(numpy.clip(brightstars['dec'], -89.9999, 89.9999)))
    brightstars[
        'ra'] += dmjd * brightstars['pmra'] / 365.25 / cosd / 1000 / 60 / 60
    brightstars['dec'] += dmjd * brightstars['pmde'] / 365.25 / 1000 / 60 / 60
    filt = prihdr['filter']
    # cat filename handling
    if outfn is None or len(outfn) == 0:
        outfn = os.path.splitext(os.path.basename(imfn))[0]
        if outfn[-5:] == '.fits':
            outfn = outfn[:-5]
        outfn = outfn + '.cat.fits'
    if outdirc is not None:
        outfn = os.path.join(outdirc, outfn)
    if not resume or not os.path.exists(outfn):
        fits.writeto(outfn, None, prihdr, overwrite=overwrite)
        extnamesdone = None
    else:
        hdulist = fits.open(outfn)
        extnamesdone = []
        for hdu in hdulist:
            if hdu.name == 'PRIMARY':
                continue
            extfull = hdu.name.split('_')
            ext = "_".join(extfull[:-1])
            if extfull[-1] != 'CAT':
                continue
            extnamesdone.append(ext)
        hdulist.close()
    # model filename handling
    outmodelfn = None
    if outmodel:
        outmodelfn = os.path.splitext(os.path.basename(imfn))[0]
        if outmodelfn[-5:] == '.fits':
            outmodelfn = outmodelfn[:-5]
        outmodelfn = outmodelfn + '.mod.fits'
        if outdirm is not None:
            outmodelfn = os.path.join(outdirm, outmodelfn)
        if (not resume or not os.path.exists(outmodelfn)):
            fits.writeto(outmodelfn, None, prihdr, overwrite=overwrite)
        else:
            if donefrommod:
                hdulist = fits.open(outmodelfn)
                extnamesdone = []
                for hdu in hdulist:
                    if hdu.name == 'PRIMARY':
                        continue
                    ext, exttype = hdu.name.split('_')
                    if exttype != 'SKY':
                        continue
                    extnamesdone.append(ext)
                hdulist.close()
    # fwhm scrape all the ccds
    fwhms = []
    for name in extnames:
        if name == 'PRIMARY':
            continue
        hdr = fits.getheader(imfn, extname=name)
        if 'FWHM' in hdr:
            fwhms.append(hdr['FWHM'])
    fwhms = numpy.array(fwhms)
    fwhms = fwhms[fwhms > 0]

    # Prepare main CCD for loop
    if extnamelist is not None:
        if verbose:
            s = ("Only running CCD subset: [%s]" % ', '.join(extnamelist))
            print(s)

    if extnamesdone is not None:
        alreadydone = [n for n in extnames if n in extnamesdone]
        extnames = [n for n in extnames if n not in extnamesdone]
        if verbose:
            print('Skipping %s, extension %s; already done.' %
                  (imfn, ' '.join(alreadydone)))
    if extnamelist is not None:
        extnames = [n for n in extnames if n in extnamelist]
    extnames = [n for n in extnames if n != 'PRIMARY']
    if numpy.isfinite(nproc):
        extnames = extnames[:int(nproc)]

    bigdict = dict(imfn=imfn,
                   ivarfn=ivarfn,
                   dqfn=dqfn,
                   maskdiffuse=maskdiffuse,
                   maskgal=maskgal,
                   verbose=verbose,
                   wcutoff=wcutoff,
                   contmask=contmask,
                   fwhms=fwhms,
                   filt=filt,
                   pixsz=pixsz,
                   brightstars=brightstars,
                   bmask_deblend=bmask_deblend,
                   plot=plot,
                   miniter=miniter,
                   maxiter=maxiter,
                   titer_thresh=titer_thresh,
                   expnum=prihdr['EXPNUM'],
                   outmodel=outmodel,
                   outfn=outfn,
                   outmodelfn=outmodelfn,
                   modsaveonly=modsaveonly,
                   noModsave=noModsave)

    run_fxn(bigdict, extnames, nthreads)

    ### This is the (optional) synthetic injection pipeline ###
    if inject != 0:
        import decam_inject
        imfnI, ivarfnI, dqfnI, injextnames = decam_inject.write_injFiles(
            imfn,
            ivarfn,
            dqfn,
            outfn,
            inject,
            injextnamelist,
            filt,
            pixsz,
            wcutoff,
            verbose,
            resume,
            date,
            overwrite,
            injectfrac=injectfrac)

        bigdict['imfn'] = imfnI
        bigdict['ivarfn'] = ivarfnI
        bigdict['dqfn'] = dqfnI

        run_fxn(bigdict, injextnames, nthreads)

    if profile:
        pr.disable()
        pstats.Stats(pr).sort_stats('cumulative').print_stats(60)
        after = hp.heap()
        leftover = after - before
        print(leftover)
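
The hp/before pair used in the profiling block above is never defined in this excerpt; it matches guppy's heap profiler, so a minimal setup sketch (assuming the guppy3 package) would be:

# Minimal sketch of the heap-snapshot setup assumed above (guppy3 package).
from guppy import hpy

hp = hpy()
before = hp.heap()  # snapshot taken before the run; diffed against `after` later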
Example #23
 def __enter__(self):
     self.pr = cProfile.Profile()
     self.pr.enable()
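
A context manager like this needs a matching __exit__ to report anything; a minimal sketch, assuming stats should go to stdout sorted by cumulative time:

 def __exit__(self, exc_type, exc_value, traceback):
     # Assumed exit hook (not shown in the source): stop profiling and
     # print cumulative-time stats on the way out.
     import pstats
     self.pr.disable()
     pstats.Stats(self.pr).sort_stats('cumulative').print_stats()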
Example #24
 def __init__(self, file_path, sort_by='time', builtins=False):
     self._profiler = cProfile.Profile(builtins=builtins)
     self.file_path = file_path
     self.sort_by = sort_by
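
This __init__ stores file_path and sort_by, but the rest of the class is not shown; a hedged sketch of how it might complete the context-manager protocol and dump stats to the file (the method bodies below are assumptions, not the original code):

 def __enter__(self):
     # Assumed continuation: start profiling on entry.
     self._profiler.enable()
     return self

 def __exit__(self, exc_type, exc_value, traceback):
     # Assumed continuation: stop profiling and write stats to file_path,
     # ordered by the configured sort key.
     import pstats
     self._profiler.disable()
     with open(self.file_path, 'w') as f:
         pstats.Stats(self._profiler, stream=f).sort_stats(self.sort_by).print_stats()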
Example #25
 def setUpClass(cls):
     DOM.use(DOM.STRING)
     cls.pr = cProfile.Profile()
     cls.pr.enable()
     print("\nstring")
Example #26
import io
import os
import cProfile
import pstats

# `profiling_text` (lines read from memory_profiling.txt) and `filename`
# (the script to profile) are defined earlier in the original script.
max_memory = 0
for line in profiling_text:
    x = line.split()
    # In the first line, x[1] is always '/usr/bin/python3' rather than a number; skip it.
    if x[1] != '/usr/bin/python3':
        if float(x[1]) > max_memory:
            max_memory = float(x[1])
#print ("************** Peak Memory (MB): ", max_memory, " **************")
# Delete the output txt file
os.system('rm memory_profiling.txt')
#print ("memory_profiling.txt Deleted Succesfully")

################# Time Profiling #################

print("...Time Profiling...")
profiler = cProfile.Profile()
profiler.enable()
# Run the target script in the current namespace, under the profiler.
exec(open(filename).read())
profiler.disable()

# Output the results at the CLI:
# stats = pstats.Stats(profiler).sort_stats('ncalls')
# stats.print_stats()

# Write the results to a text file.
with open('time_profiling.txt', 'w+') as f:
    ps = pstats.Stats(profiler, stream=f)
    ps.sort_stats('ncalls')
    ps.print_stats()
Example #27
        save_checkpoint(epoch, val_acc[0], best_acc)
    if num_epochs > 1:
        print('Average epoch time: {}'.format(
            float(total_time) / (num_epochs - 1)))


def main():
    net.hybridize()
    train(opt, context)


if __name__ == '__main__':
    if opt.profile:
        # import hotshot, hotshot.stats
        # prof = hotshot.Profile('image-classifier-%s-%s.prof'%(opt.model, opt.mode))
        # prof.runcall(main)
        # prof.close()
        # stats = hotshot.stats.load('image-classifier-%s-%s.prof'%(opt.model, opt.mode))
        # stats.strip_dirs()
        # stats.sort_stats('cumtime', 'calls')
        # stats.print_stats()
        import cProfile
        prof = cProfile.Profile()
        prof.enable()
        prof.runcall(main)
        prof.disable()
        prof.dump_stats('train.prof')

    else:
        main()
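
The dumped train.prof can be inspected offline with pstats; a minimal sketch:

# Load and summarize the dumped profile (sketch).
import pstats

stats = pstats.Stats('train.prof')
stats.strip_dirs().sort_stats('cumtime').print_stats(20)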
Example #28

import cProfile
import logging

# Excerpt: the original module also imports EnqueueCorrectedService and
# EnqueueKilledService (used below) above this point.
from apps.publish.enqueue.enqueue_published import EnqueuePublishedService
from apps.publish.published_item import PUBLISH_STATE, QUEUE_STATE, PUBLISHED, ERROR_MESSAGE
from bson.objectid import ObjectId
from eve.utils import config, ParsedRequest
from eve.versioning import resolve_document_version
from superdesk.celery_app import celery
from superdesk.utc import utcnow
from superdesk.profiling import ProfileManager
from apps.content import push_content_notification
from superdesk.errors import ConnectionTimeout
from celery.exceptions import SoftTimeLimitExceeded


logger = logging.getLogger(__name__)

profile = cProfile.Profile()

UPDATE_SCHEDULE_DEFAULT = {'seconds': 10}

ITEM_PUBLISH = 'publish'
ITEM_CORRECT = 'correct'
ITEM_KILL = 'kill'
ITEM_TAKEDOWN = 'takedown'

enqueue_services = {
    ITEM_PUBLISH: EnqueuePublishedService(),
    ITEM_CORRECT: EnqueueCorrectedService(),
    ITEM_KILL: EnqueueKilledService(),
    ITEM_TAKEDOWN: EnqueueKilledService(),
}
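
With a module-level cProfile.Profile like the one above, data accumulates across enable()/disable() cycles until something persists it; a hedged helper sketch (the name and default path are assumptions, not superdesk API):

def dump_profile(path='enqueue.prof'):
    # Hypothetical helper: persist the module-level profiler's accumulated
    # stats for offline inspection with pstats.
    import pstats
    pstats.Stats(profile).dump_stats(path)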
Example #29

import argparse
import io
import os
import cProfile
import pstats

# Assumed import path for the detector class used below:
from magic_card_detector import MagicCardDetector


def main():
    """
    Python MTG Card Detector.
    Can also be used purely through the defined classes.
    """

    # Add command line parser
    parser = argparse.ArgumentParser(
        description='Recognize Magic: the Gathering cards from an image. ' +
        'Author: Timo Ikonen, timo.ikonen(at)iki.fi')

    parser.add_argument('input_path',
                        help='path containing the images to be analyzed')
    parser.add_argument('output_path', help='output path for the results')
    parser.add_argument('--phash',
                        default='alpha_reference_phash.dat',
                        help='pre-calculated phash reference file')
    parser.add_argument('--visual',
                        default=False,
                        action='store_true',
                        help='run with visualization')
    parser.add_argument('--verbose',
                        default=False,
                        action='store_true',
                        help='run in verbose mode')

    args = parser.parse_args()

    # Create the output path
    output_path = args.output_path.rstrip('/')
    if not os.path.exists(output_path):
        os.mkdir(output_path)

    # Instantiate the detector
    card_detector = MagicCardDetector(output_path)

    do_profile = False
    card_detector.visual = args.visual
    card_detector.verbose = args.verbose

    # Read the reference and test data sets
    # card_detector.read_and_adjust_reference_images(
    #     '../../MTG/Card_Images/LEA/')
    card_detector.read_prehashed_reference_data(args.phash)
    card_detector.read_and_adjust_test_images(args.input_path)

    if do_profile:
        # Start up the profiler.
        profiler = cProfile.Profile()
        profiler.enable()

    # Run the card detection and recognition.

    card_detector.run_recognition()

    if do_profile:
        # Stop profiling and organize and print profiling results.
        profiler.disable()
        profiler.dump_stats('magic_card_detector.prof')
        profiler_stream = io.StringIO()
        sortby = pstats.SortKey.CUMULATIVE
        profiler_stats = pstats.Stats(
            profiler, stream=profiler_stream).sort_stats(sortby)
        profiler_stats.print_stats(20)
        print(profiler_stream.getvalue())
Example #30
        :param sig: signal
        :param frame: frame
        :return: None
        """
        self.running = False


if len(sys.argv) > 1 and 'console' in sys.argv[1:]:
    # command line start
    if 'profile' in sys.argv[1:]:
        # start with profiling
        import cProfile
        import StringIO
        import pstats

        pr = cProfile.Profile(builtins=False)
        pr.enable()
        Main()
        pr.disable()
        s = StringIO.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        print s.getvalue()
    else:
        Main()
else:
    # Daemonize flowd aggregator
    daemon = Daemonize(app="flowd_aggregate", pid='/var/run/flowd_aggregate.pid', action=Main)
    daemon.start()
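
The profiling branch above is Python 2 (StringIO module, print statement); a Python 3 equivalent of the same block, for reference:

# Python 3 port of the profiling branch above (sketch).
import cProfile
import io
import pstats

pr = cProfile.Profile(builtins=False)
pr.enable()
Main()
pr.disable()
s = io.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())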