Beispiel #1
0
 def create_media(self):
     # Saves the create window's fields as a new database entry and moves the
     # media folder out of 'New' into its media-type folder.
     source_title = self.create.title
     entry = self.create.get_dict()
     src = util.to_path('New', source_title)
     dst = util.to_path(entry['media_type'], entry['title'])
     util.move(src, dst)
     db.enter(entry)
     self.create.hide()
Beispiel #2
0
    def _move_to_random_home_position(self):
        """Move the arm to a randomly chosen preset home pose.

        After reaching the home pose, the rotation is re-sampled so that yaw,
        pitch, and roll all lie inside the ranges we actually draw from later;
        subsequent moves keep referring back to this angle, so it must start
        in-range.
        """
        chosen = self.home_pos_list[np.random.randint(len(self.home_pos_list))]
        home_pos, home_rot = utils.lists_of_pos_rot_from_frame(chosen)
        home_pos[2] += self.z_offset_home
        utils.move(arm=self.arm, pos=home_pos, rot=home_rot, SPEED_CLASS='Slow')

        in_range_rot = [self._sample_yaw(), self._sample_pitch(), self._sample_roll()]
        utils.move(arm=self.arm, pos=home_pos, rot=in_range_rot, SPEED_CLASS='Slow')
Beispiel #3
0
 def submit_edit(self):
     # Applies the edit window's fields to its database entry, relocating the
     # media folder on disk when the title or media type changed.
     try:
         data = self.edit.get_dict()
         current = db.get_dict(self.edit.title)
         src = util.to_path(current['media_type'], self.edit.title)
         dst = util.to_path(data['media_type'], data['title'])
         if src != dst:
             # Move and/or rename the folder to match the updated entry.
             util.move(src, dst)
         db.update(self.edit.title, data)
         self.main.update(self.rows, db.get_dict(data['title']))
         self.edit.hide()
     except ValueError:
         # TODO: highlight the incorrect fields in the edit window instead.
         print('order and year need to be numbers')
Beispiel #4
0
def alphaBeta(board, currentDepth, maxDepth, emptySpaces, isXsTurn, alpha,
              beta):
    """Minimax with alpha-beta pruning.

    Returns (score, move, searchCount): the best achievable evaluation, the
    (col, row) space realizing it (or `()` at a terminal node), and how many
    child positions were examined.
    """
    score = evaluate(board)
    # Terminal node: depth limit hit, board full, or a side has already won.
    if (currentDepth == maxDepth or not len(emptySpaces)
            or abs(score) == abs(X_WIN)):
        return score, (), 0

    bestMove = ()
    visited = 0
    bestScore = -INF if isXsTurn else INF
    mark = X if isXsTurn else O
    for cell in emptySpaces:
        child, _ = move(board, cell[0], cell[1], mark)
        childScore, _, childVisited = alphaBeta(
            child, currentDepth + 1, maxDepth, findEmptySpaces(child),
            not isXsTurn, alpha, beta)
        visited += 1 + childVisited

        # X maximizes and tightens alpha; O minimizes and tightens beta.
        if isXsTurn and bestScore < childScore:
            bestScore, bestMove = childScore, cell
            alpha = max(alpha, bestScore)
        elif not isXsTurn and bestScore > childScore:
            bestScore, bestMove = childScore, cell
            beta = min(beta, bestScore)

        # Prune: the opponent will never allow this branch.
        if beta <= alpha:
            break

    return bestScore, bestMove, visited
Beispiel #5
0
def minimax(board, currentDepth, maxDepth, emptySpaces, isXsTurn):
    """Plain minimax search (no pruning); see `alphaBeta` for the pruned form.

    Returns (optimalScore, optimalMove, searches): the best achievable
    evaluation, the (col, row) space realizing it (`()` at terminal nodes),
    and the number of child positions examined.
    """
    evaluation = evaluate(board)
    # Terminal node: depth limit hit, board full, or a side has already won.
    if currentDepth == maxDepth or not len(emptySpaces) or abs(
            evaluation) == abs(X_WIN):
        return evaluation, (), 0

    optimalMove = ()
    searches = 0

    optimalScore = -INF if isXsTurn else INF
    for space in emptySpaces:
        nextState, _ = move(board, space[0], space[1], X if isXsTurn else O)

        nextScore, _, subtreeSearches = minimax(nextState, currentDepth + 1,
                                                maxDepth,
                                                findEmptySpaces(nextState),
                                                not isXsTurn)

        searches += 1 + subtreeSearches

        # X maximizes, O minimizes.
        if isXsTurn and optimalScore < nextScore:
            optimalScore = nextScore
            optimalMove = space
        elif not isXsTurn and optimalScore > nextScore:
            optimalScore = nextScore
            optimalMove = space
    # (Removed a dead, free-floating debug string that printed per-node stats;
    # it executed as a no-op expression statement.)

    return optimalScore, optimalMove, searches
def inherent_noise(arm):
    """Measure the arm's repeatability by bouncing between two fixed targets.

    (NEW! September 7, 2017 — used to argue why there's inherent random noise:
    repeatedly command the same two positions and report the spread of the
    poses actually reached.)
    """
    yaw = 0
    pitch, roll = utils.get_interpolated_pitch_and_roll(yaw)
    rotation = [yaw, pitch, roll]

    targets = ([-0.01, 0.06, -0.13], [-0.02, 0.07, -0.14])
    samples = ([], [])

    for _ in range(20):
        for which, (target, recorded) in enumerate(zip(targets, samples)):
            utils.move(arm, target, rotation, 'Fast')
            pos, _ = utils.lists_of_pos_rot_from_frame(
                arm.get_current_cartesian_position())
            # Second target's readings are tab-indented, as before.
            print("{}{}".format('' if which == 0 else '\t', pos))
            recorded.append(pos)
            time.sleep(2)

    # Summarize the scatter of the reached positions for each target.
    for label, recorded in zip(("Positions 1", "Positions 2"), samples):
        reached = np.array(recorded)
        print("\n\t{}:".format(label))
        print(reached)
        print("mean: {}".format(np.mean(reached, axis=0)))
        print("std:  {}".format(np.std(reached, axis=0)))
        print("min:  {}".format(np.min(reached, axis=0)))
        print("max:  {}".format(np.max(reached, axis=0)))
def playerVersusMinimax(save):
    """Interactive Tic Tac Toe: the human plays X, minimax (depth 2) plays O.

    When `save` is truthy the board history is written out via `writeGame`.
    """
    print("Tic Tac Toe")
    print("Players: 1")

    board = 0
    history = []
    outcome = UNFINISHED
    while outcome == UNFINISHED:
        placed = False
        while not placed:
            print()
            printBoard(board)
            print()
            print("Column:")
            col = input()
            print("Row:")
            row = input()

            # Empty input skips the human's move this round.
            if not col or not row:
                break

            board, placed = move(board, int(col), int(row), X)
            if not placed:
                print("That space is taken")

        _, reply, _ = minimax(board, 0, 2, findEmptySpaces(board), False)

        history.append(str(board) + "\n")

        board, placed = move(board, reply[0], reply[1], O)

        history.append(str(board) + "\n")

        outcome = checkForWin(board)

    if save:
        writeGame(history)

    print()
    printBoard(board)
    print()

    print("**************")
    print(outcome + " wins!")
    print("**************")
Beispiel #8
0
 def set_batch_edit(self):
     # Applies the batch-edit values to every selected row: ForeignKey fields
     # are overwritten, ManyToMany fields have the values added.
     data = self.batch_edit.get_dict()
     for row in self.rows:
         # A non-blank media_type means each row's folder must move on disk.
         if data.get('media_type'):
             title = self.main.get_media_title(row)
             src = util.to_path(db.get_dict(title)['media_type'], title)
             dst = util.to_path(data['media_type'], title)
             util.move(src, dst)
         db.update_set(self.main.get_media_title(row), data)
         self.main.update([row],
                          db.get_dict(self.main.get_media_title(row)))
     self.batch_edit.hide()
Beispiel #9
0
    def handle_new(self, new: dict, startup: bool):
        """Handle folders found on disk that are missing from the database.

        Shows a dialog listing the new folders and, if the user agrees, moves
        each one into the 'New' folder and runs the create dialog for it so a
        database entry can be made.

        Parameters:
            new: mapping of media_type -> iterable of new folder names.
            startup: when True, suppress the "nothing found" pop-up.
        """
        # We want a dialog box regardless of what happens.
        pop_up = QtWidgets.QMessageBox()
        pop_up.setWindowTitle('New Files')

        if not new:  # if there aren't any new files
            if not startup:  # don't show the pop up at startup
                pop_up.setText('Did not find any new files.')
                pop_up.setStandardButtons(QtWidgets.QMessageBox.Ok)
                pop_up.exec_()
            return

        # If there are new files, build the "Show Details" text listing them.
        text = ''
        for media_type in new:
            for media in new[media_type]:
                text += 'In ' + media_type + ': "' + media + '"\n'

        # FIX: "findout" -> "find out" in the user-facing message.
        pop_up.setText(
            'New files were found! Check "Show Details" below to find out which ones.\nWould you like to create new database entries for them?'
        )
        pop_up.setDetailedText(text)
        pop_up.setStandardButtons(QtWidgets.QMessageBox.Yes
                                  | QtWidgets.QMessageBox.No)
        pop_up.exec_()

        # NOTE(review): comparing the clicked button's text to '&Yes' is
        # platform/locale dependent; QMessageBox.standardButton(clickedButton())
        # would be sturdier. Kept as-is apart from the typo fix above.
        if pop_up.clickedButton().text() == '&Yes':
            for media_type in new:
                for media_name in new[media_type]:
                    if media_type != 'New':
                        old_path = util.to_path(media_type, media_name)
                        new_path = util.to_path('New', media_name)
                        # Move from where we found it into the 'New' folder.
                        util.move(old_path, new_path)
                    self.create.show(media_name)
                    self.create.hide()
            self.main.refresh_table()
def playerVersusRandom(save):
    """Interactive Tic Tac Toe: the human plays X, a random mover plays O.

    When `save` is truthy the board history is written out via `writeGame`.
    """
    print("Tic Tac Toe")
    print("Players: 1")

    board = 0
    history = []
    outcome = UNFINISHED
    while outcome == UNFINISHED:
        placed = False
        while not placed:
            print()
            printBoard(board)
            print()
            print("Column:")
            col = input()
            print("Row:")
            row = input()

            # Empty input skips the human's move this round.
            if not col or not row:
                break

            board, placed = move(board, int(col), int(row), X)
            if not placed:
                print("That space is taken")

        history.append(str(board) + "\n")

        board = randomMove(board, O)

        history.append(str(board) + "\n")

        outcome = checkForWin(board)

    if save:
        writeGame(history)

    print()
    printBoard(board)
    print()

    print("**************")
    print(outcome + " wins!")
    print("**************")
def computerVsComputer(gameTypes, save):
    """Pit two AI strategies against each other (gameTypes[0] plays X,
    gameTypes[1] plays O), printing each board plus per-move stats/timing."""
    print("Tic Tac Toe")
    print("{} versus {}".format(selectDescription[gameTypes[0]],
                                selectDescription[gameTypes[1]]))
    print("Players: 0")

    board = 0
    history = []
    outcome = UNFINISHED
    # Each side is (mark, isXsTurn flag, algorithm key); swapped every move.
    active, waiting = (X, True, gameTypes[0]), (O, False, gameTypes[1])
    history.append(str(board) + "\n")
    while outcome == UNFINISHED:
        started = time()
        chosen, statistic = selectAlgorithm[active[2]](board, active[1])
        elapsed = time() - started
        board, _ = move(board, chosen[0], chosen[1], active[0])

        history.append(str(board) + "\n")

        print()
        printBoard(board)
        if statistic != '':
            print(statistic)
        print("Time: {} seconds".format(round(elapsed, 4)))
        print()

        outcome = checkForWin(board)
        active, waiting = waiting, active

    if save:
        writeGame(history)

    print()
    print("**************")
    print(outcome + " wins!")
    print("**************")
def proof_of_concept_part_one(left_im, right_im, left_contours, right_contours, arm):
    """ Demo of gripping the same target at both -90 and +90 yaw.

    Locates a single contour center in each stereo image, converts the pixel
    pair to a camera-space point, then steps through a mix of automatic moves
    and human (manual) repositioning, printing (pos, rot) at each stage.
    Effectively an argument for why the DL approach with automatic trajectory
    collection should work; could serve as a screenshot/picture sequence in an
    eventual paper.
    """
    center_left  = get_single_contour_center(left_im,  left_contours)
    center_right = get_single_contour_center(right_im, right_contours)
    cv2.destroyAllWindows()
    camera_pt = utils.camera_pixels_to_camera_coords(center_left, center_right)
    print("(left, right) = ({}, {})".format(center_left, center_right))
    print("(cx,cy,cz) = ({:.4f}, {:.4f}, {:.4f})".format(*camera_pt))

    # Get rotation set to -90 yaw to start.
    pos, rot = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())
    utils.move(arm, pos, [-90, 10, -170], 'Slow')

    # Manual movement: human drags the arm to the point, keeping yaw near -90.
    arm.open_gripper(90)
    utils.call_wait_key( cv2.imshow("(left image) move to point keeping angle roughly -90)", left_im) )
    pos, rot = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())
    utils.move(arm, pos, [-90, 10, -170], 'Slow')  # Because we probably adjusted angle by mistake.
    pos2, rot2 = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())

    # At this point, (pos2, rot2) should be the -90 yaw version which can grip this.
    print("pos,rot -90 yaw:\n({:.4f}, {:.4f}, {:.4f}), ({:.4f}, {:.4f}, {:.4f})".format(
        pos2[0],pos2[1],pos2[2],rot2[0],rot2[1],rot2[2]))

    # Next, MANUALLY move to the reverse angle, +90; this SHOULD use a different position.
    # Takes some practice to find a good location. Manual movement is used because
    # commanding +90 yaw in code was going to +180 for some reason ...
    utils.call_wait_key( cv2.imshow("(left image) move to +90 where it can grip seed)", left_im) )
    pos3, rot3 = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())
    print("pos,rot after manual +90 yaw change:\n({:.4f}, {:.4f}, {:.4f}), ({:.4f}, {:.4f}, {:.4f})".format(pos3[0],pos3[1],pos3[2],rot3[0],rot3[1],rot3[2]))

    # Automatic correction in case we made a slight error manually.
    utils.move(arm, pos3, [90, 0, -165], 'Slow')
    pos4, rot4 = utils.lists_of_pos_rot_from_frame(arm.get_current_cartesian_position())

    # Now (pos4, rot4) should be the +90 yaw version which can grip this.
    print("pos,rot +90 yaw:\n({:.4f}, {:.4f}, {:.4f}), ({:.4f}, {:.4f}, {:.4f})".format(
        pos4[0],pos4[1],pos4[2],rot4[0],rot4[1],rot4[2]))
def motion_planning(contours_by_size, img, arm, rotations):
    """ The open loop: visit the top-K contour centers and pick at each one.
    
    Parameters
    ----------
    contours_by_size: [list]
        A list of contours, arranged from largest area to smallest area.
    img: [np.array]
        Image the camera sees, in BGR form (not RGB).
    arm: [dvrk arm]
        Represents the arm we're using for the DVRK.
    rotations:
        Rotations zipped one-per-place with the predicted robot points below;
        presumably [yaw, pitch, roll] lists — TODO confirm against the caller.
    """
    print("Identified {} contours but will keep top {}.".format(len(contours_by_size), TOPK_CONTOURS))
    img_for_drawing = img.copy()
    contours = list(contours_by_size)
    cv2.drawContours(img_for_drawing, contours, -1, (0,255,0), 3)
    places_to_visit = []

    # Iterate and find centers (via image moments). We'll make the robot move to
    # these centers in a sequence. Duplicate contours should be detected beforehand.
    for i,cnt in enumerate(contours):
        M = cv2.moments(cnt)
        if M["m00"] == 0: continue  # degenerate contour: no area, no center
        cX = int(M["m10"] / M["m00"])
        cY = int(M["m01"] / M["m00"])
        places_to_visit.append((cX,cY))

    # Collect only top K places to visit and insert ordering preferences. I do right to left.
    places_to_visit = places_to_visit[:TOPK_CONTOURS]
    places_to_visit = sorted(places_to_visit, key=lambda x:x[0], reverse=True)

    # Number the places to visit in an image so I see them.
    for i,(cX,cY) in enumerate(places_to_visit):
        cv2.putText(img=img_for_drawing, text=str(i), org=(cX,cY), 
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX, 
                    fontScale=1, color=(0,0,0), thickness=2)
        cv2.circle(img=img_for_drawing, center=(cX,cY), radius=3, color=(0,0,255), thickness=-1)
        print(i,cX,cY)

    # Show image with contours + exact centers. Exit if it's not looking good.
    # If good, SAVE IT!!! This protects me in case of poor perception causing problems.
    trial = len([fname for fname in os.listdir(IMAGE_DIR) if '.png' in fname])
    cv2.imshow("Top-K contours (exit if not looking good), trial index is {}".format(trial), img_for_drawing)
    utils.call_wait_key()
    cv2.imwrite(IMAGE_DIR+"/im_"+str(trial).zfill(3)+".png", img_for_drawing)
    cv2.destroyAllWindows()

    # Given points in `places_to_visit`, must figure out the robot points for each.
    robot_points_to_visit = []
    print("\nHere's where we'll be visiting (left_pixels, robot_point):")
    for left_pixels in places_to_visit:
        robot_points_to_visit.append(
                utils.left_pixel_to_robot_prediction(
                        left_pt=left_pixels,
                        params=PARAMETERS,
                        better_rf=RF_REGRESSOR,
                        ARM1_XOFFSET=ARM1_XOFFSET,
                        ARM1_YOFFSET=ARM1_YOFFSET,
                        ARM1_ZOFFSET=ARM1_ZOFFSET
                )
        )
        print("({}, {})\n".format(left_pixels, robot_points_to_visit[-1]))

    # With robot points, tell it to _move_ to these points. Apply vertical offsets here, FYI.
    # Using the `linear_interpolation` is slower and jerkier than just moving directly.
    arm.open_gripper(degree=90, time_sleep=2)

    for (robot_pt, rot) in zip(robot_points_to_visit, rotations):
        # Go to the point, hovering ZOFFSET_SAFETY above it. Note: switch rotation **first**?
        pos = [robot_pt[0], robot_pt[1], robot_pt[2]+ZOFFSET_SAFETY]
        utils.move(arm, HOME_POS, rot, SPEED_CLASS) 
        utils.move(arm, pos,      rot, SPEED_CLASS)

        # Lower, close, and raise the end-effector.
        pos[2] -= ZOFFSET_SAFETY
        utils.move(arm, pos, rot, SPEED_CLASS)
        arm.open_gripper(degree=CLOSE_ANGLE, time_sleep=2) # Need >=2 seconds!
        pos[2] += ZOFFSET_SAFETY
        utils.move(arm, pos, rot, SPEED_CLASS)

        # Back to the home position, then release.
        utils.move(arm, HOME_POS, rot, SPEED_CLASS)
        arm.open_gripper(degree=90, time_sleep=2) # Need >=2 seconds!
Beispiel #14
0
                predicted_pos = predicted_pos - residual_vec
                print("\tresidual vector from RF corrector: {}".format(
                    residual_vec))
                print("\trevised predicted_pos: {}".format(predicted_pos))

        # Use offsets.
        predicted_pos[2] = np.maximum(predicted_pos[2], -0.18)
        predicted_pos[2] = np.minimum(predicted_pos[2], -0.168)

        predicted_pos[0] += args.x_offset
        predicted_pos[1] += args.y_offset
        predicted_pos[2] += args.z_offset
        assert len(predicted_pos) == 3

        # Robot moves to that point and will likely be off. Let the camera refresh.
        utils.move(arm, predicted_pos, rotation, 'Slow')
        time.sleep(5)

        # Update image (left, though it doesn't matter) and put center coords. Blue=Before, Red=AfteR.
        updated_image_copy = (d.left_image).copy()
        cv2.circle(img=updated_image_copy,
                   center=(lx, ly),
                   radius=6,
                   color=(255, 0, 0),
                   thickness=-1)
        cv2.putText(img=updated_image_copy,
                    text="{}".format((lx, ly)),
                    org=(lx, ly),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1,
                    color=(255, 0, 0),
def calibrateImage(contours, img, arm1, outfile):
    """ Perform camera calibration using both images.
    
    This code saves the camera pixels (cX,cY) and the robot coordinates 
    (the (pos,rot) for ONE arm) all in one pickle file. Then, not in
    this code, we run regression to get our desired mapping from pixel 
    space to robot space. Whew. It's manual, but worth it. I put numbers
    to indicate how many we've saved. DO ONE SAVE PER CONTOUR so that I 
    can get a correspondence with left and right images after arranging
    pixels in the correct ordering (though I don't think I have to do that).

    Per contour: show it, let the human press a key (ESC skips it), let the
    human re-position the arm onto the center, then store (pos, rot, cX, cY).
    """
    utils.move(arm1, HOME_POS, ROTATION, 'Fast')
    arm1.close_gripper()
    print("(after calling `home`) psm1 current position: {}".format(
        arm1.get_current_cartesian_position()))
    print("len(contours): {}".format(len(contours)))
    num_saved = 0

    for i, (cX, cY, approx, peri) in enumerate(contours):
        # Skip contours outside the workspace bounds.
        if utils.filter_point(cX, cY, 500, 1500, 75, 1000):
            continue
        image = img.copy()

        # Deal with the image and get a visual. Keep clicking ESC key until we see a circle.
        cv2.circle(image, (cX, cY), 50, (0, 0, 255))
        cv2.drawContours(image, [approx], -1, (0, 255, 0), 3)
        cv2.putText(img=image,
                    text=str(num_saved),
                    org=(cX, cY),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=1,
                    color=(0, 0, 0),
                    thickness=2)
        cv2.imshow("Contour w/{} saved so far out of {}.".format(num_saved, i),
                   image)
        key1 = cv2.waitKey(0)

        if key1 not in utils.ESC_KEYS:
            # We have a circle. Move arm to target. The rotation is off, but we command it to rotate.
            frame = arm1.get_current_cartesian_position()
            utils.move(arm=arm1,
                       pos=frame.position[:3],
                       rot=ROTATION,
                       SPEED_CLASS='Slow')

            # Now the human re-positions it to the center.
            cv2.imshow(
                "Here's where we are after generic movement + rotation. Now correct it!",
                image)
            key2 = cv2.waitKey(0)

            # Get position and orientation of the arm, save, & reset.
            pos, rot = utils.lists_of_pos_rot_from_frame(
                arm1.get_current_cartesian_position())
            a1 = (pos, rot, cX, cY)
            print("contour {}, a1={}".format(i, a1))
        else:
            print("(not storing contour {} on the left)".format(i))
        utils.move(arm1, HOME_POS, ROTATION, 'Fast')
        arm1.close_gripper()

        # Only store this contour if both keys were not escape keys.
        # (a1 is guaranteed defined here: it was assigned under the same key1 test.)
        if key1 not in utils.ESC_KEYS:
            utils.storeData(outfile, a1)
            num_saved += 1
        cv2.destroyAllWindows()
Beispiel #16
0
    """ 
    When running this for real, be sure to feed (i.e. `tee`) the output into a file 
    for my own future reference. Again, as I said earlier, we're in two cases, depending
    on whether a `guidelines.p` file already exists.

    For the second case, there's a bunch of arguments, but no argparse since this isn't
    being run many times in simulation. By the way, `z_offset` is for generic offsets to 
    avoid damaging the paper, but `z_offset_home` is only for the four `home` positions.
    """
    arm, _, d = utils.initializeRobots()
    arm.close_gripper()
    directory = 'traj_collector/guidelines.p'

    if not os.path.isfile(directory):
        print("We're going to start the first step, to collect guidelines.")
        utils.move(arm=arm, pos=[0.0,0.06,-0.13], rot=[0,-10,-170], SPEED_CLASS='Slow')
        collect_guidelines(arm, d, directory)
    else:
        print("Guidelines exist. Now let's proceed to the automatic trajectory collector.")
        args = {}
        args['d'] = d
        args['arm'] = arm
        args['guidelines_dir'] = directory
        args['z_offset'] = 0.001
        args['z_offset_home'] = 0.005
        args['interpolation_interval'] = 20
        args['require_negative_roll'] = True

        # THESE ARE THE MAIN VALUES TO CHANGE!!
        args['num_trajs'] = 25
        args['rots_per_stoppage'] = 3
Beispiel #17
0
    def collect_trajectories(self):
        """ Runs the robot and collects `self.num_trajs` trajectories. 
        
        1. Choose a random location of (x,y,z) that is within legal and safe range.
        2. Command the arm to move there, but stop it periodically along its trajectory.
        3. For each point in its trajectory, we'll randomly make it rotate to certain spots.
        4. Keep collecting and saving left AND right camera images of this process.
        5. Each trajectory is saved in its own sub-directory of images.

        We save both the normal camera views and the thresholded ones; the normal
        views are solely for debugging and reassurance, though we might do some
        systematic checking.

        `time.sleep(...)` calls appear throughout because camera images update
        with a delay.
        """
        # `traj_index` continues after any trajectories already on disk, so a
        # run can resume without destroying existing trajectory information.
        traj_dirs = [x for x in os.listdir('traj_collector') if 'traj' in x]
        traj_index = len(traj_dirs)
        print("\nNow collecting trajectories. Starting index: {}\n".format(traj_index))

        for _ in range(self.num_trajs):

            # Get directories/stats set up, and move to a random home position w/correct angles.
            this_dir = 'traj_collector/traj_' + str(traj_index).zfill(4) + '/'
            os.makedirs(this_dir)
            os.makedirs(this_dir + 'left/')
            os.makedirs(this_dir + 'right/')
            os.makedirs(this_dir + 'left_th/')   # (added trailing '/' for consistency)
            os.makedirs(this_dir + 'right_th/')
            intervals_in_traj = 0
            traj_poses = []
            self._move_to_random_home_position()
            time.sleep(3)

            # Pick a safe target position.
            xx = np.random.uniform(low=self.info['min_x'], high=self.info['max_x'])
            yy = np.random.uniform(low=self.info['min_y'], high=self.info['max_y'])
            zz = self._get_z_from_xy_values(xx, yy)
            target_position = [xx, yy, zz + self.z_offset]
            print("\n\nTrajectory {}, target position: {}".format(traj_index, target_position))

            # ------------------------------------------------------------------
            # Follows the `linear_interpolation` movement code to take incremental steps.
            interval   = 0.0001
            start_vect = self.arm.get_current_cartesian_position()

            # Get a list with the yaw, pitch, and roll from the _starting_ position.
            _, current_rot = utils.lists_of_pos_rot_from_frame(start_vect)

            # If calling movement code, `end_vect` would be the input "tfx frame."
            end_vect     = tfx.pose(target_position, tfx.tb_angles(*current_rot))
            displacement = np.array(end_vect.position - start_vect.position)

            # Total number of interpolation intervals between start and end poses.
            tinterval = max(int(np.linalg.norm(displacement) / interval), 50)
            print("Number of intervals: {}".format(tinterval))

            for ii in range(0, tinterval, self.interpolation_interval):
                # SLERP interpolation from tfx function (from `dvrk/robot.py`).
                mid_pose = start_vect.interpolate(end_vect, (ii + 1.0) / tinterval)
                # FIX: was `arm.move_cartesian_frame(...)`, which referenced an
                # undefined/global name; the class's arm handle is `self.arm`.
                self.arm.move_cartesian_frame(mid_pose, interpolate=True)

                # --------------------------------------------------------------
                # Back to my stuff. Note that `frame` = `mid_pose` (in theory, at least).
                time.sleep(3)
                print("\ninterval {} of {}, mid_pose: {}".format(ii + 1, tinterval, mid_pose))
                frame = self.arm.get_current_cartesian_position()  # The _actual_ pose.
                this_pos, this_rot = utils.lists_of_pos_rot_from_frame(frame)
                old_yaw = this_rot[0]

                # After moving there (keeping rotation fixed) we record information.
                num = str(intervals_in_traj).zfill(3)  # Increments by 1 each non-rotation movement.
                l_center, r_center = self._save_images(this_dir, num, '0')
                traj_poses.append((frame, l_center, r_center))

                for rr in range(1, self.rots_per_stoppage + 1):
                    # Pick a random rotation and move there. Update `old_yaw` as well.
                    random_rotation = [self._sample_yaw(old_yaw),
                                       self._sample_pitch(),
                                       self._sample_roll()]
                    old_yaw = random_rotation[0]
                    utils.move(arm=self.arm, pos=this_pos, rot=random_rotation, SPEED_CLASS='Slow')
                    time.sleep(3)
                    frame = self.arm.get_current_cartesian_position()
                    random_rotation_str = ["%.2f" % v for v in random_rotation]
                    print("    rot {}, target rot:  {}".format(rr, random_rotation_str))
                    print("    rot {}, actual pose: {}".format(rr, frame))

                    # Record information.
                    l_center, r_center = self._save_images(this_dir, num, str(rr))
                    traj_poses.append((frame, l_center, r_center))

                # Back to the original rotation; I think this will make it work better.
                utils.move(arm=self.arm, pos=this_pos, rot=this_rot, SPEED_CLASS='Slow')
                intervals_in_traj += 1

            # Finished with this trajectory!
            print("Finished with trajectory. len(traj_poses): {}".format(len(traj_poses)))
            # FIX: open the pickle file in binary mode and close it deterministically
            # (the original left the handle open and used text mode).
            with open(this_dir + 'traj_poses_list.p', 'wb') as f:
                pickle.dump(traj_poses, f)
            traj_index += 1
Beispiel #18
0
# Double check these as needed. Here, the version numbers don't change that much.
# Actually, the version numbers should be the same here. Just double check please.
OUTVERSION = '01'  # for _storing_ stuff
VERSION = '01'  # for _loading_ stuff, changes with tool (+ yaw angle...)!!

# Other stuff.
# Pickle file where the data collected for the random-forest corrector is stored.
OUTPUT_FILE = 'config/calibration_results/data_for_rf_v' + OUTVERSION + '.p'
# Arm rotation associated with this calibration version (project helper).
ROTATION = utils.get_rotation_from_version(VERSION)
# Presumably an upper bound on the number of points to add — TODO confirm
# against the loop that uses it (not fully visible here).
MAX_NUM_ADD = 36

if __name__ == "__main__":
    print("Rotation: {}".format(ROTATION))
    arm, _, d = utils.initializeRobots()
    print("current arm position: {}".format(
        arm.get_current_cartesian_position()))
    utils.move(arm, utils.HOME_POS, ROTATION, 'Slow')
    arm.close_gripper()
    print("current arm position: {}".format(
        arm.get_current_cartesian_position()))

    # I will be IGNORING the random forest here, because it doesn't really work.
    params = pickle.load(
        open('config/mapping_results/params_matrices_v' + VERSION + '.p', 'r'))

    # Use the d.left_image for calibration.
    cv2.imwrite("images/left_image.jpg", d.left_image)
    image_original = cv2.imread("images/left_image.jpg")
    num_added = 0

    # Iterate through valid contours from the _left_ camera (we'll simulate right camera).
    for i, (cX, cY, approx, peri) in enumerate(d.left_contours):
Beispiel #19
0
import pickle
import sys
import tfx
import utilities as utils
np.set_printoptions(suppress=True)

if __name__ == "__main__":
    """ See the top of the file for program-wide arguments. """
    # Set up the robot arm and the camera/data handle, then relax the gripper.
    arm, _, d = utils.initializeRobots(sleep_time=2)
    arm.open_gripper(90)
    #print(arm.get_current_cartesian_position())
    #utils.home(arm)
    #arm.close_gripper()

    # Image, showing the human can push and modify things. Just put it at a specific
    # angle and let the human move it.

    # Next image, let's get five rotations and yaws visualized. Just do one by one
    # since I need pictures ... Remember, it's -90, -45, 0, 45, and 90 that I used.
    yaw = 90
    # Pitch and roll are interpolated from the yaw by a project helper.
    pitch, roll = utils.get_interpolated_pitch_and_roll(yaw)
    print("yaw: {}\npitch: {}\nroll: {}".format(yaw, pitch, roll))
    current_pos, current_rot = utils.lists_of_pos_rot_from_frame(
        arm.get_current_cartesian_position())
    arm.open_gripper(90)
    # Rotate in place: keep the current position, command the chosen angles.
    utils.move(arm, current_pos, [yaw, pitch, roll], 'Fast')

    # Read back the pose actually reached to see how close the rotation got.
    new_pos, new_rot = utils.lists_of_pos_rot_from_frame(
        arm.get_current_cartesian_position())
    print("New rotation: {}".format(new_rot))