    def __init__(self, world):
        self.world = world
        self.current_bin = "A"
        self.bin_state = {
            letter: {"done": False, "contents": []} for letter in "ABCDEFGHIJKL"
        }
        self.robotModel = world.robot(0)
        self.state = INITIAL_STATE
        self.config = self.robotModel.getConfig()
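        # Cache the arm link objects and map them to configuration-vector indices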
        self.left_arm_links = [self.robotModel.link(i) for i in LEFT_ARM_LINK_NAMES]
        self.right_arm_links = [self.robotModel.link(i) for i in RIGHT_ARM_LINK_NAMES]
        id_to_index = {self.robotModel.link(i).getID(): i for i in range(self.robotModel.numLinks())}
        self.left_arm_indices = [id_to_index[link.getID()] for link in self.left_arm_links]
        self.right_arm_indices = [id_to_index[link.getID()] for link in self.right_arm_links]

        self.Tcamera = se3.identity()
        self.Tvacuum = se3.identity()
        self.calibratedCameraXform = RIGHT_F200_CAMERA_CALIBRATED_XFORM
        self.object_com = [0, 0, 0]
        self.points1 = []
        self.points2 = []

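        # Point cloud model of the vacuum gripper, used for collision checking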
        self.vacuumPc = Geometry3D()
        self.vacuumPc.loadFile(VACUUM_PCD_FILE)

        # Set up the scale
        if REAL_SCALE:
            self.scale = scale.Scale()

        # Set up the serial connection to the Arduino that controls the vacuum
        if REAL_VACUUM:
            self.serial = serial.Serial()
            self.serial.port = ARDUINO_SERIAL_PORT
            self.serial.baudrate = 9600
            self.serial.open()
            if self.serial.isOpen():
                self.serial.write("hello")
                response = self.serial.read(self.serial.inWaiting())
            self.turnOffVacuum()

        # Load the pick task JSON: bin contents and per-bin work-order targets
        with open(PICK_JSON_PATH) as pick_json_file:
            raw_json_data = json.load(pick_json_file)
        for k in self.bin_state:
            self.bin_state[k]["contents"] = raw_json_data["bin_contents"]["bin_" + k]
        for my_dict in raw_json_data["work_order"]:
            bin_letter = my_dict["bin"][4]
            self.bin_state[bin_letter]["target"] = my_dict["item"]
        self.current_bin = planning.selectBin(self.bin_state)

    def loop(self):
        try:
            while True:
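                # One pass of the picking state machine: refresh the robot state and
                # sensor transforms, then dispatch on self.state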
                print OKBLUE + "Bin " + str(self.current_bin) + ": " + self.state + END_COLOR

                if self.state == "VISUAL_DEBUG":
                    self.object_com = [1.0839953170961105, -0.25145094946424207, 1.1241831909823194]
                    # self.set_model_right_arm( [0.1868786927065213, -1.567604604679142, 0.6922768776941961, 1.5862815343628953, -0.005567750307534711, -0.017979599494945674, 0.0035268645585939083])
                    self.load_real_robot_state()
                else:
                    self.load_real_robot_state()

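                # Recompute the camera and vacuum transforms from the current forearm and wrist poses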
                self.Tcamera = se3.mul(
                    self.robotModel.link("right_lower_forearm").getTransform(), self.calibratedCameraXform
                )
                self.Tvacuum = se3.mul(self.robotModel.link("right_wrist").getTransform(), VACUUM_POINT_XFORM)

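                # Reload the vacuum point cloud and place it at the current vacuum transform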
                self.vacuumPc = Geometry3D()
                self.vacuumPc.loadFile(VACUUM_PCD_FILE)
                self.vacuumPc.transform(self.Tvacuum[0], self.Tvacuum[1])

                if self.state == "DEBUG_COLLISION_CHECKER":
                    temp_planner = planning.LimbPlanner(self.world, self.vacuumPc)
                    print "Is this configuration collision free?"
                    print temp_planner.check_collision_free("right")
                    sys.stdout.flush()

                elif self.state == "FAKE_PATH_PLANNING":
                    self.object_com = [1.114, -0.077, 1.412]
                    self.right_arm_ik(self.object_com)
                    self.Tcamera = se3.mul(
                        self.robotModel.link("right_lower_forearm").getTransform(), self.calibratedCameraXform
                    )
                    self.Tvacuum = se3.mul(self.robotModel.link("right_wrist").getTransform(), VACUUM_POINT_XFORM)
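                    # Debug-only state: hold here indefinitely once the arm is posed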
                    time.sleep(2342340)

                elif self.state == "START":
                    self.state = "MOVE_TO_SCAN_BIN"

                elif self.state == "MOVE_TO_SCAN_BIN":
                    for milestone in eval("Q_BEFORE_SCAN_" + self.current_bin):
                        print "Moving to " + str(milestone)
                        motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(milestone))
                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)
                    motion.robot.right_mq.appendLinear(
                        MOVE_TIME, planning.cleanJointConfig(eval("Q_SCAN_BIN_" + self.current_bin))
                    )
                    self.state = "MOVING_TO_SCAN_BIN"

                elif self.state == "MOVING_TO_SCAN_BIN":
                    if not motion.robot.right_mq.moving():
                        self.wait_start_time = time.time()
                        self.state = "WAITING_TO_SCAN_BIN"

                elif self.state == "WAITING_TO_SCAN_BIN":
                    if time.time() - self.wait_start_time > SCAN_WAIT_TIME:
                        if REAL_PERCEPTION:
                            self.state = "SCANNING_BIN"
                        else:
                            self.state = "FAKE_SCANNING_BIN"

                elif self.state == "SCANNING_BIN":
                    print "Waiting for message from camera"
                    cloud = rospy.wait_for_message(ROS_DEPTH_TOPIC, PointCloud2)

                    if perception.isCloudValid(cloud):
                        cloud = perception.convertPc2ToNp(cloud)
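                        # Subsample the cloud (every STEP-th point) to keep processing tractable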
                        np_cloud = cloud[::STEP]

                        self.points1 = []
                        for point in np_cloud:
                            transformed = se3.apply(self.Tcamera, point)
                            self.points1.append(transformed)

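                        # Remove shelf points so only the bin contents remain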
                        np_cloud = perception.subtractShelf(np_cloud, self.current_bin)

                        self.points2 = []
                        for point in np_cloud:
                            transformed = se3.apply(self.Tcamera, point)
                            self.points2.append(transformed)

                        self.object_com = se3.apply(self.Tcamera, perception.com(np_cloud))

                        if PRINT_BLOBS:
                            print np_cloud

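                        # Hand the cloud to the external segmentation process: save it as a
                        # .mat file and write the "go" flag file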
                        sio.savemat(CLOUD_MAT_PATH, {"cloud": np_cloud})
                        fo = open(CHENYU_GO_PATH, "w")
                        fo.write("chenyugo")
                        fo.close()

                        if CALIBRATE_CAMERA:
                            self.state = "CALIBRATE_CAMERA"
                        elif SEGMENT:
                            self.state = "WAITING_FOR_SEGMENTATION"
                        else:
                            self.state = "MOVE_TO_GRASP_OBJECT"
                    else:
                        print FAIL_COLOR + "Got an invalid cloud, trying again" + END_COLOR

                elif self.state == "FAKE_SCANNING_BIN":
                    self.object_com = [1.1114839836097854, -0.6087936130127559, 0.9899267043340634]

                    if DRY_RUN:
                        raw_input("Hit enter to continue: ")

                    self.state = "MOVE_TO_GRASP_OBJECT"

                elif self.state == "CALIBRATE_CAMERA":
                    self.calibrateCamera()
                    self.state = "SCANNING_BIN"

                elif self.state == "WAITING_FOR_SEGMENTATION":
                    if os.path.isfile(CHENYU_DONE_PATH):
                        os.remove(CHENYU_DONE_PATH)
                        self.state = "POSTPROCESS_SEGMENTATION"

                elif self.state == "POSTPROCESS_SEGMENTATION":
                    object_blobs = []
                    time.sleep(5)
                    for i in range(1, 20):
                        seg_file_path = MAT_PATH + "seg" + str(i) + ".mat"
                        print seg_file_path
                        if os.path.isfile(seg_file_path):
                            print seg_file_path + " exists"
                            mat_contents = sio.loadmat(seg_file_path)
                            r = mat_contents["r"]
                            r = r[r[:, 1] != 0, :]
                            object_blobs.append(r)
                            os.remove(seg_file_path)
                    if PRINT_BLOBS:
                        print "============="
                        print "object blobs"
                        print object_blobs
                        print "============="

                    if len(object_blobs) == 0:
                        self.state = "BIN_DONE"
                    else:
                        object_list = [
                            ITEM_NUMBERS[item_str] for item_str in self.bin_state[self.current_bin]["contents"]
                        ]
                        target = ITEM_NUMBERS[self.bin_state[self.current_bin]["target"]]

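                        # Match each blob against the histograms of the bin's known items,
                        # keeping the best-scoring cloud per label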
                        histogram_dict = perception.loadHistogram(object_list)
                        cloud_label = {}  # key is the label of object, value is cloud points
                        label_score = {}  # key is the label, value is the current score for the object
                        for object_cloud in object_blobs:
                            if PRINT_BLOBS:
                                print "====================="
                                print "object_cloud"
                                print object_cloud
                                print "====================="
                            object_cloud = perception.resample(cloud, object_cloud, 3)
                            label, score = perception.objectMatch(object_cloud, histogram_dict)
                            if label in cloud_label:
                                if label_score[label] < score:
                                    label_score[label] = score
                                    cloud_label[label] = object_cloud
                            else:
                                cloud_label[label] = object_cloud
                                label_score[label] = score

                        if PRINT_LABELS_AND_SCORES:
                            print cloud_label
                            print "============="
                            print label_score
                        if target in cloud_label:
                            self.object_com = se3.apply(self.Tcamera, perception.com(cloud_label[target]))

                            self.points1 = []
                            for point in cloud_label[target]:
                                transformed = se3.apply(self.Tcamera, point)
                                self.points1.append(transformed)

                            self.points2 = []
                        else:
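                            # The target was not among the labeled blobs: score every blob against
                            # the target's histogram alone and grasp the best match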
                            cloud_score = {}
                            histogram_dict = perception.loadHistogram([target])
                            for object_cloud in object_blobs:
                                object_cloud = perception.resample(cloud, object_cloud, 3)
                                label, score = perception.objectMatch(object_cloud, histogram_dict)
                                cloud_score[score] = object_cloud
                            sorted_cloud = sorted(cloud_score.items(), key=operator.itemgetter(0), reverse=True)
                            score = sorted_cloud[0][0]
                            com = perception.com(sorted_cloud[0][1])
                            self.points1 = []
                            self.points2 = []
                            for point in sorted_cloud[0][1]:
                                transformed = se3.apply(self.Tcamera, point)
                                self.points1.append(transformed)
                            self.object_com = se3.apply(self.Tcamera, com)
                        self.state = "MOVE_TO_GRASP_OBJECT"

                elif self.state == "MOVE_TO_GRASP_OBJECT":
                    for milestone in eval("Q_AFTER_SCAN_" + self.current_bin):
                        print "Moving to " + str(milestone)
                        motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(milestone))
                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)

                    motion.robot.right_mq.appendLinear(
                        MOVE_TIME, planning.cleanJointConfig(eval("Q_IK_SEED_" + self.current_bin))
                    )
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)

                    self.load_real_robot_state()
                    self.Tvacuum = se3.mul(self.robotModel.link("right_wrist").getTransform(), VACUUM_POINT_XFORM)

                    print WARNING_COLOR + str(self.object_com) + END_COLOR
                    self.object_com = vectorops.add(self.object_com, COM_ADJUSTMENT)

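                    # Use the midpoint between the current vacuum point and the object's
                    # center of mass as an intermediate milestone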
                    current_vacuum_point = se3.apply(self.Tvacuum, [0, 0, 0])
                    milestone = vectorops.add(current_vacuum_point, self.object_com)
                    milestone = vectorops.div(milestone, 2)

                    if DRY_RUN:
                        self.state = "MOVING_TO_GRASP_OBJECT"
                    else:
                        if self.right_arm_ik(milestone):
                            destination = self.robotModel.getConfig()
                            self.q_milestone = [destination[v] for v in self.right_arm_indices]
                            print WARNING_COLOR + "IK config for " + str(milestone) + ": " + str(
                                self.q_milestone
                            ) + END_COLOR
                            motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(self.q_milestone))
                        else:
                            print FAIL_COLOR + "Error: IK failed" + END_COLOR
                            sys.stdout.flush()
                            motion.robot.right_mq.appendLinear(
                                MOVE_TIME, planning.cleanJointConfig(eval("Q_IK_SEED_" + self.current_bin))
                            )
                            while motion.robot.right_mq.moving():
                                time.sleep(1)
                            time.sleep(1)

                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)

                        if self.right_arm_ik(self.object_com):
                            destination = self.robotModel.getConfig()
                            print WARNING_COLOR + "IK config for " + str(self.object_com) + ": " + str(
                                [destination[v] for v in self.right_arm_indices]
                            ) + END_COLOR
                            motion.robot.right_mq.appendLinear(
                                MOVE_TIME, planning.cleanJointConfig([destination[v] for v in self.right_arm_indices])
                            )
                            self.state = "MOVING_TO_GRASP_OBJECT"
                        else:
                            print FAIL_COLOR + "Error: IK failed" + END_COLOR
                            sys.stdout.flush()
                            motion.robot.right_mq.appendLinear(
                                MOVE_TIME, planning.cleanJointConfig(eval("Q_IK_SEED_" + self.current_bin))
                            )
                            while motion.robot.right_mq.moving():
                                time.sleep(1)
                            time.sleep(1)

                elif self.state == "MOVING_TO_GRASP_OBJECT":
                    if not motion.robot.right_mq.moving():
                        time.sleep(1)
                        self.state = "GRASP_OBJECT"

                elif self.state == "GRASP_OBJECT":
                    move_target = se3.apply(self.Tvacuum, [0, 0, 0])
                    move_target[2] = move_target[2] - GRASP_MOVE_DISTANCE - (MEAN_OBJ_HEIGHT / 2)
                    if self.right_arm_ik(move_target):
                        self.turnOnVacuum()
                        destination = self.robotModel.getConfig()
                        motion.robot.right_mq.appendLinear(
                            MOVE_TIME, planning.cleanJointConfig([destination[v] for v in self.right_arm_indices])
                        )
                    else:
                        print FAIL_COLOR + "Error: IK failed" + END_COLOR
                        sys.stdout.flush()
                        motion.robot.right_mq.appendLinear(
                            MOVE_TIME, planning.cleanJointConfig(eval("Q_IK_SEED_" + self.current_bin))
                        )
                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)
                    self.wait_start_time = time.time()
                    self.state = "WAITING_TO_GRASP_OBJECT"

                elif self.state == "WAITING_TO_GRASP_OBJECT":
                    if time.time() - self.wait_start_time > GRASP_WAIT_TIME:
                        self.state = "MOVE_UP_BEFORE_RETRACT"

                elif self.state == "MOVE_UP_BEFORE_RETRACT":
                    move_target = se3.apply(self.Tvacuum, [0, 0, 0])
                    move_target[2] = move_target[2] + GRASP_MOVE_DISTANCE + (MEAN_OBJ_HEIGHT / 2)
                    if self.right_arm_ik(move_target):
                        self.turnOnVacuum()
                        destination = self.robotModel.getConfig()
                        motion.robot.right_mq.appendLinear(
                            MOVE_TIME, planning.cleanJointConfig([destination[v] for v in self.right_arm_indices])
                        )
                    else:
                        print FAIL_COLOR + "Error: IK failed" + END_COLOR
                        sys.stdout.flush()
                        motion.robot.right_mq.appendLinear(
                            MOVE_TIME, planning.cleanJointConfig(eval("Q_IK_SEED_" + self.current_bin))
                        )
                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)
                    self.state = "MOVE_TO_STOW_OBJECT"

                elif self.state == "MOVE_TO_STOW_OBJECT":
                    motion.robot.right_mq.appendLinear(
                        MOVE_TIME, planning.cleanJointConfig(eval("Q_IK_SEED_" + self.current_bin))
                    )
                    while motion.robot.right_mq.moving():
                        time.sleep(1)
                    time.sleep(1)

                    if not DRY_RUN:
                        motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(self.q_milestone))
                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)

                    for milestone in eval("Q_AFTER_GRASP_" + self.current_bin):
                        print "Moving to " + str(milestone)
                        motion.robot.right_mq.appendLinear(MOVE_TIME, planning.cleanJointConfig(milestone))
                        while motion.robot.right_mq.moving():
                            time.sleep(1)
                        time.sleep(1)
                    motion.robot.right_mq.appendLinear(MOVE_TIME, Q_STOW)
                    self.state = "MOVING_TO_STOW_OBJECT"

                elif self.state == "MOVING_TO_STOW_OBJECT":
                    if not motion.robot.right_mq.moving():
                        self.state = "STOWING_OBJECT"

                elif self.state == "STOWING_OBJECT":
                    self.turnOffVacuum()
                    self.wait_start_time = time.time()
                    self.state = "WAITING_FOR_SECURE_STOW"

                elif self.state == "WAITING_FOR_SECURE_STOW":
                    if time.time() - self.wait_start_time > GRASP_WAIT_TIME:
                        self.state = "BIN_DONE" if SELECT_REAL_BIN else "DONE"

                elif self.state == "BIN_DONE":
                    self.bin_state[self.current_bin]["done"] = True
                    self.current_bin = planning.selectBin(self.bin_state)
                    if self.current_bin is None:
                        self.state = "DONE"
                    else:
                        self.state = "START"

                elif self.state == "DONE":
                    print "actual vacuum point: ", se3.apply(self.Tvacuum, [0, 0, 0])

                else:
                    print FAIL_COLOR + "Unknown state" + END_COLOR

                time.sleep(1)
        except KeyboardInterrupt:
            motion.robot.stopMotion()
            sys.exit(0)