def load_settings_json(self, path_to_settings_file):
        """
        Load the settings stored in the JSON file

        """

        file = open(path_to_settings_file, 'r')
        json_text = file.read().strip()
        file.close()

        for (device_serial, device) in self._enabled_devices.items():
            # Get the active profile and load the json file which contains settings readable by the realsense
            device = device.pipeline_profile.get_device()
            advanced_mode = rs.rs400_advanced_mode(device)
            advanced_mode.load_json(json_text)
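For reference, here is a minimal standalone sketch of the same operation on a single connected camera; it assumes pyrealsense2 is importable as rs, that a D4xx device supporting advanced mode is attached, and the preset file name is purely illustrative:

import pyrealsense2 as rs

def load_preset(path):
    """Load a JSON preset into the first connected RealSense device."""
    with open(path, 'r') as f:
        json_text = f.read().strip()

    dev = rs.context().query_devices()[0]         # first connected device
    advanced_mode = rs.rs400_advanced_mode(dev)   # only valid on D4xx devices
    if not advanced_mode.is_enabled():
        advanced_mode.toggle_advanced_mode(True)  # the device resets after this call
    advanced_mode.load_json(json_text)

# load_preset("custom_preset.json")               # illustrative file name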
def get_debug_device(serial_no):
    ctx = rs.context()
    devices = ctx.query_devices()
    found_dev = False
    for dev in devices:
        if len(serial_no) == 0 or serial_no == dev.get_info(rs.camera_info.serial_number):
            found_dev = True
            break
    if not found_dev:
        print('No RealSense device found' + ('.' if len(serial_no) == 0 else ' with serial number: ' + serial_no))
        return 0

    # Set to advanced mode:
    advanced = rs.rs400_advanced_mode(dev)
    if not advanced.is_enabled():
        advanced.toggle_advanced_mode(True)

    # Print some basic information about the device
    print('  Device PID: ',  dev.get_info(rs.camera_info.product_id))
    print('  Device name: ',  dev.get_info(rs.camera_info.name))
    print('  Serial number: ',  dev.get_info(rs.camera_info.serial_number))
    print('  Firmware version: ',  dev.get_info(rs.camera_info.firmware_version))
    debug = rs.debug_protocol(dev)
    return debug
DS5_product_ids = ["0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF", "0B00", "0B01", "0B03", "0B07"]

def find_device_that_supports_advanced_mode():
    ctx = rs.context()
    ds5_dev = rs.device()
    devices = ctx.query_devices()
    for dev in devices:
        if dev.supports(rs.camera_info.product_id) and str(dev.get_info(rs.camera_info.product_id)) in DS5_product_ids:
            if dev.supports(rs.camera_info.name):
                print("Found device that supports advanced mode:", dev.get_info(rs.camera_info.name))
            return dev
    raise Exception("No device that supports advanced mode was found")

try:
    dev = find_device_that_supports_advanced_mode()
    advnc_mode = rs.rs400_advanced_mode(dev)
    print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")

    # Loop until we successfully enable advanced mode
    while not advnc_mode.is_enabled():
        print("Trying to enable advanced mode...")
        advnc_mode.toggle_advanced_mode(True)
        # At this point the device will disconnect and re-connect.
        print("Sleeping for 5 seconds...")
        time.sleep(5)
        # The 'dev' object will become invalid and we need to initialize it again
        dev = find_device_that_supports_advanced_mode()
        advnc_mode = rs.rs400_advanced_mode(dev)
        print("Advanced mode is", "enabled" if advnc_mode.is_enabled() else "disabled")

    # Get each control's current value
Example #4
    def find_device_that_supports_advanced_mode():
        ctx = rs.context()
        ds5_dev = rs.device()
        devices = ctx.query_devices()
        for dev in devices:
            if dev.supports(rs.camera_info.product_id) and str(
                    dev.get_info(
                        rs.camera_info.product_id)) in DS5_product_ids:
                if dev.supports(rs.camera_info.name):
                    print("Found device that supports advanced mode:",
                          dev.get_info(rs.camera_info.name))
                return dev
        raise Exception("No device that supports advanced mode was found")

    dev = find_device_that_supports_advanced_mode()
    advnc_mode = rs.rs400_advanced_mode(dev)
    print("Advanced mode is",
          "enabled" if advnc_mode.is_enabled() else "disabled")

    # Loop until we successfully enable advanced mode
    while not advnc_mode.is_enabled():
        print("Trying to enable advanced mode...")
        advnc_mode.toggle_advanced_mode(True)
        # At this point the device will disconnect and re-connect.
        print("Sleeping for 5 seconds...")
        time.sleep(5)
        # The 'dev' object will become invalid and we need to initialize it again
        dev = find_device_that_supports_advanced_mode()
        advnc_mode = rs.rs400_advanced_mode(dev)
        print("Advanced mode is",
              "enabled" if advnc_mode.is_enabled() else "disabled")
Example #5
def realsense_configure_setting(setting_file):
    device = find_device_that_supports_advanced_mode()
    advnc_mode = rs.rs400_advanced_mode(device)
    realsense_enable_advanced_mode(advnc_mode)
    realsense_load_settings_file(advnc_mode, setting_file)
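The helpers realsense_enable_advanced_mode and realsense_load_settings_file are not shown in this example; the sketch below is an assumed implementation written to match the other examples on this page (note the original call site ignores any return value, so the re-acquired handle is only illustrative):

import time
import pyrealsense2 as rs

def realsense_enable_advanced_mode(advnc_mode):
    """Enable advanced mode; the device disconnects and re-enumerates."""
    while not advnc_mode.is_enabled():
        advnc_mode.toggle_advanced_mode(True)
        time.sleep(5)                                   # wait for the device reset
        dev = find_device_that_supports_advanced_mode()
        advnc_mode = rs.rs400_advanced_mode(dev)
    return advnc_mode

def realsense_load_settings_file(advnc_mode, setting_file):
    """Read a JSON preset file and hand it to the advanced-mode API."""
    with open(setting_file, 'r') as f:
        advnc_mode.load_json(f.read().strip())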
        # (start of this example is truncated: pipelines 1 and 2 were configured above
        #  with the stream settings held in json_temp, e.g. json_temp['stream-fps'])

        # Start streaming
        profile_1 = pipeline_1.start(config_1)
        profile_2 = pipeline_2.start(config_2)

        with open("Settings_temp.json", \
                  'w') as file:
            json.dump(json_temp, file, indent=2)

        with open("Settings_temp.json", \
                  'r') as file:
            json_text = file.read()
            # Get the active profile and load the json file which contains settings
            # readable by the realsense
            advanced_mode_1 = rs.rs400_advanced_mode(profile_1.get_device())
            advanced_mode_1.load_json(json_text)
            advanced_mode_2 = rs.rs400_advanced_mode(profile_2.get_device())
            advanced_mode_2.load_json(json_text)

    # Load in File if .bag file was selected
    if technique == 2:
        # Configure depth and color streams...
        # ...from Camera 1
        pipeline_1 = rs.pipeline()
        config_1 = rs.config()

        # ...from Camera 2
        pipeline_2 = rs.pipeline()
        config_2 = rs.config()
Example #7
def rs_load_json(devices, file):
    with open(file, 'r') as f:
        json_text = f.read().strip()
    for (pipe, profile) in devices.values():
        advanced_mode = rs.rs400_advanced_mode(profile.get_device())
        advanced_mode.load_json(json_text)
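A hedged usage sketch for rs_load_json, assuming devices maps serial numbers to (pipeline, pipeline_profile) pairs as in the multi-camera snippets above; the serial number and preset file name are illustrative:

import pyrealsense2 as rs

pipe = rs.pipeline()
profile = pipe.start(rs.config())                 # default streams from the first camera
devices = {"0000000000": (pipe, profile)}         # illustrative serial number
rs_load_json(devices, "d435_high_accuracy.json")  # illustrative preset; advanced mode must already be enabled
pipe.stop()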
    def __init__(self, lower=(96, 59, 53,), upper=(160, 255, 130), callback=None) -> None:
        self.lower, self.upper = lower, upper

        # os.system('rosnode kill image_server')

        self.pipeline = rs.pipeline()
        config = rs.config()

        config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
        config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

        # Set depth units; smaller units give finer depth resolution at sub-5 m range
        # (depthUnits is in micrometres: 500 -> 0.5 mm per depth unit, default is 1000)
        device: rs.device = rs.context().query_devices()[0]
        advnc_mode = rs.rs400_advanced_mode(device)
        depth_table_control_group: STDepthTableControl = advnc_mode.get_depth_table()
        depth_table_control_group.depthUnits = 500
        advnc_mode.set_depth_table(depth_table_control_group)

        # Start streaming
        profile = self.pipeline.start(config)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        depth_sensor: DepthSensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        print("Depth Scale is: ", depth_scale)

        align_to = rs.stream.color
        self.align = rs.align(align_to)

        for i in range(10):
            try:
                sensors = sum([dev.query_sensors() for dev in rs.context().query_devices()], [])
                color_sensor = next(
                    sensor for sensor in sensors if sensor.get_info(rs.camera_info.name) == "RGB Camera")
                break
            except Exception as e:
                print(e)
                sleep(0.3)

        print("Setting RGB camera sensor settings")
        # exposure: 166.0
        # white balance: 4600.0
        # gain: 64.0
        # auto exposure enabled: 1.0
        color_sensor.set_option(rs.option.saturation, 60)
        color_sensor.set_option(rs.option.exposure, 166)
        color_sensor.set_option(rs.option.enable_auto_exposure, 0)
        color_sensor.set_option(rs.option.white_balance, 4600)
        color_sensor.set_option(rs.option.enable_auto_white_balance, 0)
        color_sensor.set_option(rs.option.gain, 64)
        align = rs.align(align_to)

        # color_sensor.set_option(rs.option.enable_auto_exposure, 0)
        print("auto exposure enabled: {}".format(color_sensor.get_option(rs.option.enable_auto_exposure)))
        print("exposure: {}".format(color_sensor.get_option(rs.option.exposure)))  # 166
        print("white balance: {}".format(color_sensor.get_option(rs.option.white_balance)))
        print("gain: {}".format(color_sensor.get_option(rs.option.gain)))  # 64

        self.average = StreamingMovingAverage(20)
        self.average_raw = StreamingMovingAverage(20)

        self.average_fps = StreamingMovingAverage(20)
        self.average_area = StreamingMovingAverage(20)

        self.color = None
        self.distance = None
        self.fps = 0
        self.area = 0
        sleep(2)
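Setting sensor options blindly, as above, raises an error if a value falls outside the sensor's supported range; the helper below is a small sketch that clamps to the reported range first (the helper name is an assumption, not part of pyrealsense2):

import pyrealsense2 as rs

def set_option_clamped(sensor, option, value):
    """Clamp value into the option's reported range before setting it."""
    rng = sensor.get_option_range(option)  # min / max / step / default
    sensor.set_option(option, max(rng.min, min(rng.max, value)))

# e.g. set_option_clamped(color_sensor, rs.option.exposure, 166)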
    def __init__(self, scanDuration, rootDir, sharpening, configFile, fps, width, height, visualPreset, laserPower, exposure, gain):

        self.scanDuration = scanDuration
        self.fps = fps
        self.rootDir = rootDir
        self.sharpening = sharpening
        self.width = width
        self.height = height
        self.configFile = configFile
        self.captureName = None
        self.closed = False

        if self.configFile:

            try:

                with open(self.configFile) as file:

                    configDict = json.load(file)
                    configString = json.dumps(configDict)

                    self.fps = int(configDict["stream-fps"])
                    self.width = int(configDict["stream-width"])
                    self.height = int(configDict["stream-height"])

                    Logger.printSuccess("Parameters successfully loaded!")

            except Exception:

                Logger.printError("Could not read the RealSense configuration file")

        self.window = cv2.namedWindow("Capture", cv2.WINDOW_AUTOSIZE)

        Logger.printInfo("Initializing scanner ...")

        try:

            #Initialization
            self.pipeline = rs2.pipeline()
            self.config = rs2.config()
            #Enable streams
            self.config.enable_stream(rs2.stream.depth, self.width, self.height, rs2.format.z16, self.fps)
            self.config.enable_stream(rs2.stream.color, self.width, self.height, rs2.format.bgr8, self.fps)
            #Start streaming
            self.profile = self.pipeline.start(self.config)

        except Exception as e:

            Logger.printError("Unable to start the stream, exiting.\nException -> " + str(e))
            exit()



        self.device = self.profile.get_device()
        self.depthSensor = self.device.first_depth_sensor()

        try:

            #Set parameters

            if configFile:

                advancedMode = rs2.rs400_advanced_mode(self.device)

                print(configString)
                advancedMode.load_json(json_content=configString)

            else:

                """
                0 -> Custom
                1 -> Default
                2 -> Hand
                3 -> High Accuracy
                4 -> High Density
                5 -> Medium Density
                """
                self.depthSensor.set_option(rs2.option.visual_preset, visualPreset)
                self.depthSensor.set_option(rs2.option.laser_power, laserPower)
                self.depthSensor.set_option(rs2.option.gain, gain)
                self.depthSensor.set_option(rs2.option.exposure, exposure)
                #self.depthSensor.set_option(rs2.option.max_distance, 4.0)

        except Exception as e:

            Logger.printError("Unable to set a parameter on the RealSense; continuing.\nException -> " + str(e))

        self.intrinsics = self.profile.get_stream(rs2.stream.depth).as_video_stream_profile().get_intrinsics()

        #Create an align object -> aligning depth frames to color frames
        self.aligner = rs2.align(rs2.stream.color)

        Logger.printSuccess("Scanner successfully initialized !")

        self.depthDataset = []
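The visual-preset numbering documented in the example above varies between device SKUs and firmware versions; the sketch below queries the names directly from the connected depth sensor, assuming the option-introspection calls (get_option_range, get_option_value_description) are available in your pyrealsense2 build:

import pyrealsense2 as rs

def print_visual_presets(depth_sensor):
    """List the visual-preset values the connected sensor actually reports."""
    rng = depth_sensor.get_option_range(rs.option.visual_preset)
    for value in range(int(rng.min), int(rng.max) + 1):
        name = depth_sensor.get_option_value_description(rs.option.visual_preset, value)
        print(value, "->", name)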
Example #10
def test_advanced_mode():
    ## License: Apache 2.0. See LICENSE file in root directory.
    ## Copyright(c) 2017 Intel Corporation. All Rights Reserved.

    #####################################################
    ##          rs400 advanced mode tutorial           ##
    #####################################################

    # First import the library
    import time
    import json

    DS5_product_ids = [
        "0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF", "0B00",
        "0B01", "0B03", "0B07"
    ]

    def find_device_that_supports_advanced_mode():
        ctx = rs.context()
        ds5_dev = rs.device()
        devices = ctx.query_devices()
        for dev in devices:
            if dev.supports(rs.camera_info.product_id) and str(
                    dev.get_info(
                        rs.camera_info.product_id)) in DS5_product_ids:
                if dev.supports(rs.camera_info.name):
                    print("Found device that supports advanced mode:",
                          dev.get_info(rs.camera_info.name))
                return dev
        raise Exception("No device that supports advanced mode was found")

    try:
        dev = find_device_that_supports_advanced_mode()
        advnc_mode = rs.rs400_advanced_mode(dev)
        print("Advanced mode is",
              "enabled" if advnc_mode.is_enabled() else "disabled")

        # Loop until we successfully enable advanced mode
        while not advnc_mode.is_enabled():
            print("Trying to enable advanced mode...")
            advnc_mode.toggle_advanced_mode(True)
            # At this point the device will disconnect and re-connect.
            print("Sleeping for 5 seconds...")
            time.sleep(5)
            # The 'dev' object will become invalid and we need to initialize it again
            dev = find_device_that_supports_advanced_mode()
            advnc_mode = rs.rs400_advanced_mode(dev)
            print("Advanced mode is",
                  "enabled" if advnc_mode.is_enabled() else "disabled")
        optionfile = "/home/jasy/FDexter/RealSense/garnichtsoschlechtesetting.json"
        #optionfile = "/home/jasy/FDexter/RealSense/holefilletc.json"
        with open(optionfile, "r") as read_file:
            print('oh')
            settings = json.load(read_file)
            #settings = read_file
            print(settings)
            #json_string = str(settings).replace('u', 'bla')
            if type(next(iter(settings))) != str:
                # Python 2: convert unicode keys/values to utf-8 strings
                settings = {
                    k.encode('utf-8'): v.encode("utf-8")
                    for k, v in settings.items()
                }
            # The C++ JSON parser needs double quotes, so convert the pythonic repr
            json_string = str(settings).replace("'", '\"')
            advnc_mode.load_json(json_string)
    # # Get each control's current value
    #print("Depth Control: \n", advnc_mode.get_depth_control())
    # print("RSM: \n", advnc_mode.get_rsm())
    # print("RAU Support Vector Control: \n", advnc_mode.get_rau_support_vector_control())
    # print("Color Control: \n", advnc_mode.get_color_control())
    # print("RAU Thresholds Control: \n", advnc_mode.get_rau_thresholds_control())
    # print("SLO Color Thresholds Control: \n", advnc_mode.get_slo_color_thresholds_control())
    # print("SLO Penalty Control: \n", advnc_mode.get_slo_penalty_control())
    # print("HDAD: \n", advnc_mode.get_hdad())
    # print("Color Correction: \n", advnc_mode.get_color_correction())
    # print("Depth Table: \n", advnc_mode.get_depth_table())
    # print("Auto Exposure Control: \n", advnc_mode.get_ae_control())
    # print("Census: \n", advnc_mode.get_census())
    except Exception as e:
        print(e)
        pass
Example #11
def loadConfiguration(self):
	self.dev = self.cfg.get_device()
	self.advnc_mode = rs.rs400_advanced_mode(self.dev)

	print("Advanced mode is", "enabled" if self.advnc_mode.is_enabled() else "disabled")
	self.advnc_mode.load_json(self.json_string)
Example #12
def activate_rs_settings():
    try:
        dev = find_device_that_supports_advanced_mode()
        advnc_mode = rs.rs400_advanced_mode(dev)
        print("Advanced mode is",
              "enabled" if advnc_mode.is_enabled() else "disabled")

        # Loop until we successfully enable advanced mode
        while not advnc_mode.is_enabled():
            print("Trying to enable advanced mode...")
            advnc_mode.toggle_advanced_mode(True)
            # At this point the device will disconnect and re-connect.
            print("Sleeping for 5 seconds...")
            time.sleep(5)
            # The 'dev' object will become invalid and we need to initialize it again
            dev = find_device_that_supports_advanced_mode()
            advnc_mode = rs.rs400_advanced_mode(dev)
            print("Advanced mode is",
                  "enabled" if advnc_mode.is_enabled() else "disabled")

        # Get each control's current value
        print("Depth Control: '\n'", advnc_mode.get_depth_control())
        print("RSM: /n", advnc_mode.get_rsm())
        print("RAU Support Vector Control: /n",
              advnc_mode.get_rau_support_vector_control())
        print("Color Control: /n", advnc_mode.get_color_control())
        print("RAU Thresholds Control: /n",
              advnc_mode.get_rau_thresholds_control())
        print("SLO Color Thresholds Control: /n",
              advnc_mode.get_slo_color_thresholds_control())
        print("SLO Penalty Control: /n", advnc_mode.get_slo_penalty_control())
        print("HDAD: /n", advnc_mode.get_hdad())
        print("Color Correction: /n", advnc_mode.get_color_correction())
        print("Depth Table: /n", advnc_mode.get_depth_table())
        print("Auto Exposure Control: /n", advnc_mode.get_ae_control())
        print("Census: /n", advnc_mode.get_census())

        # To get the minimum and maximum value of each control use the mode value:
        query_min_values_mode = 1
        query_max_values_mode = 2
        current_std_depth_control_group = advnc_mode.get_depth_control()
        min_std_depth_control_group = advnc_mode.get_depth_control(
            query_min_values_mode)
        max_std_depth_control_group = advnc_mode.get_depth_control(
            query_max_values_mode)
        print("Depth Control Min Values: \n ", min_std_depth_control_group)
        print("Depth Control Max Values: \n ", max_std_depth_control_group)

        # Set some control with a new (median) value
        current_std_depth_control_group.scoreThreshA = int(
            (max_std_depth_control_group.scoreThreshA -
             min_std_depth_control_group.scoreThreshA) / 2)
        advnc_mode.set_depth_control(current_std_depth_control_group)
        print("After Setting new value, Depth Control: \n",
              advnc_mode.get_depth_control())

        # Serialize all controls to a Json string
        serialized_string = advnc_mode.serialize_json()
        print("Controls as JSON: \n", serialized_string)

        # camsettings2310
        with open("config.json") as file:

            as_json_object = json.load(file)

            # We can also load controls from a json string
            # The C++ JSON parser requires double-quotes for the json object so we need
            #  to replace the single quote of the pythonic json to double-quotes
            json_string = str(as_json_object).replace("'", '\"')

            advnc_mode.load_json(json_string)
            print("------CONFIG LOADED---------")
    except Exception as e:
        print(e)
        pass
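A side note on the quote replacement used above: json.dumps already emits valid double-quoted JSON, so a sketch like the following (reusing the same advnc_mode handle and config.json) avoids the fragile str(...).replace step:

import json

with open("config.json") as f:
    advnc_mode.load_json(json.dumps(json.load(f)))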
Example #13
    def run(self):
        global sharedIsrunning, sharedIsrunningLock
        global sharedVision, sharedVisionLock

        # Load camera advanced settings
        ctx = rs.context()
        devices = ctx.query_devices()
        dev = devices[0]
        advnc_mode = rs.rs400_advanced_mode(dev)

        while not advnc_mode.is_enabled():
            print("Activating advanced mode")
            advnc_mode.toggle_advanced_mode(True)
            time.sleep(5)

        with open('cameraSettings.json', 'r') as json_file:
            as_json_object = json.load(json_file)

        # For Python 2, the values in 'as_json_object' dict need to be converted from unicode object to utf-8
        if type(next(iter(as_json_object))) != str:
            as_json_object = {
                k.encode('utf-8'): v.encode("utf-8")
                for k, v in as_json_object.items()
            }
        # The C++ JSON parser requires double-quotes for the json object so we need
        # to replace the single quote of the pythonic json to double-quotes
        json_string = str(as_json_object).replace("'", '\"')
        advnc_mode.load_json(json_string)

        #Create a config and configure the pipeline to stream
        # color and depth streams - should be kept same, otherwise projection won't work
        config = rs.config()
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)
        config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 15)

        # Define the codec and create VideoWriter object
        if self.config.isRecording:
            fourcc = cv2.VideoWriter_fourcc(*'x264')
            videoFPS = 15.
            videoTimer = self.config.startTime
            videoFrames = 0
            videoColored = cv2.VideoWriter(self.config.exportName + '.avi',
                                           fourcc, videoFPS, (1280, 720))
            config.enable_record_to_file(self.config.exportName + '.bag')
            exportJoints = open(self.config.exportName + "_joints.py", 'w')
            exportJoints.write("times = []\n")
            exportJoints.write("names = []\n")
            exportJoints.write("keys = []\n")

            exportKeypoints = open(self.config.exportName + "_keypoints.py",
                                   'w')
            exportKeypoints.write("times = []\n")
            exportKeypoints.write("keypoints3D = []\n")
            exportKeypoints.write("keypoints = []\n")

        # Create a pipeline
        pipeline = rs.pipeline()

        # Start streaming
        profile = pipeline.start(config)

        # Getting the depth sensor's depth scale (see rs-align example for explanation)
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()

        # Create an align object
        # rs.align allows us to perform alignment of depth frames to others frames
        # The "align_to" is the stream type to which we plan to align depth frames.
        align_to = rs.stream.color
        align = rs.align(align_to)

        # Starting OpenPose
        opWrapper = op.WrapperPython()

        # Custom Params (refer to include/openpose/flags.hpp for more parameters)
        params = dict()
        params["model_folder"] = "/home/naolaptop/Software/openpose/models"
        params["net_resolution"] = "-1x144"
        params["number_people_max"] = 1
        params["tracking"] = 0

        opWrapper.configure(params)
        opWrapper.start()

        # Robot - camera calibration
        calibrationArm = 'LArm'

        tempMarkerZ = []
        tempMarkerPositions = []
        tempRobotPositions = []
        tempRobotTransformations = []

        markerPositions = []
        robotPositions = []
        robotTransformations = []
        closestObject = None

        robotOffset = {
            'ChestBoard/Button':
            np.array(self.config.motion.getPosition("ChestBoard/Button")[:3]),
            'CameraTop':
            np.array(self.config.motion.getPosition("CameraTop")[:3])
        }
        nosePosition2D = [None, None]
        try:
            if self.config.startup:
                self.config.motion.doBlocking('calibration')
                self.config.motion.doBlocking('calibration0')
            elif self.config.isRecording:
                self.exportSetup(self.config.exportName, self.config.SCENE,
                                 self.config.toRobRotation,
                                 self.config.toRobTranslation)
                videoFrameTimer = time.time()

            sharedIsrunningLock.acquire()
            while sharedIsrunning:
                sharedIsrunningLock.release()
                # Get frameset of color and depth
                frames = pipeline.wait_for_frames()

                # Align the depth frame to color frame
                aligned_frames = align.process(frames)

                # Get aligned frames
                aligned_depth_frame = aligned_frames.get_depth_frame(
                )  # aligned_depth_frame
                aligned_color_frame = aligned_frames.get_color_frame()

                # Validate that both frames are valid
                if not aligned_depth_frame or not aligned_color_frame:
                    sharedIsrunningLock.acquire()
                    continue

                # Intrinsics
                depth_intrin = aligned_depth_frame.profile.as_video_stream_profile(
                ).intrinsics

                # Get depth and color values in a np array
                depth_image = np.asanyarray(aligned_depth_frame.get_data())
                color_image = np.asanyarray(aligned_color_frame.get_data())

                # Measure processing time
                timeP = dt.datetime.now()

                # Update robot offset to be aligned with current frame
                robotOffset = {
                    'ChestBoard/Button':
                    np.array(
                        self.config.motion.getPosition(
                            "ChestBoard/Button", legCorrection=False)[:3]),
                    'CameraTop':
                    np.array(
                        self.config.motion.getPosition(
                            "CameraTop", legCorrection=False)[:3])
                }
                robotOffset2D = {}
                for part in robotOffset:
                    robotOffset2D[part] = self.torso3DToPixel(
                        depth_intrin, robotOffset[part],
                        self.config.toRobRotation,
                        self.config.toRobTranslation)

                if self.config.startup:
                    # Calibration of camera position
                    markers = cv2.aruco.detectMarkers(
                        color_image,
                        cv2.aruco.getPredefinedDictionary(
                            cv2.aruco.DICT_7X7_50))
                    marker3DCenter = np.array([0, 0, 0])
                    for markerIndex in range(len(markers[0])):
                        marker = markers[0][markerIndex]
                        markerID = markers[1][markerIndex][0]
                        markerCenter = marker[0, :, :].sum(axis=0) * 0.25
                        marker3DCenter = self.pixelTo3D(
                            depth_intrin, depth_image, depth_scale,
                            markerCenter[0], markerCenter[1])

                    if marker3DCenter[2] > 0:
                        robotPosition = np.array(
                            self.config.motion.getPosition(calibrationArm)[:3])
                        tempMarkerZ.append(
                            (marker3DCenter[2], len(tempMarkerPositions)))
                        tempMarkerPositions.append(marker3DCenter)
                        tempRobotPositions.append(robotPosition)
                        tempRobotTransformations.append(
                            self.config.motion.getTransform(calibrationArm))
                        #print("tempCAM", marker3DCenter)
                        #print("tempROBOT", robotPosition)

                        if len(tempMarkerZ
                               ) > 1:  # how many samples of each move
                            tempIndex = sorted(
                                tempMarkerZ,
                                key=lambda s: s[0])[len(tempMarkerZ) // 2][
                                    1]  # Get index of median
                            markerPositions.append(
                                tempMarkerPositions[tempIndex])
                            robotTransformations.append(
                                tempRobotTransformations[tempIndex])
                            robotPositions.append(
                                tempRobotPositions[tempIndex])
                            print("CAM", markerPositions[-1])
                            print("ROBOT", robotPositions[-1])

                            tempMarkerZ = []
                            tempMarkerPositions = []
                            tempRobotPositions = []
                            tempRobotTransformations = []

                    if len(markerPositions) < 7:
                        self.config.motion.doBlocking(
                            'calibration' + str(len(markerPositions)))
                    else:
                        A = np.array(markerPositions)
                        B = np.array(robotPositions)
                        self.config.toRobRotation, self.config.toRobTranslation = rigid_transform_3D(
                            A, B)
                        print(
                            "Calibration done: ", "\"" + json.dumps([
                                self.config.toRobRotation.tolist(),
                                self.config.toRobTranslation.tolist()
                            ]) + "\"")
                        self.config.motion.doBlocking('Stand')
                        self.config.startup = False
                        if self.config.isRecording:
                            self.exportSetup(self.config.exportName,
                                             self.config.SCENE,
                                             self.config.toRobRotation,
                                             self.config.toRobTranslation)
                            videoFrameTimer = time.time()

                        sharedIsrunningLock.acquire()
                        continue
                else:
                    visibleObjects = {}
                    closestObject = None
                    self.config.ROBOT['vision']['objects']['nose'][
                        'current'] = False

                    # Process Image
                    keypoints3DPrint = {}
                    datum = op.Datum()
                    imageToProcess = color_image
                    for part in robotOffset:
                        if not math.isnan(robotOffset2D[part][0]):
                            rob2DTuple = self.toTupleInt(robotOffset2D[part])
                            cv2.circle(imageToProcess, rob2DTuple, 10,
                                       (0, 0, 0), 10)
                    datum.cvInputData = imageToProcess
                    opWrapper.emplaceAndPop([datum])

                    # Keypoints detection
                    distances = []
                    if datum.poseKeypoints.size > 1:
                        keypoints3D = self.keypointsTo3D(
                            depth_intrin,
                            depth_image,
                            depth_scale,
                            datum.poseKeypoints[0],
                            filtered=[
                                1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
                                19, 20, 21, 22, 23, 24, 25
                            ],
                            area=5)
                        for keypointIndex in keypoints3D.keys():
                            # Closest object detection
                            keypoint3D = keypoints3D[keypointIndex]
                            keypoints3DPrint[keypointIndex] = keypoints3D[
                                keypointIndex].tolist()
                            keypoint2D = datum.poseKeypoints[0][keypointIndex]
                            coords = np.dot(
                                self.config.toRobRotation,
                                keypoint3D) + self.config.toRobTranslation

                            partDistances = {}
                            distance = (None, float('inf'))
                            for part in robotOffset:
                                partDistances[part] = np.linalg.norm(
                                    coords - robotOffset[part])
                                if partDistances[part] < distance[1]:
                                    distance = (part, partDistances[part])

                            if keypoint3D[2] > 0:
                                visibleObjects[keypointIndex] = {
                                    'position': keypoint2D,
                                    'coords': coords,
                                    'coordsOrigin': keypoint3D,
                                    'distance': distance[1],
                                    'distanceKey': distance[0],
                                    'partDistances': partDistances
                                }
                                distances.append(distance[1])
                                if closestObject == None or distance[
                                        1] < visibleObjects[closestObject][
                                            'distance']:
                                    closestObject = keypointIndex
                    else:
                        datum = datumFiller()

                    if len(distances) < 3 or np.var(distances, ddof=1) > 0.1:
                        closestObject = None

                    if not closestObject is None:
                        # Measurements
                        distance = visibleObjects[closestObject]['distance']

                        noseKeypoint = filter(
                            lambda x: x in visibleObjects.keys(),
                            [0, 16, 15, 18, 17])
                        if len(noseKeypoint) > 0:
                            noseKeypoint = noseKeypoint[0]
                            noseCopy = dict(visibleObjects[noseKeypoint])
                            noseCopy['current'] = True
                            self.config.ROBOT['vision']['objects'][
                                'nose'] = noseCopy

                            #z = visibleObjects[noseKeypoint]['coordsOrigin'][2]
                            #if abs(visibleObjects[noseKeypoint]['distance'] - visibleObjects[closestObject]['distance']) > 0.3:
                            #   z = visibleObjects[closestObject]['coordsOrigin'][2]

                            #nose3D = np.array(rs.rs2_deproject_pixel_to_point(depth_intrin, [float(visibleObjects[noseKeypoint]['position'][0]), float(visibleObjects[noseKeypoint]['position'][1])], z))
                            #coords = np.dot(self.config.toRobRotation, nose3D) + self.config.toRobTranslation - robotOffset['CameraTop']
                            #if (noseDistance < 1.5 and noseKeypoint == 0) or noseDistance > 1.5:
                            #   visibleObjects[noseKeypoint]['distance'] = noseDistance
                            #   visibleObjects[noseKeypoint]['distanceKey'] = 'CameraTop'

                        processingTime = dt.datetime.now() - timeP
                        self.config.ROBOT['vision']['distance']['velocity'] = (
                            distance -
                            self.config.ROBOT['vision']['distance']['current']
                        ) / processingTime.total_seconds()
                        self.config.ROBOT['vision']['distance'][
                            'current'] = distance

                        updated = False
                        for i in range(len(self.config.ZONE)):
                            if distance <= self.config.ZONE[i]:
                                self.config.ROBOT['vision']['zone'][
                                    'velocity'] = i + 1 - self.config.ROBOT[
                                        'vision']['zone']['current']
                                self.config.ROBOT['vision']['zone'][
                                    'current'] = i + 1
                                updated = True
                                break
                        if not updated:
                            self.config.ROBOT['vision']['zone'][
                                'velocity'] = 4 - self.config.ROBOT['vision'][
                                    'zone']['current']
                            self.config.ROBOT['vision']['zone']['current'] = 4
                    else:
                        self.config.ROBOT['vision']['zone']['velocity'] = 0

                    self.config.ROBOT['touch'][
                        'side'] = self.config.skin.whichSide(True)
                    if self.config.ROBOT['touch']['side'] != None:
                        self.config.ROBOT['vision']['zone']['current'] = 0

                    sharedVisionLock.acquire()
                    sharedVision = dict(self.config.ROBOT['vision'])
                    sharedVisionLock.release()

                # Processing time
                processingTime = dt.datetime.now() - timeP
                if self.config.isDebug:
                    print(
                        str(round(
                            (time.time() - videoTimer) / 60., 2)) + " min " +
                        str(round(1 / processingTime.total_seconds(), 2)) +
                        " FPS")  # Frame-rate

                # Prepare image to be used by opencv in order to show it on the screen
                # mostly used for testing/debug, we will probably need to remove this in actual scripts
                # to improve performance.
                if self.config.isView or self.config.isRecording:
                    depth_image_3d = np.dstack(
                        (depth_image, depth_image, depth_image
                         ))  #depth image is 1 channel, color is 3 channels
                    depth_colormap = cv2.applyColorMap(
                        cv2.convertScaleAbs(depth_image, alpha=0.03),
                        cv2.COLORMAP_JET)
                    try:
                        images = datum.cvOutputData  #np.hstack((datum.cvOutputData, depth_colormap))
                    except:
                        images = color_image  #np.hstack((color_image, depth_colormap

                    if self.config.isView:
                        cv2.namedWindow('RealSense', cv2.WINDOW_NORMAL)

                    if images is not None:
                        if self.config.ROBOT['vision']['zone'][
                                'current'] <= 3 and self.config.ROBOT[
                                    'vision']['zone']['current'] > 0 and (
                                        self.config.ROBOT['vision']['objects']
                                        ['nose']['current']
                                        or nosePosition2D[0] != None):
                            if self.config.ROBOT['vision']['objects']['nose'][
                                    'current']:
                                nosePosition2D = self.config.ROBOT['vision'][
                                    'objects']['nose']['position']
                            nose2DTuple = self.toTupleInt(nosePosition2D)
                            rob2DTuple = self.toTupleInt(
                                robotOffset2D['CameraTop'])
                            cv2.arrowedLine(
                                images,
                                rob2DTuple,
                                nose2DTuple, (0, 255, 0),
                                int(10 * self.config.ROBOT['vision']['objects']
                                    ['nose']['partDistances']['CameraTop']) +
                                1,
                                tipLength=0.01 +
                                0.05 * self.config.ROBOT['vision']['objects']
                                ['nose']['partDistances']['CameraTop'])

                            text = "|" + str(
                                round(
                                    self.config.ROBOT['vision']['objects']
                                    ['nose']['partDistances']['CameraTop'],
                                    2)) + "| m"
                            cv2.putText(images, text, nose2DTuple,
                                        cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 0),
                                        5)
                            cv2.putText(images, text, nose2DTuple,
                                        cv2.FONT_HERSHEY_DUPLEX, 2,
                                        (255, 255, 255), 4)
                            nose2DTuple = (nose2DTuple[0], nose2DTuple[1] + 35)

                            text = str(
                                map(
                                    round, self.config.ROBOT['vision']
                                    ['objects']['nose']['coords'],
                                    len(self.config.ROBOT['vision']['objects']
                                        ['nose']['coords']) * [2]))
                            cv2.putText(images, text, nose2DTuple,
                                        cv2.FONT_HERSHEY_DUPLEX, 1.1,
                                        (0, 0, 0), 3)
                            cv2.putText(images, text, nose2DTuple,
                                        cv2.FONT_HERSHEY_DUPLEX, 1.1,
                                        (255, 255, 255), 2)

                        if closestObject != None:
                            closestPosition2D = visibleObjects[closestObject][
                                'position']
                            closest2DTuple = self.toTupleInt(closestPosition2D)
                            rob2DTuple = self.toTupleInt(robotOffset2D[
                                visibleObjects[closestObject]['distanceKey']])
                            cv2.circle(images, closest2DTuple, 10, (0, 0, 255),
                                       10)
                            cv2.arrowedLine(images,
                                            rob2DTuple,
                                            closest2DTuple, (255, 0, 0),
                                            5,
                                            tipLength=0.05)

                            text = "|" + str(
                                round(
                                    visibleObjects[closestObject]['distance'],
                                    2)) + "| m"
                            cv2.putText(images, text, closest2DTuple,
                                        cv2.FONT_HERSHEY_DUPLEX, 2, (0, 0, 0),
                                        5)
                            cv2.putText(images, text, closest2DTuple,
                                        cv2.FONT_HERSHEY_DUPLEX, 2,
                                        (255, 255, 255), 4)
                            closest2DTuple = (closest2DTuple[0],
                                              closest2DTuple[1] + 35)

                            text = str(
                                map(
                                    round,
                                    visibleObjects[closestObject]['coords'],
                                    len(visibleObjects[closestObject]
                                        ['coords']) * [2]))
                            cv2.putText(images, text, closest2DTuple,
                                        cv2.FONT_HERSHEY_DUPLEX, 1.1,
                                        (0, 0, 0), 3)
                            cv2.putText(images, text, closest2DTuple,
                                        cv2.FONT_HERSHEY_DUPLEX, 1.1,
                                        (255, 255, 255), 2)

                        text = "Action: " + str(self.config.motion.lastMove())
                        cv2.putText(images, text, (10, 35),
                                    cv2.FONT_HERSHEY_DUPLEX, 1.1, (0, 0, 0), 3)
                        cv2.putText(images, text, (10, 35),
                                    cv2.FONT_HERSHEY_DUPLEX, 1.1,
                                    (255, 255, 255), 2)

                        text = "Zone: " + str(
                            self.config.ROBOT['vision']['zone']['current'])
                        cv2.putText(images, text, (10, 75),
                                    cv2.FONT_HERSHEY_DUPLEX, 1.1, (0, 0, 0), 3)
                        cv2.putText(images, text, (10, 75),
                                    cv2.FONT_HERSHEY_DUPLEX, 1.1,
                                    (255, 255, 255), 2)

                        text = "Touch: " + str(
                            self.config.ROBOT['touch']
                            ['side'])  # touch is no longer available here
                        cv2.putText(images, text, (10, 115),
                                    cv2.FONT_HERSHEY_DUPLEX, 1.1, (0, 0, 0), 3)
                        cv2.putText(images, text, (10, 115),
                                    cv2.FONT_HERSHEY_DUPLEX, 1.1,
                                    (255, 255, 255), 2)

                        text = dt.datetime.now().strftime("%Y.%m.%d %H:%M:%S")
                        cv2.putText(images, text, (10, 700),
                                    cv2.FONT_HERSHEY_DUPLEX, 1.1, (0, 0, 0), 3)
                        cv2.putText(images, text, (10, 700),
                                    cv2.FONT_HERSHEY_DUPLEX, 1.1,
                                    (255, 255, 255), 2)

                        if self.config.isRecording and not self.config.startup:
                            timer = time.time() - videoTimer
                            while videoFrames < timer * videoFPS:
                                # Export keypoints
                                exportKeypoints.write("times.append(" +
                                                      str(timer) + ")\n")
                                exportKeypoints.write("keypoints3D.append(" +
                                                      str(keypoints3DPrint) +
                                                      ")\n")

                                exportKeypoints.write(
                                    "keypoints.append(" +
                                    str(datum.poseKeypoints[0].tolist()) +
                                    ")\n")

                                videoColored.write(images)
                                videoFrames += 1
                                timer = time.time() - videoTimer

                            # Export robot angles
                            bodyJoints = self.config.motion.getBodyNames(
                                "Body")
                            exportJoints.write("times.append(" + str(timer) +
                                               ")\n")
                            exportJoints.write("names.append(" +
                                               str(bodyJoints) + ")\n")
                            exportJoints.write(
                                "keys.append(" +
                                str(self.config.motion.getAngles("Body")) +
                                ")\n")

                        if self.config.isView:
                            cv2.imshow('RealSense', images)

                key = cv2.waitKey(1)
                # Press esc or 'q' to close the image window
                if key & 0xFF == ord('q') or key == 27:
                    break

                sharedIsrunningLock.acquire()
        finally:
            if self.config.isRecording:
                videoColored.release()
                exportJoints.close()
                exportKeypoints.close()

            if self.config.isView:
                cv2.destroyAllWindows()
            # Stop streaming
            pipeline.stop()
            sharedIsrunningLock.acquire()
            sharedIsrunning = False
            sharedIsrunningLock.release()
            print("Qutting video.")
    def run(self):
        try:
            machien = {'id': 1, 'ip': 'http://192.168.0.10'}
            response_machien = requests.put(SERVER_URL + "/myfarm/register/ip",
                                            data=machien)

            print(f" machien ip set server : {response_machien.status_code}")

            # GUI code
            while True:
                response_user = requests.get(SERVER_URL + "/user/info/3")

                print(f" user data : {response_user.status_code}")

                if response_user.status_code == 200:
                    break
                time.sleep(5)

            # while True :
            # time.sleep(1)
            # print("exist check ...")
            # pipeline.start()
            # frames = pipeline.wait_for_frames()
            # color_frame = frames.get_color_frame()
            # color_image = np.asarray(color_frame.get_data())

            ## take the only location of mushroom pot -> 1/3 * width,1/2*height
            # recent_image = color_image[100:350, 290:550]
            # check_image = cv2.imread('./check.jpg')[100:350, 290:550]
            # cv2.imwrite('./rec.jpg',check_image)
            # cv2.imwrite('./recent.jpg',recent_image)
            # hist_recent = cv2.calcHist(recent_image, [0,1], None, [180,256], [0,180,0,256])
            # hist_check = cv2.calcHist(check_image, [0,1], None, [180,256], [0,180,0,256])
            # number = cv2.compareHist(hist_recent, hist_check, cv2.HISTCMP_CORREL)

            # print(number)
            # pipeline.stop()
            # if number > 0.4:
            # print('Not exist')

            # else:
            #            check that the substrate bag was inserted
            # print("Exist !!")
            # break

            while self.isRun:
                params = {'pin': PIN}
                response = requests.get(SERVER_URL + '/farm/exist',
                                        params=params)

                if response.status_code == 200:
                    prg_id = response.text
                    break
                else:
                    print("Not prg")
                time.sleep(5)  # avoid HTTP 429 (too many requests)

            params = {'id': prg_id, 'type': 'custom'}
            response = requests.get(SERVER_URL + '/farm/data', params=params)
            result = json.loads(response.text)
            water_num = result['water']
            temp_array = result['temperature']
            hum_array = result['humidity']

            params_status = {'id': "1", "status": "true"}
            response_status = requests.put(SERVER_URL + '/myfarm/status',
                                           params=params_status)

            print(f"machien status : {response_status.status_code}")

            data_len = len(temp_array)

            data = [None] * data_len

            for i in range(0, data_len):
                temp_array[i] = str(temp_array[i]['setting_value'])
                hum_array[i] = str(hum_array[i]['setting_value'])
                data[i] = R + C + temp_array[i] + S + hum_array[i]

            print(f"water_num : {water_num}")
            print(f"temp_array : {temp_array}")
            print(f"hum_array : {hum_array}")
            print(f"total_data : {data}")

            # return True if data else False

            # HOUR * 24 / water_num
            WATER_TIME = HOUR * 24 / water_num

            serial_send_len = 0
            hour = 0  # time, converted to seconds
            # WATER_TIME
            water_time = 10000  # watering interval, recomputed once the value is received
            # MOTER_TIME
            moter_time = MOTER_TIME
            picTime = 100000

            now = datetime.datetime.now()
            Arduino = serial.Serial(port='COM5', baudrate=9600)
            print(f"data success : {data}")

            seconds = 0
            dt2 = None
            loadTime = 0
            hum = 0
            temp = 0

            while self.isRun:
                dt1 = datetime.datetime.now()
                result = dt1 - now
                seconds = int(result.total_seconds()) - loadTime

                print(
                    f"Python hour : {hour}, water_time : {water_time} moter_time : {moter_time} image : {picTime}"
                )
                print(f"Python seconds : {seconds}")

                if Arduino.readable():
                    LINE = Arduino.readline()
                    code = str(LINE.decode().replace('\n', ''))
                    print(code)
                    hum = code[18:20]
                    temp = code[38:40]
                    # socket_data(temp, hum)
                    self.signal.emit(str(temp), str(hum))
                if seconds - 2 <= hour <= seconds + 2:

                    if len(data) - 1 < serial_send_len:
                        response_status_prg = requests.put(
                            SERVER_URL + f'/farm/end?id={prg_id}')
                        print(
                            f"prg status : {response_status_prg.status_code}")

                        params_status = {'id': "1", "status": "false"}
                        response_status = requests.put(SERVER_URL +
                                                       '/myfarm/status',
                                                       params=params_status)
                        print(
                            f"machien status : {response_status.status_code}")
                        break

                    Arduino.write(data[serial_send_len].encode('utf-8'))
                    req_data_humi_temp = {
                        'prgId': prg_id,
                        'tempValue': temp,
                        'humiValue': hum
                    }
                    humi_temp_res = requests.post(SERVER_URL + "/data/add",
                                                  data=req_data_humi_temp)
                    print(f"Python res_temp : {humi_temp_res.status_code}")
                    serial_send_len += 1
                    hour += DAY

                if seconds - 2 <= water_time <= seconds + 2:
                    Arduino.write(WATER_ORDER.encode('utf-8'))
                    dt2 = datetime.datetime.now()

                    while Arduino.readable():
                        LINE = Arduino.readline()
                        code = str(LINE.decode().replace('\n', ''))
                        print(code)
                        if code[0:3] == 'end':
                            loadTime += int((datetime.datetime.now() -
                                             dt2).total_seconds())
                            break

                    water_time += WATER_TIME

                if seconds - 2 <= moter_time - HOUR / 3 <= seconds + 2:

                    if pipeline_check == False:
                        pipeline_check = True
                        Arduino.write(MOTER_ORDER.encode('utf-8'))
                        dt2 = datetime.datetime.now()

                        # 3d config
                        device = rs.context().query_devices()[0]
                        advnc_mode = rs.rs400_advanced_mode(device)
                        depth_table_control_group = advnc_mode.get_depth_table(
                        )
                        depth_table_control_group.disparityShift = 60
                        depth_table_control_group.depthClampMax = 4000
                        depth_table_control_group.depthClampMin = 0
                        advnc_mode.set_depth_table(depth_table_control_group)

                        pipeline1 = rs.pipeline()
                        config1 = rs.config()

                        config1.enable_stream(rs.stream.depth, rs.format.z16,
                                              30)
                        config1.enable_stream(rs.stream.color, rs.format.bgr8,
                                              30)

                        # Start streaming
                        profile1 = pipeline1.start(config1)
                        # Get stream profile and camera intrinsics
                        profile = pipeline1.get_active_profile()
                        depth_profile = rs.video_stream_profile(
                            profile1.get_stream(rs.stream.depth))
                        pc = rs.pointcloud()
                        decimate = rs.decimation_filter()
                        decimate.set_option(rs.option.filter_magnitude, 1)

                        file_name_3d = [0, 90, 180, 270]
                        file_names_3d = [
                            './3dScan{}.ply'.format(i) for i in file_name_3d
                        ]

                        i = 0

                        while Arduino.readable():
                            LINE = Arduino.readline()
                            code = str(LINE.decode().replace('\n', ''))
                            print(code)
                            print(f"i : {i}")
                            if code[0:3] == 'end':
                                loadTime += int((datetime.datetime.now() -
                                                 dt2).total_seconds())
                                break
                            take_3Dpicture(file_names_3d[i], pipeline1,
                                           decimate, pc)
                            i += 0 if i >= 3 else 1

                        files = {'ply': open(file_names_3d[0], 'rb')}
                        req_data_3d = [("machineid", 1)]
                        res_3d = requests.post(SERVER_URL + "/upload/ply",
                                               files=files,
                                               data=req_data_3d)
                        print(f"Python res_3d : {res_3d.status_code}")

                        pipeline1.stop()
                        moter_time += MOTER_TIME
                        pipeline_check = False
                    else:
                        moter_time += 50

                if seconds - 2 <= picTime + 30 <= seconds + 2:

                    if pipeline_check == False:
                        pipeline_check = True
                        profile = pipeline.start(config)

                        depth_sensor = profile.get_device().first_depth_sensor(
                        )

                        frames = pipeline.wait_for_frames()
                        color_frame = frames.get_color_frame()

                        # Convert images to numpy arrays
                        color_image = np.asanyarray(color_frame.get_data())
                        cv2.imwrite('./color_sensor.jpg', color_image)

                        files = {
                            'compost':
                            ('compost.jpg', open('./color_sensor.jpg', 'rb'))
                        }
                        data1 = [('machineid', 1)]
                        response_image = requests.post(SERVER_URL +
                                                       "/upload/compost",
                                                       files=files,
                                                       data=data1)
                        print(
                            f"Python res_image : {response_image.status_code}")
                        pipeline.stop()
                        picTime += D2_TIME

                        # Turning moter

                        #  MUSHROOM DETECTION , requeset
                        frames = pipeline.wait_for_frames()
                        color_frame = frames.get_color_frame()
                        color_image = np.asanyarray(color_frame.get_data())
                        # detections = detection(color_image)
                        # boxes = get_shiitake_location(detections['detection_boxes'], detections['detection_classes'],
                        #                               0.5)
                        # print(boxes)
                        pipeline_check = False
                        pipeline.stop()

                        # for box in boxes:
                        #     size = get_size(box)
                        #     res = make_data(52, box, size)
                        #     print(res)
                    else:
                        picTime += 50

        except Exception:
            self.errors.emit(100)
            self.isRun = False