def database_run(db):
    # ROS node setup
    frame_id = 'Database_node'
    rospy.init_node(frame_id, anonymous=True)
    diag_obj = diag_class(frame_id=frame_id,
                          user_id=0,
                          user_name="N/A",
                          queue=1)

    rate = rospy.Rate(1)  # 1hz
    try:
        # Test connection
        db.connect(verbose=True)
        db.disconnect(verbose=True)
        # Make and load predefined tables
        make_tables(db)
        load_tables(db)
        diag_obj.publish(1, "Tables loaded")
    except Exception as e:
        print(f"Database node create database error: {e}")
        diag_obj.publish(2, f"Error: {e}")
        raise

    while not rospy.is_shutdown():
        try:
            # Test database connection to ensure running smoothly
            db.connect()
            db.disconnect()
            diag_obj.publish(0, "Running")
        except Exception as e:
            print(f"Database connection error: {e}")
            diag_obj.publish(2, f"Error: {e}")
        rate.sleep()
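
# A minimal, hypothetical entry point for the node above. It assumes the same
# `database` helper class used in the other examples is importable here.
if __name__ == '__main__':
    try:
        db = database()
        database_run(db)
    except rospy.ROSInterruptException:
        pass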
Example 2
def test_robot_control_node():
    # ROS node setup
    frame_id = 'test_robot_control_node'
    rospy.init_node(frame_id, anonymous=True)
    diag_obj = diag_class(frame_id=frame_id,
                          user_id=0,
                          user_name="robot",
                          queue=1)
    capability_obj = capability_class(frame_id=frame_id, user_id=0)

    task_name = 'assemble_box'
    try:
        db = database()
        cols, data = db.query_table(task_name, 'all')
        task_data = pd.DataFrame(data, columns=cols)
    except Exception as e:
        print(e)
        raise e

    rate = rospy.Rate(1 / 10)  # 0.1hz
    i = 0

    col_names, act_data = db.query_table('current_actions', rows=0)
    while (not rospy.is_shutdown()):  # and (i < len(data)):
        try:
            capability_obj.publish(i, [task_data.loc[i]['action_name']])

            now = datetime.datetime.utcnow()

            separator = ', '
            sql_cmd = f"""INSERT INTO current_actions ({separator.join(col_names)})
            VALUES (1, 'unknown', '{now}', '{task_name}', {int(task_data.loc[i]['action_no'])}, '{now}')
            ON CONFLICT (user_id) DO UPDATE SET updated_t='{now}', task_name='{task_name}', current_action_no={int(task_data.loc[i]['action_no'])}, start_time='{now}';"""
            db.gen_cmd(sql_cmd)

            diag_obj.publish(0, "Running")

            print(task_data.loc[i])
            i = i + 1
            if i == len(task_data):
                i = 0

        except Exception as e:
            print(f"test_robot_control_node connection error: {e}")
            diag_obj.publish(2, f"Error: {e}")
            raise

        rate.sleep()
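
# Hedged sketch of the same upsert issued with query parameters instead of
# string interpolation. It assumes a psycopg2-style connection rather than the
# project's db.gen_cmd() wrapper, and the column order is inferred from the
# ON CONFLICT clause above, so it may not match the real current_actions schema.
import datetime

def upsert_current_action(conn, task_name, action_no, user_id=1, user_name='unknown'):
    now = datetime.datetime.utcnow()
    sql = """INSERT INTO current_actions
                 (user_id, user_name, updated_t, task_name, current_action_no, start_time)
             VALUES (%s, %s, %s, %s, %s, %s)
             ON CONFLICT (user_id) DO UPDATE
                 SET updated_t = EXCLUDED.updated_t,
                     task_name = EXCLUDED.task_name,
                     current_action_no = EXCLUDED.current_action_no,
                     start_time = EXCLUDED.start_time;"""
    with conn.cursor() as cur:
        cur.execute(sql, (user_id, user_name, now, task_name, int(action_no), now))
    conn.commit()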
Example 3
def fakeIMUmain():
    print("-----Here we go-----")
    frame_id = 'fakeIMUpub_node'
    rospy.init_node(frame_id, anonymous=True)
    rate = rospy.Rate(2)  # Message publication rate, Hz => should be 2

    diag_obj = diag_class(frame_id=frame_id,
                          user_id=1,
                          user_name='unknown',
                          queue=1)
    act_obj = act_class(frame_id=frame_id,
                        user_id=1,
                        user_name='unknown',
                        queue=1)

    prediction = np.zeros(5)

    print("Starting main loop")

    class_pred = CATEGORIES[-1]
    diag_level = 1  # 0:ok, 1:warning, 2:error, 3:stale

    with open('fake_imu_data.csv', newline='') as csvfile:
        csvreader = csv.reader(csvfile, delimiter=',')
        next(csvreader)  # skip the header row
        for row in csvreader:
            if rospy.is_shutdown():
                break

            prediction = [float(i) for i in row[0:5]]
            prediction = np.reshape(prediction, (-1))
            class_pred = CATEGORIES[np.argmax(prediction)]

            try:
                act_obj.publish(prediction.tolist())
                diag_msg = "fake_imu_pub all good"
                diag_level = 0  # ok
            except Exception as e:
                print(f"Error: {e}")
                diag_msg = "fake_imu_pub not so good"
                diag_level = 1  # warning

            diag_obj.publish(diag_level, diag_msg)
            #print(f"Action: {class_pred} Probs: {prediction}")
            rate.sleep()
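
# Helper sketch to generate a placeholder fake_imu_data.csv in the format
# fakeIMUmain() expects: one header row, then one line of five class scores per
# sample (five classes is assumed from `prediction = np.zeros(5)` above).
import csv
import numpy as np

def write_fake_imu_csv(path='fake_imu_data.csv', n_rows=100, n_classes=5, seed=0):
    rng = np.random.default_rng(seed)
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow([f'class_{i}' for i in range(n_classes)])  # header row, skipped by the reader
        for _ in range(n_rows):
            scores = rng.random(n_classes)
            writer.writerow(np.round(scores / scores.sum(), 4).tolist())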
Example 4
def realsense_run():
    # ROS node setup
    frame_id = 'Realsense_node'

    if not test:
        rospy.init_node(frame_id, anonymous=True)
        diag_obj = diag_class(frame_id=frame_id,
                              user_id=args.user_id,
                              user_name=args.user_name,
                              queue=1)
        screw_publisher = rospy.Publisher('RawScrewCount', Int8, queue_size=1)
        rate = rospy.Rate(10)

    diag_timer = time.time()
    while (not rospy.is_shutdown()) or test:
        try:
            # Get frameset of color and depth
            frames = cam.pipeline.wait_for_frames()

            if args.depth:
                color_image_raw, depth_colormap, depth_image = cam.depth_frames(
                    frames)
            else:
                color_image_raw = cam.colour_frames(frames)

            # Fall-backs in case the blob/rectangle detection below fails
            im_with_keypoints = color_image_raw
            key_points = []

            if args.classify:
                try:
                    approx, thrash = rectangle_detector(color_image_raw)
                    try:
                        key_points = detect_blobs(thrash)
                        im_with_keypoints = cv2.drawKeypoints(
                            color_image_raw, key_points, np.array([]),
                            (0, 0, 255),
                            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
                    except Exception as e:
                        print("blob error: ", e)
                        pass

                    if approx is not None:
                        cv2.drawContours(im_with_keypoints, [approx], 0,
                                         (0, 255, 0), 5)
                        if not test:
                            screw_publisher.publish(len(key_points))

                except Exception as e:
                    print("rectangle error: ", e)
                    pass

            if (time.time() - diag_timer) > 1:
                if not test:
                    diag_obj.publish(0, "Running")
                diag_timer = time.time()

        except TypeError as e:
            time.sleep(1)
            if not test:
                diag_obj.publish(2, f"TypeError: {e}")
        except Exception as e:
            print("**Get Image Error**")
            if not test:
                diag_obj.publish(2, f"Realsense image error: {e}")
            traceback.print_exc(file=sys.stdout)
            break

        if args.disp:
            if args.depth:
                disp_im = np.hstack((im_with_keypoints, depth_colormap))
            else:
                disp_im = im_with_keypoints  #color_image

            cv2.namedWindow('Realsense viewer', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Realsense viewer', disp_im)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

        if not test:
            rate.sleep()
        else:
            #time.sleep(1)
            pass
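
# Hedged reconstruction of the helpers used above. The real rectangle_detector()
# and detect_blobs() live elsewhere in this package; this is only a plausible
# OpenCV sketch consistent with how their return values are used (a 4-point
# contour approximation plus a threshold image, and blob keypoints).
import cv2
import numpy as np

def rectangle_detector(bgr_image, thresh_val=100):
    grey = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(grey, thresh_val, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    approx = None
    for cnt in contours:
        candidate = cv2.approxPolyDP(cnt, 0.02 * cv2.arcLength(cnt, True), True)
        if len(candidate) == 4 and cv2.contourArea(candidate) > 1000:
            approx = candidate
            break
    return approx, thresh

def detect_blobs(thresh_image):
    detector = cv2.SimpleBlobDetector_create()
    return detector.detect(thresh_image)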
Example 5
def users_node():

    frame_id = "users_node"
    rospy.init_node(frame_id, anonymous=True)
    keyvalues = []
    diag_obj = diag_class(frame_id=frame_id,
                          user_id=0,
                          user_name="N/A",
                          queue=1,
                          keyvalues=keyvalues)
    users = []

    rospy.Subscriber("SystemStatus", diagnostics, sys_stat_callback, (users))
    rospy.Subscriber("NextActionOverride", String,
                     next_action_override_callback, (users))
    global database_stat
    # Wait for postgresql node to be ready
    while database_stat != 0 and not rospy.is_shutdown():
        print(f"Waiting for postgresql node status, currently {database_stat}")
        diag_obj.publish(1, "Waiting for postgresql node")
        time.sleep(0.5)

    task = args.task_type  #'assemble_box'
    global use_vision
    if use_vision:
        global imrecog_stat
        # Wait for the image recognition node to be ready
        while imrecog_stat != 0 and not rospy.is_shutdown():
            print(
                f"Waiting for imrecog_stat node status, currently {imrecog_stat}"
            )
            diag_obj.publish(1, "Waiting for imrecog_stat node")
            time.sleep(0.5)

    for name in args.user_names:
        users = setup_user(users, frame_id, task, name, use_vision)

    timer = time.time()

    global shimmer_stat
    # Wait for the shimmer node to be ready
    while shimmer_stat != 0 and not rospy.is_shutdown():
        print(f"Waiting for shimmer node status, currently {shimmer_stat}")
        diag_obj.publish(1, "Waiting for shimmer node")
        time.sleep(0.5)

    rospy.Subscriber("HandStates", hand_pos, hand_pos_callback, (users))
    rospy.Subscriber("CurrentAction", current_action, current_action_callback,
                     (users))

    if use_vision:
        # Get first reading for screw counters, need to wait for good readings
        while time.time() - timer < 5:
            time.sleep(0.5)
        for user in users:
            user.screw_counter.next_screw()

    rate = rospy.Rate(1)  # 1hz
    while not rospy.is_shutdown():
        rospy.loginfo(f"{frame_id} active")

        diag_obj.publish(0, "Running")
        rate.sleep()
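
# Hedged sketch of the status callback assumed above: it watches the SystemStatus
# topic and updates the module-level *_stat flags that users_node() polls on.
# The field names on the custom `diagnostics` message (frame_id, level) are
# assumptions; 0 is treated as "ok" to match the `!= 0` checks above.
database_stat = 3
shimmer_stat = 3
imrecog_stat = 3

def sys_stat_callback(msg, users):
    global database_stat, shimmer_stat, imrecog_stat
    if msg.frame_id == 'Database_node':
        database_stat = msg.level
    elif msg.frame_id.startswith('shimmerBase'):
        shimmer_stat = msg.level
    elif msg.frame_id.startswith('Realsense'):
        imrecog_stat = msg.level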
Example 6
def IMUsensorsMain():
    print("-----Here we go-----")
    frame_id = f'shimmerBase_{args.user_name}_{args.user_id}'
    rospy.init_node(frame_id, anonymous=True)
    rate = rospy.Rate(2)  # Message publication rate, Hz => should be 2

    keyvalues = [
        KeyValue(key=f'Shimmer {POSITIONS[0]} {SHIM_IDs[0]}',
                 value=IMU_MSGS[2]),
        KeyValue(key=f'Shimmer {POSITIONS[1]} {SHIM_IDs[1]}',
                 value=IMU_MSGS[2]),
        KeyValue(key=f'Shimmer {POSITIONS[2]} {SHIM_IDs[2]}',
                 value=IMU_MSGS[2]),
        KeyValue(key=f'Overall', value=IMU_SYS_MSGS[2])
    ]  # [unknown, unknown, unknown, setting up]
    diag_obj = diag_class(frame_id=frame_id,
                          user_id=args.user_id,
                          user_name=args.user_name,
                          queue=1,
                          keyvalues=keyvalues)

    if args.classifier_type != 'none':
        if args.classifier_type == 'all':
            if args.task_type == 'assemble_box':
                class_count = 5
                model_file = 'basic_box_classifier.h5'
                classifier = imu_classifier(model_file, CATEGORIES, WIN_LEN)
            elif args.task_type == 'assemble_complex_box':
                class_count = 5
                # model_file = 'complex_box_classifier_allvall_1.h5'
                model_file = 'complex_box_classifier_allvall_2_allclassesincl.h5'
                classifier = imu_classifier(model_file, CATEGORIES, WIN_LEN)
        elif args.classifier_type == 'one':
            if args.task_type == 'assemble_box':
                pass
            elif args.task_type == 'assemble_complex_box':
                class_count = 4
                # classifier_screw = imu_classifier('Screw In_classifier_TrainOnAll_2.h5', ['null', 'screw_in'], WIN_LEN)
                # classifier_allen = imu_classifier('Allen In_classifier_TrainOnAll_2.h5', ['null', 'allen_in'], WIN_LEN)
                # classifier_hand = imu_classifier('Hand Screw In_classifier_TrainOnAll_2.h5', ['null', 'hand_screw_in'], WIN_LEN)
                # classifier_hammer = imu_classifier('Hammer_classifier_TrainOnAll_2.h5', ['null', 'hammer'], WIN_LEN)

                classifier_screw = imu_classifier(
                    'Screw In_classifier_TrainOnAll_4_allclassesincl.h5',
                    ['screw_in'], WIN_LEN)
                classifier_allen = imu_classifier(
                    'Allen In_classifier_TrainOnAll_4_allclassesincl.h5',
                    ['allen_in'], WIN_LEN)
                classifier_hand = imu_classifier(
                    'Hand Screw In_classifier_TrainOnAll_4_allclassesincl.h5',
                    ['hand_screw_in'], WIN_LEN)
                classifier_hammer = imu_classifier(
                    'Hammer_classifier_TrainOnAll_4_allclassesincl.h5',
                    ['hammer'], WIN_LEN)

        act_obj = act_class(frame_id=frame_id,
                            class_count=class_count,
                            user_id=args.user_id,
                            user_name=args.user_name,
                            queue=10)

        prediction = np.zeros(class_count)

    # Start separate thread for collecting data from each Shimmer
    for shimthread in range(0, numsensors):
        shim_threads[shimthread] = threading.Thread(target=shimmer_thread,
                                                    args=(shimthread, ))
        shim_threads[shimthread].start()
        # time.sleep(10)
        # while not shimmers[shimthread]._connected:
        #     time.sleep(5)
        #     print(shimmers[shimthread]._connected)

    # Boolean status arrays, one entry per shimmer thread
    ready = np.zeros(len(shim_threads))   # shimmer is set up and streaming
    alive = np.zeros(len(shim_threads))   # shimmer thread is active
    conn = np.zeros(len(shim_threads))    # connection was successful
    s_down = np.zeros(len(shim_threads))  # sensor has shut down
    time.sleep(1)

    print("Starting main loop")

    class_pred = 'null'  #CATEGORIES[-1]
    status = [2, 2, 2, 2]  # [unknown, unknown, unknown, setting up]
    diag_level = 1  # 0:ok, 1:warning, 2:error, 3:stale
    while not quit_IMU:
        status[3] = 2  # setting up
        for s in shimmers:
            ready[s] = shimmers[s]._ready
            alive[s] = shim_threads[s].is_alive()
            conn[s] = shimmers[s]._connected
            s_down[s] = shimmers[s]._shutdown
            status[s] = shimmers[s]._status
            keyvalues = [
                KeyValue(key=f'Shimmer {POSITIONS[s]} {SHIM_IDs[s]}',
                         value=IMU_MSGS[status[s]])
            ]

        out_str = f"Sensors Ready:{ready} Threads:{alive} Connections:{conn} Shutdowns:{s_down} " \
                  f"Total Threads:{threading.active_count()} Quit:{quit_IMU} Prediction:{class_pred}"
        #out_str = threading.enumerate()
        print(out_str)
        class_pred = 'null'  #CATEGORIES[-1]
        new_data = np.empty((WIN_LEN, 0), dtype=np.float64)
        if all(ready) and all(conn) and all(alive):
            for p in shimmers:
                new_data = np.hstack(
                    (new_data, shimmers[p]._accel, shimmers[p]._gyro))
            new_data = np.nan_to_num(new_data)
            #scaler = preprocessing.StandardScaler()
            # for i in range(0, X.shape[0]):
            #     scaler = preprocessing.StandardScaler()
            #     X[i, :, :] = scaler.fit_transform(X[i, :, :])
            #new_data[:, :] = scaler.fit_transform(new_data[:, :])
            new_data = scale_data(new_data)
            status[3] = 1  # Ready
            diag_level = 0  # ok

            if args.classifier_type != 'none':
                if args.classifier_type == 'all':
                    if args.task_type == 'assemble_box':
                        prediction = np.reshape(
                            classifier.classify_data(new_data, args.bar),
                            (-1)).tolist()
                    elif args.task_type == 'assemble_complex_box':
                        prediction = np.reshape(
                            classifier.classify_data(new_data, args.bar),
                            (-1)).tolist()
                elif args.classifier_type == 'one':
                    if args.task_type == 'assemble_box':
                        pass
                    elif args.task_type == 'assemble_complex_box':
                        prediction = []
                        prediction.append(
                            classifier_screw.classify_data(new_data,
                                                           False))  #[1])
                        prediction.append(
                            classifier_allen.classify_data(new_data,
                                                           False))  #[1])
                        prediction.append(
                            classifier_hammer.classify_data(new_data,
                                                            False))  #[1])
                        prediction.append(
                            classifier_hand.classify_data(new_data,
                                                          False))  #[1])

                        if args.bar:
                            plot_prediction(prediction)

                #print(prediction)
                class_pred = CATEGORIES[np.argmax(prediction)]

        else:
            diag_level = 1  # warning

        if all(ready) and all(conn) and all(alive) and not quit_IMU and args.disp:
            plotdata = np.empty((WIN_LEN, 0), dtype=np.float64)
            for p in shimmers:
                plotdata = np.hstack(
                    (plotdata, shimmers[p]._accel, shimmers[p]._gyro))
            plotdata = np.nan_to_num(plotdata)
            plot_func(plotdata)

        #rospy.loginfo(out_str)

        diag_msg = "Some helpful message"
        keyvalues = [
            KeyValue(key=f'Shimmer {POSITIONS[0]} {SHIM_IDs[0]}',
                     value=IMU_MSGS[status[0]]),
            KeyValue(key=f'Shimmer {POSITIONS[1]} {SHIM_IDs[1]}',
                     value=IMU_MSGS[status[1]]),
            KeyValue(key=f'Shimmer {POSITIONS[2]} {SHIM_IDs[2]}',
                     value=IMU_MSGS[status[2]]),
            KeyValue(key=f'Overall', value=IMU_SYS_MSGS[status[3]])
        ]

        if args.classifier_type != 'none':
            try:
                act_obj.publish(prediction)
            except Exception as e:
                print(e)
                print(prediction)

        diag_obj.publish(diag_level, diag_msg, keyvalues)

        rate.sleep()
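
# Hedged sketch of the scale_data() helper used above, consistent with the
# commented-out preprocessing.StandardScaler code: standardise each channel of
# the (WIN_LEN x channels) window to zero mean and unit variance.
from sklearn import preprocessing

def scale_data(window):
    scaler = preprocessing.StandardScaler()
    return scaler.fit_transform(window)
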
def robot_control_node():
    # ROS node setup
    frame_id = 'robot_control_node'
    rospy.init_node(frame_id, anonymous=True)
    diag_obj = diag_class(frame_id=frame_id,
                          user_id=0,
                          user_name="robot",
                          queue=1)
    diag_obj.publish(1, "Starting")

    rospy.Subscriber("SystemStatus", diagnostics, sys_stat_callback)
    global database_stat
    # Wait for postgresql node to be ready
    while database_stat != 0 and not rospy.is_shutdown():
        print(f"Waiting for postgresql node status, currently {database_stat}")
        diag_obj.publish(1, "Waiting for postgresql node")
        time.sleep(0.5)

    global user_node_stat
    # Wait for users node to be ready
    while user_node_stat != 0 and not rospy.is_shutdown():
        print(f"Waiting for users node status, currently {user_node_stat}")
        diag_obj.publish(1, "Waiting for users node")
        time.sleep(0.5)

    predictor = future_predictor()
    robot_task = robot_solo_task()

    move_obj = move_class(frame_id=frame_id, queue=10)
    rospy.Subscriber("CurrentState", capability,
                     predictor.user_prediction_callback)
    rospy.Subscriber("RobotStatus", String, predictor.robot_stat_callback)

    rate = rospy.Rate(1)  # 1hz
    db = database()
    home = False
    while not rospy.is_shutdown():
        try:
            predictor.update_predictions()

            # Update future prediction in sql table
            for _, row in predictor.future_estimates.iterrows():
                sql_cmd = f"""DELETE FROM robot_future_estimates WHERE user_id = {row['user_id']};"""
                db.gen_cmd(sql_cmd)
                db.insert_data_list("robot_future_estimates",
                                    predictor.fut_cols, [tuple(row.tolist())])

            # Select row with minimum time until robot required
            row = predictor.future_estimates[
                predictor.future_estimates.robot_start_t ==
                predictor.future_estimates.robot_start_t.min()]

            if (row['robot_start_t'][0] < pd.Timedelta(0)) and (row['done'][0]
                                                                == False):
                home = False
                # if time to next colab < action time start colab action
                action = predictor.task_overview.loc[
                    row['current_action_no']]['action_name'].values[0]
                predictor.task_now = row['task_name'][0]
                predictor.action_no_now = row['current_action_no'][0]
                print(f"action: {action}")
                # Wait for confirmation task has been received
                while predictor.robot_status != action:
                    move_obj.publish(action)
                predictor.robot_start_t = datetime.datetime.now().time()
                predictor.done = False
                # Wait for confirmation task has been completed
                while not predictor.done:
                    time.sleep(0.01)
                predictor.future_estimates.loc[
                    predictor.future_estimates['user_id'] ==
                    row['user_id'].values[0], 'done'] = True

            elif ((row['robot_start_t'][0] > robot_task.next_task_time) or
                  (row['done'][0] == True)) and (not robot_task.finished):
                home = False
                # if time to colb action > time to do solo action
                predictor.task_now = robot_task.task_name
                predictor.action_no_now = robot_task.next_action_id
                print(f"Robot solo task {robot_task.next_action}")
                # Wait for confirmation task has been received
                while predictor.robot_status != robot_task.next_action:
                    move_obj.publish(robot_task.next_action)
                predictor.robot_start_t = datetime.datetime.now().time()
                predictor.done = False
                # Wait for confirmation task has been completed
                while not predictor.done:
                    time.sleep(0.01)
                robot_task.update_progress()

            elif not home:
                # else wait for next colab action
                predictor.task_now = None
                predictor.action_no_now = None
                print(f"Sending robot home")
                # Wait for confirmation task has been received
                while predictor.robot_status != 'home':
                    move_obj.publish('home')
                predictor.robot_start_t = datetime.datetime.now().time()
                predictor.done = False
                # Wait for confirmation task has been completed
                while not predictor.done:
                    time.sleep(0.01)
                # move_obj.publish('')
                home = True

            diag_obj.publish(0, "Running")
            rospy.loginfo(f"{frame_id} active")

        except Exception as e:
            print(f"robot_control_node error: {e}")
            diag_obj.publish(2, f"Error: {e}")
            traceback.print_exc(file=sys.stdout)

        rate.sleep()
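
# Minimal pandas sketch of the row-selection logic above: pick the user whose
# estimated time until the robot is needed (a Timedelta) is smallest, then check
# whether that action is already due. Column names mirror robot_control_node;
# the values are made up for illustration.
import pandas as pd

future_estimates = pd.DataFrame({
    'user_id': [1, 2],
    'task_name': ['assemble_box', 'assemble_box'],
    'current_action_no': [3, 5],
    'robot_start_t': [pd.Timedelta(seconds=40), pd.Timedelta(seconds=-5)],
    'done': [False, False],
})

row = future_estimates[future_estimates.robot_start_t ==
                       future_estimates.robot_start_t.min()]
robot_needed_now = (row['robot_start_t'].iloc[0] < pd.Timedelta(0)) and not row['done'].iloc[0]
print(int(row['user_id'].iloc[0]), robot_needed_now)  # -> 2 True
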
def realsense_run():
    # ROS node setup
    frame_id = 'Realsense_node'

    if not test:
        rospy.init_node(frame_id, anonymous=True)
        diag_obj = diag_class(frame_id=frame_id,
                              user_id=args.user_id,
                              user_name=args.user_name,
                              queue=1)
        rate = rospy.Rate(10)

    if args.classify:
        try:
            frames = cam.pipeline.wait_for_frames()
            if args.depth:
                color_image, depth_colormap, depth_image = cam.depth_frames(
                    frames)
            else:
                color_image = cam.colour_frames(frames)

            im_classifier = classifier(args.comp_device, args.weights,
                                       args.img_size, color_image,
                                       args.conf_thres, args.iou_thres)
            if not test:
                obj_obj = obj_class(frame_id=frame_id,
                                    names=im_classifier.names,
                                    queue=1)

        except Exception as e:
            print("**Classifier Load Error**")
            traceback.print_exc(file=sys.stdout)
            if not test:
                diag_obj.publish(2, f"load classifier error: {e}")
            raise

    diag_timer = time.time()
    while (not rospy.is_shutdown()) or test:
        try:
            # Get frameset of color and depth
            frames = cam.pipeline.wait_for_frames()

            if args.depth:
                color_image_raw, depth_colormap, depth_image = cam.depth_frames(
                    frames)
            else:
                color_image_raw = cam.colour_frames(frames)

            if args.classify:
                try:
                    # Run YOLOv3 classifier on image
                    if args.depth:
                        color_image, det = im_classifier.detect(
                            color_image_raw,
                            depth_image,
                            depth_histogram=False)
                    else:
                        color_image, det = im_classifier.detect(
                            color_image_raw, None)

                    try:
                        mask_img = skinmask(color_image_raw)
                        contours, hull = getcnthull(mask_img)
                        if cv2.contourArea(contours) > 1500:
                            det = torch.cat(
                                (det, torch.tensor([[0, 0, 0, 0, 1, 10, 0]])))
                            #cv2.drawContours(img, [contours], -1, (255,255,0), 2)
                            cv2.drawContours(color_image, [hull], -1,
                                             (0, 255, 255), 2)
                    except Exception as e:
                        print("hand mask error: ", e)
                        pass

                    try:
                        approx, thrash = rectangle_detector(color_image_raw)
                        if approx is not None:
                            cv2.drawContours(color_image, [approx], 0,
                                             (0, 255, 0), 5)
                            if not test:
                                obj_obj.publish(det)

                    except Exception as e:
                        print("rectangle error: ", e)
                        pass

                    try:
                        detect_blobs(thrash)
                    except Exception as e:
                        print("blob error: ", e)
                        pass

                    # if not test:
                    #     obj_obj.publish(det)
                except Exception as e:
                    print("**Classifier Detection Error**")
                    traceback.print_exc(file=sys.stdout)
                    if not test:
                        diag_obj.publish(2, f"load classifier error: {e}")

            # Remove background - Set pixels further than clipping_distance to grey
            #grey_color = 153
            #depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
            #bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
            #image = scale(bg_removed)

            if (time.time() - diag_timer) > 1:
                if not test:
                    diag_obj.publish(0, "Running")
                diag_timer = time.time()

        except TypeError as e:
            time.sleep(1)
            if not test:
                diag_obj.publish(2, f"TypeError: {e}")
        except Exception as e:
            print("**Get Image Error**")
            if not test:
                diag_obj.publish(2, f"Realsense image error: {e}")
            traceback.print_exc(file=sys.stdout)
            break

        #im_screw_states, tally = im_screw_detect.detect_screws(image, args.disp)
        #im_screw_states = im_screw_states.tolist()

        if args.disp:
            if args.depth:
                disp_im = np.hstack((color_image, depth_colormap))
            else:
                disp_im = color_image

            cv2.namedWindow('Realsense viewer', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Realsense viewer', disp_im)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

        if not test:
            rate.sleep()
        else:
            #time.sleep(1)
            pass
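
# Hedged reconstruction of the hand-detection helpers used above (skinmask and
# getcnthull). This is a generic HSV skin-threshold plus largest-contour sketch;
# the thresholds and morphology in the real package may differ.
import cv2
import numpy as np

def skinmask(bgr_image):
    hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    lower = np.array([0, 48, 80], dtype=np.uint8)    # assumed skin-tone range
    upper = np.array([20, 255, 255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower, upper)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
    return cv2.GaussianBlur(mask, (3, 3), 0)

def getcnthull(mask_img):
    contours, _ = cv2.findContours(mask_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    largest = max(contours, key=cv2.contourArea)     # raises ValueError if no contours found
    return largest, cv2.convexHull(largest)
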
def realsense_run():
    # ROS node setup
    frame_id = 'Realsense_node'
    if not test:
        rospy.init_node(frame_id, anonymous=True)
        diag_obj = diag_class(frame_id=frame_id,
                              user_id=args.user_id,
                              user_name=args.user_name,
                              queue=1)
        rate = rospy.Rate(10)

    # Create block detector
    detector = block_detector(test, frame_id)

    # Get initial frames from camera
    frames = cam.pipeline.wait_for_frames()
    color_image, depth_colormap, depth_image = cam.depth_frames(frames)
    # Create a window
    cv2.namedWindow('image')

    if args.tuning_mode:
        # Create trackbars for color change
        # Hue is from 0-179 for Opencv
        cv2.createTrackbar('HMin', 'image', 0, 179, nothing)
        cv2.createTrackbar('SMin', 'image', 0, 255, nothing)
        cv2.createTrackbar('VMin', 'image', 0, 255, nothing)
        cv2.createTrackbar('HMax', 'image', 0, 179, nothing)
        cv2.createTrackbar('SMax', 'image', 0, 255, nothing)
        cv2.createTrackbar('VMax', 'image', 0, 255, nothing)

        # Set default value for Max HSV trackbars
        cv2.setTrackbarPos('HMax', 'image', 179)
        cv2.setTrackbarPos('SMax', 'image', 255)
        cv2.setTrackbarPos('VMax', 'image', 255)

        # Initialize HSV min/max values
        hMin = sMin = vMin = hMax = sMax = vMax = 0
        phMin = psMin = pvMin = phMax = psMax = pvMax = 0

        while True:
            # Get initial frames from camera
            frames = cam.pipeline.wait_for_frames()
            image, depth_colormap, depth_image = cam.depth_frames(frames)
            # Get current positions of all trackbars
            hMin = cv2.getTrackbarPos('HMin', 'image')
            sMin = cv2.getTrackbarPos('SMin', 'image')
            vMin = cv2.getTrackbarPos('VMin', 'image')
            hMax = cv2.getTrackbarPos('HMax', 'image')
            sMax = cv2.getTrackbarPos('SMax', 'image')
            vMax = cv2.getTrackbarPos('VMax', 'image')

            # Set minimum and maximum HSV values to display
            lower = np.array([hMin, sMin, vMin])
            upper = np.array([hMax, sMax, vMax])

            # Convert to HSV format and color threshold
            hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, lower, upper)
            result = cv2.bitwise_and(image, image, mask=mask)

            # Print if there is a change in HSV value
            if ((phMin != hMin) or (psMin != sMin) or (pvMin != vMin) or
                    (phMax != hMax) or (psMax != sMax) or (pvMax != vMax)):
                print(f"(hMin = {hMin}, sMin = {sMin}, vMin = {vMin}), "
                      f"(hMax = {hMax}, sMax = {sMax}, vMax = {vMax})")
                phMin = hMin
                psMin = sMin
                pvMin = vMin
                phMax = hMax
                psMax = sMax
                pvMax = vMax

            # Display the thresholded result; press 'q' to finish tuning
            cv2.imshow('image', result)
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break

        cv2.destroyAllWindows()

    diag_timer = time.time()
    while (not rospy.is_shutdown()) or test:
        try:
            # Get frames from camera
            frames = cam.pipeline.wait_for_frames()
            color_image, depth_colormap, depth_image = cam.depth_frames(frames)

            detector.process_img(color_image, depth_image)

            if (time.time() - diag_timer) > 1:
                if not test:
                    diag_obj.publish(0, "Running")
                diag_timer = time.time()

        except TypeError as e:
            time.sleep(1)
            if not test:
                diag_obj.publish(2, f"TypeError: {e}")
        except Exception as e:
            print("**Get Image Error**")
            if not test:
                diag_obj.publish(2, f"Realsense image error: {e}")
            traceback.print_exc(file=sys.stdout)
            break

        if args.disp:
            disp_im = np.hstack((color_image, depth_colormap))

            cv2.namedWindow('Realsense viewer', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Realsense viewer', disp_im)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

        if not test:
            rate.sleep()
        else:
            #time.sleep(1)
            pass
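
# The HSV tuning trackbars above need a no-op callback named `nothing`; it is
# presumably defined elsewhere in the original module, along these lines:
def nothing(x):
    pass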
Example 10
def realsense_run():
    # ROS node setup
    frame_id = 'Realsense node'

    if not test:
        rospy.init_node('Realsense_main', anonymous=True)
        diag_obj = diag_class(frame_id=frame_id,
                              user_id=args.user_id,
                              user_name=args.user_name,
                              queue=1)
        rate = rospy.Rate(10)

    if args.classify:
        try:
            frames = cam.pipeline.wait_for_frames()
            if args.depth:
                color_image, depth_colormap, depth_image = cam.depth_frames(
                    frames)
            else:
                color_image = np.array(
                    frame_convert2.video_cv(freenect.sync_get_video()[0]))
                #color_image = cam.colour_frames(frames)

            im_classifier = classifier(args.comp_device, args.weights,
                                       args.img_size, color_image,
                                       args.conf_thres, args.iou_thres)
            if not test:
                obj_obj = obj_class(frame_id=frame_id,
                                    names=im_classifier.names,
                                    queue=1)

        except Exception as e:
            print("**Classifier Load Error**")
            traceback.print_exc(file=sys.stdout)
            if not test:
                diag_obj.publish(2, f"load classifier error: {e}")
            raise

    diag_timer = time.time()
    while (not rospy.is_shutdown()) or test:
        try:
            # Get frameset of color and depth
            frames = cam.pipeline.wait_for_frames()

            if args.depth:
                color_image, depth_colormap, depth_image = cam.depth_frames(
                    frames)
            else:
                #color_image = cam.colour_frames(frames)
                color_image = np.array(
                    frame_convert2.video_cv(freenect.sync_get_video()[0]))

            if args.classify:
                try:
                    if args.depth:
                        color_image, det = im_classifier.detect(
                            color_image, depth_image, depth_histogram=False)
                    else:
                        color_image, det = im_classifier.detect(
                            color_image, None)

                    if not test:
                        obj_obj.publish(det)
                except Exception as e:
                    print("**Classifier Detection Error**")
                    traceback.print_exc(file=sys.stdout)
                    if not test:
                        diag_obj.publish(2, f"load classifier error: {e}")

            # Remove background - Set pixels further than clipping_distance to grey
            #grey_color = 153
            #depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
            #bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
            #image = scale(bg_removed)

            if (time.time() - diag_timer) > 1:
                print(time.time())
                if not test:
                    diag_obj.publish(0, "Running")
                diag_timer = time.time()

        except TypeError as e:
            time.sleep(1)
            if not test:
                diag_obj.publish(2, f"TypeError: {e}")
        except Exception as e:
            print("**Get Image Error**")
            if not test:
                diag_obj.publish(2, f"Realsense image error: {e}")
            traceback.print_exc(file=sys.stdout)
            break

        #im_screw_states, tally = im_screw_detect.detect_screws(image, args.disp)
        #im_screw_states = im_screw_states.tolist()
        if args.disp:
            if args.depth:
                disp_im = np.hstack((color_image, depth_colormap))
            else:
                disp_im = color_image

            cv2.namedWindow('Realsense viewer', cv2.WINDOW_AUTOSIZE)
            cv2.imshow('Realsense viewer', disp_im)
            key = cv2.waitKey(1)
            # Press esc or 'q' to close the image window
            if key & 0xFF == ord('q') or key == 27:
                cv2.destroyAllWindows()
                break

        if not test:
            rate.sleep()
        else:
            #time.sleep(1)
            pass
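
# Hedged sketch of the command-line arguments these camera nodes assume. The
# flag names come from the `args.*` attributes referenced above; the defaults
# and help strings are guesses, not the project's real parser.
import argparse

def make_arg_parser():
    parser = argparse.ArgumentParser(description='Camera-based detection node')
    parser.add_argument('--user_id', type=int, default=0)
    parser.add_argument('--user_name', default='unknown')
    parser.add_argument('--classify', action='store_true', help='run the object classifier')
    parser.add_argument('--depth', action='store_true', help='use depth frames')
    parser.add_argument('--disp', action='store_true', help='show the OpenCV viewer window')
    parser.add_argument('--tuning_mode', action='store_true', help='interactive HSV threshold tuning')
    parser.add_argument('--comp_device', default='cpu')
    parser.add_argument('--weights', default='yolov3.weights')
    parser.add_argument('--img_size', type=int, default=416)
    parser.add_argument('--conf_thres', type=float, default=0.25)
    parser.add_argument('--iou_thres', type=float, default=0.45)
    return parser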