Example #1
def test_load_conf_os_env(self, mock_args):
    Config.clear()
    Config.define_bool("VaR", False, "turn me true")
    Config.define_int("v1", 0, "var")
    Config.define_int("nm.v2", 0, "var")
    Config.load_conf()
    # The defaults above are expected to be overridden by OS environment variables
    # set up by the test fixture; key lookup is case-insensitive ("VaR" vs "VAr").
    self.assertEqual(Config.get_var("v1"), 1)
    self.assertEqual(Config.get_var("nm.v2"), 2)
    self.assertEqual(Config.get_var("VAr"), True)
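A minimal standalone sketch of the define/load/get workflow this test exercises. It only uses the Config calls that appear in the examples on this page; the import path and the option names are assumptions and will differ in a real project.

from config import Config  # import path is an assumption

# Declare options with a default value and a help string.
Config.define_int("batch_size", 32, "number of samples per batch")
Config.define_bool("debug", False, "enable verbose logging")

# Resolve the final values; as the tests suggest, defaults can be overridden
# by a config file, command-line arguments or environment variables.
Config.load_conf()

print(Config.get_var("batch_size"), Config.get_var("debug"))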
Example #2
def main():
    global image_provider
    Config.load_conf()

    image_provider = ImageProvider()

    # Write a txt file containing the class names
    with open(
            os.path.join(Config.get_var("human_annotations_path"),
                         "class_names.txt"), "w") as f:
        f.write(','.join(Config.get_var("class.names")))

    app.run(host='0.0.0.0', threaded=True)
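As a companion sketch, the class_names.txt file written above can be read back into a list as follows; only the Config keys already present in the example are used, everything else is illustrative.

import os

from config import Config  # import path is an assumption

Config.load_conf()
class_names_path = os.path.join(Config.get_var("human_annotations_path"),
                                "class_names.txt")
with open(class_names_path) as f:
    class_names = f.read().split(',')
print(class_names)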
Example #3
def test_load_conf_cmd(self, mock_args):
    Config.clear()
    Config.define_bool("VaR", False, "turn me true")
    Config.define_int("v1", 1, "var")
    Config.define_int("v2", 2, "var")
    Config.define_float_list("list", [5., 6, "7."], "A test for list")
    with Config.namespace("n1"):
        Config.define_int("v1", 1, "var")
        Config.define_int("v2", 2, "var")
        Config.define_bool("V3", True, "turn me false")
    Config.load_conf()
    # The values asserted below are expected to come from the (mocked) command-line
    # arguments rather than from the defaults defined above.
    self.assertEqual(Config.get_var("v1"), 2)
    self.assertEqual(Config.get_var("v2"), 3)
    self.assertEqual(Config.get_var("n1.v1"), 2)
    self.assertEqual(Config.get_var("n1.v2"), 3)
    self.assertEqual(Config.get_var("n1.v3"), False)
    self.assertEqual(Config.get_var("VAr"), True)
    self.assertEqual(Config.get_var("list"), [1.0, 2.0, 3.0])
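The namespace block used above groups related options under a dotted prefix. A minimal sketch of that pattern, with hypothetical option names:

from config import Config  # import path is an assumption

Config.define_int("epochs", 10, "number of training epochs")
with Config.namespace("model"):
    # Declared as "layers" here, read back as "model.layers" below.
    Config.define_int("layers", 4, "number of layers")
    Config.define_bool("pretrained", False, "start from pretrained weights")

Config.load_conf()
print(Config.get_var("model.layers"), Config.get_var("model.pretrained"))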
Example #4
def main():
    Config.load_conf()
    config = Config.get_dict()

    reachy = Reachy()
    for motor in reachy.motors:
        motor.compliant = True

    if config["record"]:
        # First part of the script: the user moves the arm and hits return to record
        # each position; typing "save" ends the recording step
        all_positions = []
        action = input("> ")
        while action != "save":
            # Save the position of the motors
            position = {}
            for motor in reachy.motors:
                print(
                    f"The motor \"{motor.name}\" is currently in position: {motor.present_position}"
                )
                position[motor.name] = motor.present_position
            all_positions.append(position)
            action = input("> ")
        # Save the list of positions to a file
        with open(config["file_path"], "w") as f:
            json.dump(all_positions, f)
    else:
        # If the recording step was not executed, read the JSON file to load the key points
        with open(config["file_path"], "r") as f:
            all_positions = json.load(f)

    # Move the arm
    for motor in reachy.motors:
        motor.compliant = False

    for position in all_positions:
        # Move to position
        for motor in reachy.motors:
            motor.goal_position = position[motor.name]

        # Wait 2 s
        time.sleep(2)

    for motor in reachy.motors:
        motor.compliant = True
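In "record" mode the script above writes a JSON list holding one {motor_name: position} dictionary per saved key point. A small illustrative check of such a file (the path and the motor names are hypothetical):

import json

with open("positions.json") as f:  # hypothetical value of config["file_path"]
    all_positions = json.load(f)

for i, position in enumerate(all_positions):
    first_motor, angle = next(iter(position.items()))
    print(f"key point {i}: {len(position)} motors, e.g. {first_motor} at {angle}")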
Example #5
def main():
    Config.load_conf("config_video_burst.yml")
    config = Config.get_dict()

    # check if the script can run
    assert os.path.isfile(config["file"]), "Option 'file' must point to an existing file"
    os.makedirs(config["outputdir"], exist_ok=True)

    if (config["prefix"] is ""):
        config["prefix"] = get_prefix(config["file"])
        logging.info(f'prefix: {config["prefix"]}')

    frame_id = 0
    last_save = -10000
    video = cv2.VideoCapture(config["file"])
    if not video.isOpened():
        raise Exception(f"Cannot open video {config['file']}")
    interval_between_pic = int(
        video.get(cv2.CAP_PROP_FPS) * config["extract_every"] / 1000)
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    logging.info(f'frame_count: {frame_count}')
    frame_count_length = len(str(frame_count))

    while True:
        got_frame, img = video.read()
        if not got_frame:
            logging.info('end of video')
            break
        if frame_id - last_save > interval_between_pic:
            picture_path = os.path.join(
                config["outputdir"],
                f'{config["prefix"]}_{frame_id:0{frame_count_length}}.jpg')
            cv2.imwrite(picture_path, img)
            last_save = frame_id
            logging.info(f'Saving picture {picture_path}')
        frame_id += 1
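The save interval above converts config["extract_every"] (apparently expressed in milliseconds) into a number of frames using the video frame rate. A quick standalone illustration with made-up values:

fps = 30.0               # what video.get(cv2.CAP_PROP_FPS) would return
extract_every_ms = 500   # hypothetical value of config["extract_every"]

interval_between_pic = int(fps * extract_every_ms / 1000)
print(interval_between_pic)  # 15 -> one picture saved every 15 frames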
Example #6
def main():
    Config.load_conf()
    config = Config.get_dict()
    assert config["model_path"] != "", "model_path can't be empty"
    assert config["input_dir"] != "", "input_dir can't be empty"
    assert config["output_dir"] != "", "output_dir can't be empty"

    os.makedirs(config["output_dir"], exist_ok=True)
    images_list = os.listdir(config["input_dir"])
    annotations_list = os.listdir(config["output_dir"])

    # Only keep images that aren't processed yet
    new_list = []
    annotation_ids = [
        os.path.splitext(file_name)[0] for file_name in annotations_list
    ]
    for image_name in images_list:
        image_id, _ = os.path.splitext(image_name)
        if image_id not in annotation_ids:
            new_list.append(image_name)
    images_list = new_list
    images_list.sort()
    logging.info("there are {} images to annotate".format(len(images_list)))

    # load tensorflow model (must be a frozen model)
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(config["model_path"], 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
    with tf.Session() as session:
        # Get all tensors
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in [
                'num_detections', 'detection_boxes', 'detection_scores',
                'detection_classes'
        ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(
                    tensor_name)
        image_tensor = tf.get_default_graph().get_tensor_by_name(
            'image_tensor:0')

        # Run inference
        first_iter = True
        for image_id in tqdm(range(len(images_list))):
            image = cv2.cvtColor(
                cv2.imread(
                    os.path.join(config["input_dir"], images_list[image_id])),
                cv2.COLOR_BGR2RGB)

            if first_iter:
                logging.info(f"image.shape: {image.shape}")
                first_iter = False
            height, width = image.shape[:2]
            image_expanded = np.expand_dims(image, axis=0)
            output_dict = session.run(tensor_dict,
                                      feed_dict={image_tensor: image_expanded})

            good_rectangles = []
            # Scores come back sorted in decreasing order, so we can stop at the
            # first one below the threshold.
            for i, detection_score in enumerate(output_dict["detection_scores"][0]):
                if detection_score < config["object_detection"]["threshold"]:
                    break
                box = output_dict["detection_boxes"][0][i]  # ymin, xmin, ymax, xmax
                if box[3] - box[1] < config["object_detection"]["max_width"]:
                    class_id = int(output_dict["detection_classes"][0][i])
                    good_rectangles.append({
                        "xMin": int(box[1] * width),
                        "yMin": int(box[0] * height),
                        "xMax": int(box[3] * width),
                        "yMax": int(box[2] * height),
                        "detection_score": detection_score.item(),
                        "class": config["class"]["names"][class_id - 1],
                    })

            json_name = os.path.splitext(images_list[image_id])[0] + ".json"
            with open(os.path.join(config["output_dir"], json_name),
                      'w') as outfile:
                json.dump({"rectangles": good_rectangles}, outfile)