def test_define_str(self):
    """define_str must coerce int, float and str default values to strings.

    Starts from a cleared Config, registers four options whose defaults have
    different Python types, and checks the resulting dict holds their ``str``
    representations.
    """
    Config.clear()
    Config.define_str("test", 5, "A test for int")
    Config.define_str("test_f", 5., "A test for int with a float var")
    Config.define_str("test_f2", 5.6, "A test for int with a float var")
    Config.define_str("test_str", "5.6", "A test for int with a float var")
    # str(5) -> "5", str(5.0) -> "5.0", str(5.6) -> "5.6"; plain strings pass through.
    expected = {
        'test': "5",
        'test_f': "5.0",
        'test_f2': "5.6",
        'test_str': "5.6",
    }
    self.assertDictEqual(Config.get_dict(), expected)
# NOTE(review): extraction artifact — an entire video-burst script (imports,
# Config schema, and the start of main()) is flattened onto this single line,
# and it is cut off mid-function at the dangling `if not video.isOpened():`.
# Left verbatim: the missing tail cannot be reconstructed from this view, and
# the inline `#` comment in the flattened text swallows everything after it.
# NOTE(review) for whoever restores the real file:
#   - `config["prefix"] is ""` is an identity comparison against a str
#     literal; it must be `config["prefix"] == ""` (or `not config["prefix"]`)
#     — CPython >= 3.8 emits a SyntaxWarning for `is` with a literal.
#   - the assert message `f"Option 'file' need to be provided"` has no
#     placeholder (plain string suffices), and `assert` is stripped under -O;
#     an explicit raise would be safer for input validation.
#   - `get_prefix` is called but not defined in this view — presumably defined
#     later in the original file; verify when restoring.
import cv2 import logging import os import coloredlogs from distribute_config import Config coloredlogs.install(level="DEBUG") Config.define_str("file", "", "input file: video to read and split") Config.define_float("extract_every", 100, "Time in ms between two extracted images") Config.define_str("prefix", "", "Prefix to the name of the images") Config.define_str("outputdir", ".", "Where to save the pictures") def main(): Config.load_conf("config_video_burst.yml") config = Config.get_dict() # check if the script can run assert os.path.isfile(config["file"]), f"Option 'file' need to be provided" os.makedirs(config["outputdir"], exist_ok=True) if (config["prefix"] is ""): config["prefix"] = get_prefix(config["file"]) logging.info(f'prefix: {config["prefix"]}') frame_id = 0 last_save = -10000 video = cv2.VideoCapture(config["file"]) if not video.isOpened():
# NOTE(review): extraction artifact — a frozen-model inference script (docker
# variant: model_path defaults to /opt/model/...) flattened onto one line and
# truncated mid-call: `Config.define_float( "max_width", 1.0,` never closes.
# Left verbatim; the missing tail cannot be reconstructed from this view.
# NOTE(review): the first `#` comment in the flattened text comments out the
# remainder of the line, so this is not runnable as-is — it must be re-split
# into its original multi-line form.
import logging import json import coloredlogs import cv2 import tensorflow as tf import numpy as np from tqdm import tqdm # progress bar from distribute_config import Config # Run a frozen model on a set of images and output the detections as .json files, one per image. # For now it only keeps "detection_classes" == 1, i.e. "class"="person" coloredlogs.install(level="DEBUG") Config.define_str( "model_path", "/opt/model/frozen_inference_graph.pb", "Path of the model to load and execute, for instance" "/opt/model/frozen_inference_graph.pb. If you're using docker-compose you shouldn't change this." ) Config.define_str("input_dir", "", "Path where the images to annotate are stored") Config.define_str( "output_dir", "", "Path to store pre-annotations (model annotations to help human annotators)" ) with Config.namespace("class"): Config.define_str_list("names", [], "name of the classes to annotate") with Config.namespace("object_detection"): Config.define_float("threshold", 0.2, "Discard boxes with score below this value") Config.define_float( "max_width", 1.0,
# Flask-based annotation server: module-level setup — logging, the Config
# schema (paths, per-class display settings, minimum-box thresholds), and the
# shared image-provider singleton.
import threading
import json
import logging
import coloredlogs
from PIL import Image
from flask import Flask, url_for, redirect, Response, jsonify
from flask_cors import CORS
import flask
from distribute_config import Config

coloredlogs.install(level='DEBUG')

# Filesystem layout: source images plus two parallel annotation stores
# (human-made annotations and model pre-annotations shown as hints).
Config.define_str("images_path", "static/images", "Path where are stored the images to annotate")
Config.define_str("human_annotations_path", "static/human_annotations", "Path where are stored human annotation")
Config.define_str("model_annotations_path", "static/model_annotations", "Path where are stored model annotation for helping human")

# Per-class display settings live under the "class" config namespace.
with Config.namespace("class"):
    Config.define_str_list("names", [], "name of the classes to annotate")
    Config.define_str_list("colors", [], "colors for each classes")

# Boxes smaller than these thresholds are highlighted (red) in the UI.
Config.define_int("min_height", 0, "Rectangle with lower height will be displayed red")
Config.define_int("min_width", 0, "Rectangle with lower width will be displayed red")

# Module-level singleton; presumably assigned during startup elsewhere in the
# file — TODO(review): confirm the initialisation site.
image_provider = None
# NOTE(review): extraction artifact — a Reachy-arm record/replay script
# flattened onto one line and truncated inside main()'s `while action !=
# "save":` loop (cut at `position[motor.name] = motor.present_position`).
# Left verbatim; the missing tail cannot be reconstructed from this view.
# NOTE(review): the inline `#` comments in the flattened text swallow the rest
# of the line, so this must be re-split into its original multi-line form.
# NOTE(review): the "record" help string has a trailing space ("Else only run ")
# — cosmetic, but worth trimming when restoring.
import json import time from distribute_config import Config from reachy import Reachy Config.define_str("file_path", "", "path of the file to write the different positions") Config.define_bool("record", True, "if true then record and run. Else only run ") def main(): Config.load_conf() config = Config.get_dict() reachy = Reachy() for motor in reachy.motors: motor.compliant = True if config["record"]: # First part of the script : the user can move the arm and hit return to save the arm position all_positions = [] action = input("> ") while action != "save": # Save the position of the motors position = {} for motor in reachy.motors: print( f"The motor \"{motor.name}\" is currently in position: {motor.present_position}" ) position[motor.name] = motor.present_position
# NOTE(review): extraction artifact — a second variant of the frozen-model
# inference script (local variant: empty model_path default, max_width 0.7)
# flattened onto one line and truncated mid-call: the final
# `Config.define_float( "max_width", 0.7, "Discard boxes..."` is never closed.
# Left verbatim; the missing tail cannot be reconstructed from this view.
# NOTE(review): the first `#` comment in the flattened text comments out the
# remainder of the line, so this is not runnable as-is — it must be re-split
# into its original multi-line form.
import os import logging import json import coloredlogs import cv2 import tensorflow as tf import numpy as np from tqdm import tqdm # progress bar from distribute_config import Config # Run a frozen model on a set of images and output the detections as .json files, one per image. # For now it only keeps "detection_classes" == 1, i.e. "class"="person" coloredlogs.install(level="DEBUG") Config.define_str( "model_path", "", "Path of the model to load and execute, for instance models/frozen_inference_graph.pb" ) Config.define_str("input_dir", "", "Path where the images to annotate are stored") Config.define_str( "output_dir", "", "Path to store pre-annotations (model annotations to help human annotators)" ) with Config.namespace("class"): Config.define_str_list("names", [], "name of the classes to annotate") with Config.namespace("object_detection"): Config.define_float("threshold", 0.2, "Discard boxes with score below this value") Config.define_float( "max_width", 0.7, "Discard boxes with width upper this value because in some cases, very large detections are mostly false positives"