Example 1

import os

import torch

# read_config, YOLO3_MODEL and ImageLabeler are assumed to be provided by the
# surrounding project.

def run():
    """
    Detect faces in a directory of images and generate XML annotation files.
    """

    # Main parameters
    gpu_devices = [3]

    # Model path
    model_path = "/home/andrew/projects/htracking/notebooks/posture_estimation/face_model.pth"

    # Set CUDA devices
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(map(str, gpu_devices))

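    # Use the first visible GPU when at least one was requested and CUDA is
    # available; otherwise fall back to the CPU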
    num_gpus = len(gpu_devices)
    if num_gpus > 0 and torch.cuda.is_available():
        use_cuda = True
        device = torch.device("cuda:0")
    else:
        use_cuda = False
        device = torch.device("cpu")

    print("gpu_devices: {}, num_gpus: {}".format(gpu_devices, num_gpus))
    print("Running on device: {}".format(device))

    cwd = "/home/andrew/projects/htracking/tools/detect_faces"
    config_name = "config.yaml"
    config_path = os.path.join(cwd, config_name)
    config = read_config(config_path)

    training = False  # inference mode

    # Model with data parallel
    model = YOLO3_MODEL(config, device, training=training)

    # Load the trained models
    print("Loading the trained model from {}".format(model_path))
    model.load_state(model_path)

    # Generate the annotation files

    ann_dir = "/home/andrew/projects/datasets/face_datasets/MYFACES/buffer/xml"
    image_dir = "/home/andrew/projects/datasets/face_datasets/MYFACES/buffer/images"

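    # ImageLabeler runs the detector over the images in image_dir and writes
    # the resulting annotation files into ann_dir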
    labeler = ImageLabeler(ann_dir, image_dir, model)
    labeler.run()
Example 2

import argparse
import os

from torchvision import transforms

# read_config, ListToNumpy, NumpyToTensor and VOCDetection are assumed to be
# provided by the surrounding project.

# Command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c",
                "--config",
                required=False,
                default='config.yaml',
                help="Configuration file")
ap.add_argument("-a",
                "--anchors",
                required=False,
                type=int,
                default=9,
                help="Number of anchors")
args = vars(ap.parse_args())

config_name = args['config']
num_anchors = args['anchors']

# Read the configuration file
cwd = os.getcwd()
config_path = os.path.join(cwd, config_name)
config = read_config(config_path)

print("Finding the anchors:")

# Dataset
train_images_path = config['train_images_path']
train_ann_path = config['train_ann_path']

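# Resize the shorter image side to 416 px (YOLOv3's input resolution) and
# convert to a tensor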
transform = transforms.Compose([
    transforms.Resize(416),
    transforms.ToTensor(),
])
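# Targets: convert the parsed VOC annotation lists to numpy arrays, then to
# tensors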
target_transform = transforms.Compose([ListToNumpy(), NumpyToTensor()])
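
# VOC-style detection dataset pairing each image with its XML annotation
# (the transform/target_transform keywords are assumed to follow torchvision's
# dataset convention)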
dataset = VOCDetection(train_images_path,
                       train_ann_path,
                       transform=transform,
                       target_transform=target_transform)