"""
from docopt import docopt

import sense.display
from sense.controller import Controller
from sense.downstream_tasks.gesture_recognition import INT2LAB
from sense.downstream_tasks.gesture_recognition import LAB_THRESHOLDS
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.downstream_tasks.postprocess import PostprocessClassificationOutput
from sense.loading import get_relevant_weights
from sense.loading import build_backbone_network
from sense.loading import ModelConfig

# Every backbone/version combination that ships gesture-recognition weights.
# Order matters only for weight-resolution preference: pro variants first.
SUPPORTED_MODEL_CONFIGURATIONS = [
    ModelConfig(backbone, version, ['gesture_recognition'])
    for version in ('pro', 'lite')
    for backbone in ('StridedInflatedEfficientNet', 'StridedInflatedMobileNetV2')
]

if __name__ == "__main__":
    # Parse arguments
    # docopt derives the CLI spec from the module docstring; options that the
    # user did not supply come back falsy, so `or <default>` pins an explicit
    # default for each one.
    args = docopt(__doc__)
    camera_id = int(args['--camera_id'] or 0)
    path_in = args['--path_in'] or None
    path_out = args['--path_out'] or None
    title = args['--title'] or None
    model_name = args['--model_name'] or None
    model_version = args['--model_version'] or None
Example #2
0
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.finetuning import extract_features
from sense.finetuning import generate_data_loader
from sense.finetuning import set_internal_padding_false
from sense.finetuning import training_loops
from sense.loading import build_backbone_network
from sense.loading import get_relevant_weights
from sense.loading import ModelConfig
from sense.loading import update_backbone_weights
from tools.sense_studio.project_utils import load_project_config
from sense.utils import clean_pipe_state_dict_key
from tools import directories

# Backbones usable as a fine-tuning starting point; the empty list means no
# pre-trained downstream heads are required.
SUPPORTED_MODEL_CONFIGURATIONS = [
    ModelConfig(backbone, version, [])
    for version in ('pro', 'lite')
    for backbone in ('StridedInflatedEfficientNet', 'StridedInflatedMobileNetV2')
]


def train_model(path_in,
                path_out,
                model_name,
                model_version,
                num_layers_to_finetune,
                epochs,
                use_gpu=True,
                overwrite=True,
                temporal_training=None,
Example #3
0
import sense.display
from sense.controller import Controller
from sense.downstream_tasks.fitness_rep_counting import INT2LAB
from sense.downstream_tasks.fitness_rep_counting import LAB2INT
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.downstream_tasks.postprocess import AggregatedPostProcessors
from sense.downstream_tasks.postprocess import PostprocessClassificationOutput
from sense.downstream_tasks.postprocess import TwoPositionsCounter
from sense.loading import build_backbone_network
from sense.loading import get_relevant_weights
from sense.loading import ModelConfig

# Rep counting only ships weights for the pro EfficientNet backbone.
SUPPORTED_MODEL_CONFIGURATIONS = [
    ModelConfig(backbone, 'pro', ['rep_counter'])
    for backbone in ('StridedInflatedEfficientNet',)
]

if __name__ == "__main__":
    # Parse arguments
    # docopt returns falsy values for options absent from the command line,
    # so `or <default>` normalizes each to an explicit default.
    args = docopt(__doc__)
    camera_id = int(args['--camera_id'] or 0)
    path_in = args['--path_in'] or None
    path_out = args['--path_out'] or None
    title = args['--title'] or None
    model_name = args['--model_name'] or None
    model_version = args['--model_version'] or None
    use_gpu = args['--use_gpu']  # flag-style option; used as a truthy switch

    # Load weights
    selected_config, weights = get_relevant_weights(
from sense.controller import Controller
from sense.downstream_tasks.gesture_detection import LAB2INT
from sense.downstream_tasks.gesture_detection import INT2LAB
from sense.downstream_tasks.gesture_detection import ENABLED_LABELS
from sense.downstream_tasks.gesture_detection import LAB_THRESHOLDS
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.downstream_tasks.postprocess import AggregatedPostProcessors
from sense.downstream_tasks.postprocess import EventCounter
from sense.downstream_tasks.postprocess import PostprocessClassificationOutput
from sense.loading import get_relevant_weights
from sense.loading import build_backbone_network
from sense.loading import ModelConfig

# Gesture detection ships weights for both versions of the EfficientNet backbone.
SUPPORTED_MODEL_CONFIGURATIONS = [
    ModelConfig('StridedInflatedEfficientNet', version, ['gesture_detection'])
    for version in ('pro', 'lite')
]

if __name__ == "__main__":
    # Parse arguments
    # docopt parses the CLI described in the module docstring; `or <default>`
    # turns missing (falsy) options into explicit defaults.
    args = docopt(__doc__)
    camera_id = int(args['--camera_id'] or 0)
    path_in = args['--path_in'] or None
    path_out = args['--path_out'] or None
    title = args['--title'] or None
    model_name = args['--model_name'] or None
    model_version = args['--model_version'] or None
    use_gpu = args['--use_gpu']  # flag-style option; used as a truthy switch

    # Load weights
Example #5
0
import sense.display
from sense.controller import Controller
from sense.downstream_tasks import calorie_estimation
from sense.downstream_tasks.fitness_activity_recognition import INT2LAB
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.downstream_tasks.postprocess import PostprocessClassificationOutput
from sense.loading import build_backbone_network
from sense.loading import get_relevant_weights
from sense.loading import ModelConfig


# Calorie estimation needs both downstream heads: the activity classifier and
# the MET converter. The same list object is shared across all configurations.
REQUIRED_FEATURE_CONVERTERS = ['fitness_activity_recognition', 'met_converter']

SUPPORTED_MODEL_CONFIGURATIONS = [
    ModelConfig(backbone, version, REQUIRED_FEATURE_CONVERTERS)
    for version in ('pro', 'lite')
    for backbone in ('StridedInflatedMobileNetV2', 'StridedInflatedEfficientNet')
]


if __name__ == "__main__":
    # Parse arguments
    # weight/height/age have no `or` fallback: they are effectively required,
    # since float() raises if the option is missing.
    args = docopt(__doc__)
    weight = float(args['--weight'])
    height = float(args['--height'])
    age = float(args['--age'])
    gender = args['--gender'] or None
    camera_id = int(args['--camera_id'] or 0)
    path_in = args['--path_in'] or None
Example #6
0
  --model_name=NAME               Name of the model to be used.
  --model_version=VERSION         Version of the model to be used.
  --use_gpu                       Whether to run inference on the GPU or not.
"""
from docopt import docopt

import sense.display
from sense.controller import Controller
from sense.downstream_tasks import calorie_estimation
from sense.downstream_tasks.nn_utils import Pipe
from sense.loading import build_backbone_network
from sense.loading import get_relevant_weights
from sense.loading import ModelConfig

# All backbone/version combinations that ship a MET-converter head.
SUPPORTED_MODEL_CONFIGURATIONS = [
    ModelConfig(backbone, version, ['met_converter'])
    for version in ('pro', 'lite')
    for backbone in ('StridedInflatedMobileNetV2', 'StridedInflatedEfficientNet')
]

if __name__ == "__main__":
    # Parse arguments
    # weight/height/age are effectively required: float() raises if the
    # option is missing. The remaining options fall back to explicit defaults.
    args = docopt(__doc__)
    weight = float(args['--weight'])
    height = float(args['--height'])
    age = float(args['--age'])
    gender = args['--gender'] or None
    model_name = args['--model_name'] or None
    model_version = args['--model_version'] or None
    use_gpu = args['--use_gpu']  # flag-style option; used as a truthy switch
Example #7
0
from sense.controller import Controller
from sense.downstream_tasks.gesture_control import LAB2INT
from sense.downstream_tasks.gesture_control import INT2LAB
from sense.downstream_tasks.gesture_control import ENABLED_LABELS
from sense.downstream_tasks.gesture_control import LAB_THRESHOLDS
from sense.downstream_tasks.nn_utils import LogisticRegression
from sense.downstream_tasks.nn_utils import Pipe
from sense.downstream_tasks.postprocess import AggregatedPostProcessors
from sense.downstream_tasks.postprocess import EventCounter
from sense.downstream_tasks.postprocess import PostprocessClassificationOutput
from sense.loading import get_relevant_weights
from sense.loading import build_backbone_network
from sense.loading import ModelConfig

# Gesture control ships weights for both versions of the EfficientNet backbone.
SUPPORTED_MODEL_CONFIGURATIONS = [
    ModelConfig('StridedInflatedEfficientNet', version, ['gesture_control'])
    for version in ('pro', 'lite')
]


def run_gesture_control(model_name: str,
                        model_version: str,
                        title: Optional[str] = None,
                        display_fn: Optional[Callable] = None,
                        **kwargs):
    """
    :param model_name:
        Model from backbone (StridedInflatedEfficientNet or StridedInflatedMobileNetV2).
    :param model_version:
        Model version (pro or lite)
    :param title:
Example #8
0
            f"Classifier not found: {classifier_name}. Only the following classifiers "
            f"can be converted: {list(SUPPORTED_CLASSIFIER_CONVERSIONS.keys())}"
        )

    if classifier_name == "custom_classifier":
        if not path_in:
            raise ValueError("You have to provide the directory used to train the custom classifier")

        backbone_model_config, weights = load_custom_classifier_weights(path_in)
        backbone_name = backbone_model_config.model_name
        finalize_custom_classifier_config(classifier_settings, path_in)
    else:
        if not backbone_name or not backbone_version:
            raise ValueError("You have to provide the name and version for the backbone model")

        model_config = ModelConfig(backbone_name, backbone_version, [classifier_name])
        weights = model_config.get_weights()

    backbone_settings = SUPPORTED_BACKBONE_CONVERSIONS.get(backbone_name)
    if not backbone_settings:
        raise Exception(
            f"Backbone not found: {backbone_name}. Only the following backbones "
            f"can be converted: {list(SUPPORTED_BACKBONE_CONVERSIONS.keys())}"
        )

    # Merge weights (possibly overwriting backbone weights with finetuned ones from classifier checkpoint)
    weights_full = weights['backbone']
    weights_full.update(weights[classifier_name])

    for key, weight in weights_full.items():
        logging.info(f"{key}: {weight.shape}")
from sense.controller import Controller
from sense.downstream_tasks.nn_utils import Pipe, LogisticRegression
from sense.downstream_tasks.postprocess import AggregatedPostProcessors
from sense.downstream_tasks.postprocess import EventCounter
from sense.downstream_tasks.postprocess import PostprocessClassificationOutput
from sense.downstream_tasks.postprocess import TwoPositionsCounter
from sense.downstream_tasks.volleyball import CLASSIFICATION_THRESHOLDS
from sense.downstream_tasks.volleyball import INT2LAB_CLASSIFICATION, INT2LAB_COUNTING
from sense.downstream_tasks.volleyball import LAB2INT_CLASSIFICATION, LAB2INT_COUNTING
from sense.loading import build_backbone_network
from sense.loading import get_relevant_weights
from sense.loading import ModelConfig


# One configuration per volleyball head, both on the pro EfficientNet backbone.
SUPPORTED_MODEL_CONFIGURATIONS = [
    ModelConfig('StridedInflatedEfficientNet', 'pro', [head])
    for head in ('volleyball_classifier', 'volleyball_counter')
]


if __name__ == "__main__":
    # Parse arguments
    # docopt returns falsy values for options absent from the command line,
    # so `or <default>` normalizes each to an explicit default.
    args = docopt(__doc__)
    counter = args['--counter']  # truthy switches from classification to counting
    camera_id = int(args['--camera_id'] or 0)
    path_in = args['--path_in'] or None
    path_out = args['--path_out'] or None
    use_gpu = args['--use_gpu']

    # Select the downstream head and the matching label map for the chosen mode.
    head_name = 'volleyball_counter' if counter else 'volleyball_classifier'
    INT2LAB = INT2LAB_COUNTING if counter else INT2LAB_CLASSIFICATION