Example #1
def make_command_class_serializer(
        *schema_paths: str) -> CommandClassSerializer:
    data = {}
    for schema_path in schema_paths:
        data.update(load_yaml(os.path.join("network", "protocol",
                                           schema_path)))

    return CommandClassSerializer(data)
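
Note: every example on this page goes through the project's load_yaml helper, which is not shown. A minimal sketch of what such a helper plausibly looks like, assuming PyYAML; the real implementation may resolve paths or merge documents differently.

import yaml

def load_yaml(path: str) -> dict:
    # Parse a single YAML document into a dict; safe_load refuses arbitrary object tags.
    with open(path) as f:
        return yaml.safe_load(f) or {}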
def command_class_serializer():
    schema_files = [
        "network/protocol/command_classes/management.yaml",
        "network/protocol/command_classes/transport_encapsulation.yaml",
        "network/protocol/command_classes/application.yaml"
    ]

    data = {}
    for schema_file in schema_files:
        data.update(load_yaml(schema_file))

    yield CommandClassSerializer(data)
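
Since command_class_serializer yields instead of returning, it is almost certainly a pytest fixture. A hedged usage sketch follows; the @pytest.fixture decorator, the placeholder body, and the test function are illustrative assumptions, not part of the original source.

import pytest

@pytest.fixture
def command_class_serializer():
    serializer = object()  # stand-in for the CommandClassSerializer(data) built above
    yield serializer

def test_serializer_is_available(command_class_serializer):
    # pytest injects whatever the fixture yielded, matched by parameter name.
    assert command_class_serializer is not None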
Example #3
def train_and_test(args: argparse.Namespace):
    if args.test and args.saved_state is None:
        print('--test requires --saved_state to specify the saved model weights')
        sys.exit(1)

    # Select device
    cuda_device = 'cuda:%d' % args.gpu
    device = torch.device(cuda_device if torch.cuda.is_available() else 'cpu')

    # Load parameters from yaml file.
    param_config = load_yaml(args.param_file)

    # Basic parameters
    modality = args.modality
    modality_config = param_config.get('modalities').get(modality)

    # Hyper params
    num_neighbors = modality_config.get(
        'num_neighbors') if args.num_neighbors is None else args.num_neighbors
    batch_size = modality_config.get('batch_size')
    num_epochs = modality_config.get(
        'num_epochs') if args.epochs is None else args.epochs
    shuffle = param_config.get('dataset').get('shuffle')

    # Criterion, optimizer and scheduler
    model_class_name = modality_config.get('model').get('class_name')
    criterion = modality_config.get('criterion').get('class_name')
    criterion_from = modality_config.get('criterion').get('from_module')
    criterion_kwargs = modality_config.get('criterion').get('kwargs')
    if args.margin:
        criterion_kwargs['margin'] = args.margin
    if args.semi_hard is not None:
        criterion_kwargs['semi_hard'] = args.semi_hard
    optimizer = modality_config.get('optimizer').get('class_name')
    optimizer_from = modality_config.get('optimizer').get('from_module')
    optimizer_kwargs = modality_config.get('optimizer').get('kwargs')
    if args.lr:
        optimizer_kwargs['lr'] = args.lr
    if args.optimizer:
        optimizer = args.optimizer

    # Dataset config
    selected_dataset = getattr(datasets,
                               param_config.get('dataset').get('class_name'))
    transforms, test_transforms = get_transforms_from_config(
        param_config.get('modalities').get(modality).get('transforms'))
    train_dataset_kwargs = param_config.get('dataset').get('train_kwargs')
    validation_dataset_kwargs = param_config.get('dataset').get(
        'validation_kwargs')
    test_dataset_kwargs = param_config.get('dataset').get('test_kwargs')

    # Load Data
    train_dataset = selected_dataset(modality=modality,
                                     transform=transforms,
                                     **train_dataset_kwargs)
    num_actions = len(train_dataset.actions)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_sampler=BalancedSampler(
                                  labels=train_dataset.labels,
                                  n_classes=num_actions,
                                  n_samples=modality_config['num_samples']))
    validation_dataset = selected_dataset(modality=modality,
                                          transform=test_transforms,
                                          **validation_dataset_kwargs)
    validation_loader = DataLoader(dataset=validation_dataset,
                                   batch_size=batch_size,
                                   shuffle=shuffle)
    test_dataset = selected_dataset(modality=modality,
                                    transform=test_transforms,
                                    **test_dataset_kwargs)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=shuffle)
    class_names = train_dataset.get_class_names()

    # Instantiate the model
    model_kwargs = modality_config.get('model').get('kwargs')
    if args.pretrained is not None and model_class_name == 'MobileNetV2':
        model_kwargs['pretrained'] = args.pretrained
    if args.out_size is not None:
        model_kwargs['out_size'] = args.out_size
    if args.dr is not None:
        model_kwargs['dropout_rate'] = args.dr
    model = getattr(models, model_class_name)(
        *modality_config.get('model').get('args'), **model_kwargs)
    if args.test:
        model.load_state_dict(torch.load(args.saved_state))
    model = model.to(device)

    # Loss, optimizer and scheduler
    criterion = getattr(importlib.import_module(criterion_from),
                        criterion)(**criterion_kwargs)
    optimizer = getattr(importlib.import_module(optimizer_from),
                        optimizer)(model.parameters(), **optimizer_kwargs)
    scheduler = None
    if not args.no_scheduler:
        scheduler_class_name = modality_config.get('scheduler').get(
            'class_name')
        scheduler_from = modality_config.get('scheduler').get('from_module')
        scheduler_kwargs = modality_config.get('scheduler').get('kwargs')
        scheduler = getattr(importlib.import_module(scheduler_from),
                            scheduler_class_name)(optimizer,
                                                  **scheduler_kwargs)

    # Training procedure:
    # 1. Instantiate tensorboard writer
    # 2. Run training with triplet loss
    max_val_acc = -1
    max_train_acc = -1
    min_train_loss = -1
    min_val_loss = -1
    if not args.test:
        if args.experiment is None:
            print('Specify an experiment name with the --experiment argument')
            sys.exit(1)
        elif args.experiment == 'auto':
            experiment = '%s_%s_TL_A%s_M%s_LR%s_%s_%sep' % (
                model.name, modality, str(num_actions),
                str(criterion_kwargs['margin']), str(optimizer_kwargs['lr']),
                'semi_hard' if criterion_kwargs['semi_hard'] else 'hard',
                num_epochs)
        else:
            experiment = args.experiment
        if args.verbose:
            print('Experiment:  %s' % experiment)
        writer = SummaryWriter('../logs/' + experiment)

        train_losses, val_losses, val_accs, train_accs = train_triplet_loss(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            scheduler=scheduler,
            class_names=class_names,
            train_loader=train_loader,
            val_loader=validation_loader,
            num_epochs=num_epochs,
            device=device,
            experiment=experiment,
            writer=writer,
            n_neighbors=num_neighbors,
            verbose=args.verbose)

        # Save last state of model
        save_model(model, '%s_last_state.pt' % experiment)

        max_val_acc = max(val_accs) if len(val_accs) > 0 else max_val_acc
        max_train_acc = max(
            train_accs) if len(train_accs) > 0 else max_train_acc
        min_train_loss = min(
            train_losses) if len(train_losses) > 0 else min_train_loss
        min_val_loss = min(val_losses) if len(val_losses) > 0 else min_val_loss

    cm, test_acc, test_scores, test_labels = get_predictions_with_knn(
        n_neighbors=num_neighbors,
        train_loader=train_loader,
        test_loader=test_loader,
        model=model,
        device=device)

    cm_image = plot_confusion_matrix(cm=cm,
                                     title='Confusion Matrix - Test Loader',
                                     normalize=False,
                                     save=False,
                                     show_figure=False,
                                     classes=test_dataset.get_class_names())
    if not args.test:
        writer.add_hparams(
            {
                'learning_rate': optimizer_kwargs['lr'],
                'margin': criterion_kwargs['margin'],
                'semi_hard': criterion_kwargs['semi_hard'],
                'out_size': model_kwargs['out_size']
            }, {
                'hparam/val_acc': max_val_acc,
                'hparam/test_acc': test_acc,
                'hparam/train_acc': max_train_acc
            },
            run_name='hparams')
        writer.add_images('ConfusionMatrix/Test',
                          cm_image,
                          dataformats='CHW',
                          global_step=num_epochs - 1)
        writer.add_embedding(
            test_scores,
            metadata=[class_names[idx] for idx in test_labels.int().tolist()],
            tag="test (%f%%)" % test_acc)
        writer.add_text('config', json.dumps(param_config, indent=2))
        writer.add_text('args', json.dumps(args.__dict__, indent=2))
        writer.flush()
        writer.close()

        return {
            'lr': optimizer_kwargs['lr'],
            'margin': criterion_kwargs['margin'],
            'semi_hard': criterion_kwargs['semi_hard'],
            'out_size': model_kwargs['out_size'],
            'test_acc': test_acc,
            'max_train_acc': max_train_acc,
            'max_val_acc': max_val_acc,
            'min_train_loss': min_train_loss,
            'min_val_loss': min_val_loss
        }

    return {'test_acc': test_acc}
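
The chained param_config.get(...) calls above imply a particular shape for the YAML parameter file. Sketched here as a Python dict: the key names come from the code, while every value is an illustrative assumption (class names such as 'TripletLoss' included).

param_config = {
    'dataset': {
        'class_name': 'SomeDataset',  # resolved via getattr on the datasets module
        'shuffle': True,
        'train_kwargs': {},
        'validation_kwargs': {},
        'test_kwargs': {},
    },
    'modalities': {
        'inertial': {
            'batch_size': 32,
            'num_epochs': 100,
            'num_neighbors': 5,
            'num_samples': 4,  # consumed by BalancedSampler
            'transforms': [],
            'model': {'class_name': 'MobileNetV2', 'args': [], 'kwargs': {}},
            'criterion': {'class_name': 'TripletLoss',
                          'from_module': 'losses',
                          'kwargs': {'margin': 0.2, 'semi_hard': True}},
            'optimizer': {'class_name': 'Adam',
                          'from_module': 'torch.optim',
                          'kwargs': {'lr': 1e-3}},
            'scheduler': {'class_name': 'StepLR',
                          'from_module': 'torch.optim.lr_scheduler',
                          'kwargs': {'step_size': 30}},
        },
    },
}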
Example #4
def main(args):
    # Select device
    cuda_device = 'cuda:%d' % args.gpu
    device = torch.device(cuda_device if torch.cuda.is_available() else 'cpu')

    # Load parameters from yaml file.
    param_config = load_yaml(args.param_file)
    shuffle = False
    selected_dataset = getattr(datasets,
                               param_config.get('dataset').get('class_name'))
    train_dataset_kwargs = param_config.get('dataset').get('train_kwargs')
    test_dataset_kwargs = param_config.get('dataset').get('test_kwargs')

    # modalities
    modalities = []
    if args.inertial_state is not None:
        modalities.append('inertial')
    if args.sdfdi_state is not None:
        modalities.append('sdfdi')
    if args.skeleton_state is not None:
        modalities.append('skeleton')

    if len(modalities) < 2:
        raise RuntimeError('Cannot fuse with fewer than two modalities')

    # Synchronized lists
    train_concat_scores = None
    train_concat_labels = None
    test_concat_scores = None
    test_concat_labels = None
    class_names = None

    # Get concatenated vectors
    if not os.path.exists(TRAIN_SCORES_FILE) or args.new_vectors:
        for modality in modalities:
            if param_config.get('modalities').get(modality) is None:
                break

            batch_size = param_config.get('modalities').get(modality).get(
                'batch_size')
            train_transforms, test_transforms = get_transforms_from_config(
                param_config.get('modalities').get(modality).get('transforms'))
            train_dataset = selected_dataset(modality=modality,
                                             transform=train_transforms,
                                             **train_dataset_kwargs)
            train_loader = DataLoader(dataset=train_dataset,
                                      batch_size=batch_size,
                                      shuffle=shuffle)
            test_dataset = selected_dataset(modality=modality,
                                            transform=test_transforms,
                                            **test_dataset_kwargs)
            test_loader = DataLoader(dataset=test_dataset,
                                     batch_size=batch_size,
                                     shuffle=shuffle)
            model_config = param_config.get('modalities').get(modality).get(
                'model')
            model_kwargs = model_config.get('kwargs')
            if args.out_size is not None:
                model_kwargs['out_size'] = args.out_size
            model = getattr(models, model_config.get('class_name'))(
                *model_config.get('args'), **model_kwargs)
            model.load_state_dict(
                torch.load(getattr(args, modality + '_state')))
            model = model.to(device)

            print('Getting train vectors from ' + modality)
            train_scores, train_labels = get_predictions(
                train_loader, model, device)
            train_concat_scores = get_fused_scores(train_concat_scores,
                                                   train_scores, args.rule)
            train_concat_labels = get_fused_labels(train_concat_labels,
                                                   train_labels)

            print('Getting test vectors from ' + modality)
            test_scores, test_labels = get_predictions(test_loader, model,
                                                       device)
            test_concat_scores = get_fused_scores(test_concat_scores,
                                                  test_scores, args.rule)
            test_concat_labels = get_fused_labels(test_concat_labels,
                                                  test_labels)

            if not class_names:
                class_names = train_dataset.get_class_names()

        # L2 Normalize the concatenated vectors
        train_concat_scores = train_concat_scores.div(
            train_concat_scores.norm(p=2, dim=1, keepdim=True))
        test_concat_scores = test_concat_scores.div(
            test_concat_scores.norm(p=2, dim=1, keepdim=True))

        # Save concatenated vectors temporarily to avoid recomputing scores every time
        print('Saving vectors to speed up the next run')
        torch.save(train_concat_scores, TRAIN_SCORES_FILE)
        torch.save(train_concat_labels, TRAIN_LABELS_FILE)
        torch.save(test_concat_scores, TEST_SCORES_FILE)
        torch.save(test_concat_labels, TEST_LABELS_FILE)

        if args.print_tsne or args.save_tsne:
            if device.type == 'cuda':
                train_concat_scores = train_concat_scores.cpu()
                train_concat_labels = train_concat_labels.cpu()
                test_concat_scores = test_concat_scores.cpu()
                test_concat_labels = test_concat_labels.cpu()
            run_tsne(train_concat_scores,
                     train_concat_labels.argmax(1),
                     class_names,
                     filename='train_embeddings.png',
                     save=args.save_tsne,
                     show=args.print_tsne)
            run_tsne(test_concat_scores,
                     test_concat_labels.argmax(1),
                     class_names,
                     filename='test_embeddings.png',
                     save=args.save_tsne,
                     show=args.print_tsne)
    else:
        print('Vectors exist. Loading...')
        train_concat_scores = torch.load(TRAIN_SCORES_FILE)
        train_concat_labels = torch.load(TRAIN_LABELS_FILE)
        test_concat_scores = torch.load(TEST_SCORES_FILE)
        test_concat_labels = torch.load(TEST_LABELS_FILE)

    if args.use_knn:
        if device.type == 'cuda':
            train_concat_scores = train_concat_scores.cpu()
            train_concat_labels = train_concat_labels.cpu()
            test_concat_scores = test_concat_scores.cpu()
            test_concat_labels = test_concat_labels.cpu()

        classifier = KNeighborsClassifier(n_neighbors=args.n_neighbors)
        classifier.fit(train_concat_scores, train_concat_labels.argmax(1))
        test_predictions = classifier.predict(test_concat_scores)

        test_accuracy = int((test_concat_labels.argmax(1) == torch.Tensor(test_predictions)).sum()) / \
                        test_concat_labels.shape[0]
    elif args.use_elm:
        # ELM initialization
        elm = ELM(input_size=train_concat_scores.shape[1],
                  num_classes=train_concat_labels.shape[1],
                  hidden_size=args.elm_hidden_size,
                  device=device)

        # Fit the ELM on the training data. Any modality's labels will do; they are all the same since shuffle is off.
        print('Training ELM network...')
        elm.fit(train_concat_scores, train_concat_labels)

        # Get accuracy on test data
        test_accuracy = elm.evaluate(test_concat_scores, test_concat_labels)

    else:
        mlp = MLP(input_size=train_concat_scores.shape[1],
                  hidden_size=args.mlp_hidden_size,
                  out_size=train_concat_labels.shape[1],
                  dropout_rate=args.mlp_dr,
                  norm_out=False)
        mlp = mlp.to(device)
        criterion = CrossEntropyLoss()
        optimizer = RMSprop(mlp.parameters(), lr=args.mlp_lr)
        if not os.path.exists(MLP_STATE_FILE) or args.new_mlp:
            train_simple(mlp, criterion, optimizer, args.mlp_epochs,
                         train_concat_scores, train_concat_labels)
            torch.save(mlp.state_dict(), MLP_STATE_FILE)
        else:
            print('MLP is already trained. Loading...')
            mlp.load_state_dict(torch.load(MLP_STATE_FILE))
        test_accuracy = get_accuracy_simple(mlp, test_concat_scores,
                                            test_concat_labels)

    print('Test accuracy: %f' % test_accuracy)
    return test_accuracy
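
get_fused_scores and get_fused_labels are project helpers not shown on this page. A plausible sketch, assuming args.rule chooses between concatenating and averaging the per-modality score tensors; the rule names are guesses.

import torch

def get_fused_scores(acc, scores, rule):
    if acc is None:
        return scores  # first modality: nothing to fuse with yet
    if rule == 'concat':
        return torch.cat((acc, scores), dim=1)
    if rule == 'avg':
        return (acc + scores) / 2  # pairwise running mean; a sketch only
    raise ValueError('Unknown fusion rule: %s' % rule)

def get_fused_labels(acc, labels):
    # Loaders run with shuffle off, so labels line up across modalities.
    return labels if acc is None else acc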
Example #5
def main(args):
    # Select device
    cuda_device = 'cuda:%d' % args.gpu
    device = torch.device(cuda_device if torch.cuda.is_available() else 'cpu')

    # Load parameters from yaml file.
    param_config = load_yaml(args.param_file)
    shuffle = False
    selected_dataset = getattr(datasets,
                               param_config.get('dataset').get('class_name'))
    train_dataset_kwargs = param_config.get('dataset').get('train_kwargs')
    test_dataset_kwargs = param_config.get('dataset').get('test_kwargs')

    # modalities
    modalities = []
    if args.inertial_state is not None:
        modalities.append('inertial')
    if args.sdfdi_state is not None:
        modalities.append('sdfdi')
    if args.skeleton_state is not None:
        modalities.append('skeleton')

    # Synchronized lists
    train_all_scores = None
    train_all_labels = None
    test_all_scores = None
    test_all_labels = None

    if len(modalities) < 2:
        raise RuntimeError('Cannot fuse with fewer than two modalities')

    for modality in modalities:
        if param_config.get('modalities').get(modality) is None:
            break

        batch_size = 16
        train_transforms, test_transforms = get_transforms_from_config(
            param_config.get('modalities').get(modality).get('transforms'))
        train_dataset = selected_dataset(modality=modality,
                                         transform=train_transforms,
                                         **train_dataset_kwargs)
        train_loader = DataLoader(dataset=train_dataset,
                                  batch_size=batch_size,
                                  shuffle=shuffle)
        test_dataset = selected_dataset(modality=modality,
                                        transform=test_transforms,
                                        **test_dataset_kwargs)
        test_loader = DataLoader(dataset=test_dataset,
                                 batch_size=batch_size,
                                 shuffle=shuffle)
        model_config = param_config.get('modalities').get(modality).get(
            'model')
        model = getattr(models, model_config.get('class_name'))(
            *model_config.get('args'), **model_config.get('kwargs'))
        model.load_state_dict(torch.load(getattr(args, modality + '_state')))
        model.skip_last_fc = True
        model = model.to(device)

        print('Getting train vectors from ' + modality)
        train_scores, train_labels = get_predictions(train_loader, model,
                                                     device)
        train_all_scores = get_fused_scores(train_all_scores, train_scores,
                                            args.rule)
        train_all_labels = get_fused_labels(train_all_labels, train_labels)
        print('Getting test vectors from ' + modality)
        test_scores, test_labels = get_predictions(test_loader, model, device)
        test_all_scores = get_fused_scores(test_all_scores, test_scores,
                                           args.rule)
        test_all_labels = get_fused_labels(test_all_labels, test_labels)

    # ELM initialization
    elm = ELM(input_size=train_all_scores.shape[1],
              num_classes=train_all_labels.shape[1],
              hidden_size=args.hidden_size,
              device=device)

    # Fit the ELM on the training data. Any modality's labels will do; they are all the same since shuffle is off.
    print('Training ELM network...')
    elm.fit(train_all_scores, train_all_labels)

    # Get accuracy on test data
    accuracy = elm.evaluate(test_all_scores, test_all_labels)

    print('ELM Accuracy: %f' % accuracy)
    return accuracy
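
The ELM class is also project code. A compact sketch of a classic extreme learning machine with the same fit/evaluate interface, assuming one-hot labels (the code above uses train_all_labels.shape[1] as the class count): the hidden layer is random and frozen, and only the output weights are solved, in closed form.

import torch

class ELM:
    def __init__(self, input_size, num_classes, hidden_size, device='cpu'):
        # num_classes is kept for interface parity; it is inferred from y in fit.
        self.w = torch.randn(input_size, hidden_size, device=device)
        self.b = torch.randn(hidden_size, device=device)
        self.beta = None  # output weights, shape (hidden_size, num_classes)

    def _hidden(self, x):
        return torch.sigmoid(x.to(self.w.device) @ self.w + self.b)

    def fit(self, x, y):
        h = self._hidden(x)
        # Solve h @ beta ≈ y (one-hot) by least squares via the pseudo-inverse.
        self.beta = torch.linalg.pinv(h) @ y.to(h.device).float()

    def evaluate(self, x, y):
        pred = self._hidden(x) @ self.beta
        return (pred.argmax(1) == y.to(pred.device).argmax(1)).float().mean().item()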
def train_and_test(args: argparse.Namespace):
    param_config = load_yaml(args.param_file, append=False)

    # Select device
    cuda_device = 'cuda:%d' % args.gpu
    device = torch.device(cuda_device if torch.cuda.is_available() else 'cpu')

    # Generic arguments
    num_epochs = param_config.get('general').get(
        'num_epochs') if args.epochs is None else args.epochs
    num_neighbors = param_config.get('general').get('num_neighbors')

    # Load the selected dataset
    selected_dataset = getattr(datasets,
                               param_config.get('dataset').get('class_name'))

    # Instantiate datasets and loaders for each modality
    train_inertial, val_inertial, test_inertial = get_train_val_test_datasets(
        selected_dataset, 'inertial', param_config)
    train_sdfdi, val_sdfdi, test_sdfdi = get_train_val_test_datasets(
        selected_dataset, 'sdfdi', param_config)
    if param_config.get('modalities').get('skeleton'):
        train_skeleton, val_skeleton, test_skeleton = get_train_val_test_datasets(
            selected_dataset, 'skeleton', param_config)
    train_datasets = [train_inertial, train_sdfdi]
    val_datasets = [val_inertial, val_sdfdi]
    test_datasets = [test_inertial, test_sdfdi]
    if param_config.get('modalities').get('skeleton'):
        train_datasets.append(train_skeleton)
        val_datasets.append(val_skeleton)
        test_datasets.append(test_skeleton)
    # Prepare concat datasets and loaders
    train_dataset = ConcatDataset(*train_datasets)
    val_dataset = ConcatDataset(*val_datasets)
    test_dataset = ConcatDataset(*test_datasets)
    num_actions = len(train_dataset.datasets[0].actions)
    batch_size = param_config.get('general').get('batch_size')
    shuffle = param_config.get('general').get('shuffle')
    train_loader = DataLoader(
        dataset=train_dataset,
        batch_sampler=BalancedSampler(
            labels=train_dataset.labels,
            n_classes=num_actions,
            n_samples=param_config.get('general').get('num_samples')))
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=batch_size,
                            shuffle=shuffle)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=shuffle)
    class_names = train_dataset.get_class_names()

    # Load the Medusa network
    n1_kwargs = param_config.get('modalities').get('inertial').get(
        'model').get('kwargs')
    n2_kwargs = param_config.get('modalities').get('sdfdi').get('model').get(
        'kwargs')
    n3_kwargs = None
    if param_config.get('modalities').get('skeleton'):
        n3_kwargs = param_config.get('modalities').get('skeleton').get(
            'model').get('kwargs')
    mlp_kwargs = param_config.get('general').get('mlp_kwargs')
    if args.out_size:
        n1_kwargs['out_size'] = args.out_size
        n2_kwargs['out_size'] = args.out_size
        if param_config.get('modalities').get('skeleton'):
            n3_kwargs['out_size'] = args.out_size
        mlp_kwargs['out_size'] = args.out_size
        # Also adjust the input size of the MLP due to the change in out_size
        mlp_kwargs['input_size'] = 3 * args.out_size
    if args.dr:
        mlp_kwargs['dropout_rate'] = args.dr
    if args.mlp_hidden_size:
        mlp_kwargs['hidden_size'] = args.mlp_hidden_size

    model = Medusa(mlp_kwargs, n1_kwargs, n2_kwargs, n3_kwargs)
    if args.test:
        model.load_state_dict(torch.load(args.saved_state))
    model = model.to(device)

    # Criterion, optimizer
    criterion = param_config.get('general').get('criterion').get('class_name')
    criterion_from = param_config.get('general').get('criterion').get(
        'from_module')
    criterion_kwargs = param_config.get('general').get('criterion').get(
        'kwargs')
    optimizer = param_config.get('general').get('optimizer').get('class_name')
    optimizer_from = param_config.get('general').get('optimizer').get(
        'from_module')
    optimizer_kwargs = param_config.get('general').get('optimizer').get(
        'kwargs')
    if args.margin:
        criterion_kwargs['margin'] = args.margin
    if args.semi_hard is not None:
        criterion_kwargs['semi_hard'] = args.semi_hard
    if args.lr:
        optimizer_kwargs['lr'] = args.lr
    criterion = getattr(importlib.import_module(criterion_from),
                        criterion)(**criterion_kwargs)
    optimizer = getattr(importlib.import_module(optimizer_from),
                        optimizer)(model.parameters(), **optimizer_kwargs)

    if not args.test:
        if args.experiment is None:
            timestamp = time.strftime("%Y%m%d_%H%M", time.localtime())
            experiment = '%s_medusa' % timestamp
        else:
            experiment = args.experiment
        writer = SummaryWriter('../logs/' + experiment)

        train_losses, val_losses, val_accuracies, train_accuracies = train_triplet_loss(
            model,
            criterion,
            optimizer,
            class_names,
            train_loader,
            val_loader,
            num_epochs,
            device,
            experiment,
            num_neighbors,
            writer,
            verbose=True,
            skip_accuracy=args.skip_accuracy)

        # Save last state of model
        save_model(model, '%s_last_state.pt' % experiment)

    cm, test_acc, test_scores, test_labels = get_predictions_with_knn(
        n_neighbors=num_neighbors,
        train_loader=train_loader,
        test_loader=test_loader,
        model=model,
        device=device)

    cm_image = plot_confusion_matrix(cm=cm,
                                     title='Confusion Matrix - Test Loader',
                                     normalize=False,
                                     save=False,
                                     show_figure=False,
                                     classes=test_dataset.get_class_names())
    if not args.test:
        writer.add_images('ConfusionMatrix/Test',
                          cm_image,
                          dataformats='CHW',
                          global_step=num_epochs - 1)
        writer.add_embedding(
            test_scores,
            metadata=[class_names[idx] for idx in test_labels.int().tolist()],
            tag="test (%f%%)" % test_acc)
        writer.add_text('config', json.dumps(param_config, indent=2))
        writer.add_text('args', json.dumps(args.__dict__, indent=2))
        writer.flush()
        writer.close()

    if args.print_tsne or args.save_tsne:
        train_scores, train_labels = get_predictions(train_loader,
                                                     model,
                                                     device,
                                                     apply_softmax=False)
        if device.type == 'cuda':
            train_scores = train_scores.cpu()
            train_labels = train_labels.cpu()
        run_tsne(train_scores,
                 train_labels.argmax(1),
                 class_names,
                 filename='train_medusa_embeddings.png',
                 save=args.save_tsne,
                 show=args.print_tsne)
        run_tsne(test_scores,
                 test_labels,
                 class_names,
                 filename='test_medusa_embeddings.png',
                 save=args.save_tsne,
                 show=args.print_tsne)
    print('Test acc: %.5f' % test_acc)

    return test_acc
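
BalancedSampler, used by the triplet-loss loaders here and in Example #3, is project code as well. A hedged sketch of the P×K batch sampler it appears to be, assuming labels is a sequence of integer class ids: every batch draws n_samples items from each of n_classes classes, so each anchor sees in-batch positives and negatives.

import random
from collections import defaultdict
from torch.utils.data import Sampler

class BalancedSampler(Sampler):
    def __init__(self, labels, n_classes, n_samples):
        self.by_class = defaultdict(list)
        for idx, label in enumerate(labels):
            self.by_class[int(label)].append(idx)
        self.n_classes = n_classes
        self.n_samples = n_samples

    def __iter__(self):
        classes = list(self.by_class)
        for _ in range(len(self)):
            batch = []
            for c in random.sample(classes, min(self.n_classes, len(classes))):
                # Sampling with replacement tolerates small classes; a sketch only.
                batch.extend(random.choices(self.by_class[c], k=self.n_samples))
            yield batch

    def __len__(self):
        total = sum(len(v) for v in self.by_class.values())
        return max(1, total // (self.n_classes * self.n_samples))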
def responses_to_host_serializer():
    yield PacketSerializer(
        load_yaml("controller/protocol/commands/responses_to_host.yaml"))
def requests_from_host_serializer():
    yield PacketSerializer(
        load_yaml("controller/protocol/commands/requests_from_host.yaml"))
def frame_serializer():
    yield PacketSerializer(load_yaml("controller/protocol/frames/frames.yaml"))
Example #10
def make_packet_serializer(schema_path: str) -> PacketSerializer:
    return PacketSerializer(
        load_yaml(os.path.join("controller", "protocol", schema_path)))
def main(args):
    if args.test and args.saved_state is None:
        print('--test requires --saved_state to specify the saved model weights')
        sys.exit(1)

    # Select device
    cuda_device = 'cuda:%d' % args.gpu
    device = torch.device(cuda_device if torch.cuda.is_available() else 'cpu')

    # Load parameters from yaml file.
    param_config = load_yaml(args.param_file)

    # Assign parameters
    modality = args.modality
    modality_config = param_config.get('modalities').get(modality)
    selected_dataset = getattr(datasets,
                               param_config.get('dataset').get('class_name'))
    transforms, test_transforms = get_transforms_from_config(
        modality_config.get('transforms'))
    batch_size = modality_config.get(
        'batch_size') if args.bs is None else args.bs
    num_epochs = modality_config.get(
        'num_epochs') if args.epochs is None else args.epochs
    shuffle = param_config.get('dataset').get('shuffle')
    model_class_name = modality_config.get('model').get('class_name')
    criterion = modality_config.get('criterion').get('class_name')
    criterion_from = modality_config.get('criterion').get('from_module')
    optimizer = modality_config.get('optimizer').get('class_name')
    optimizer_from = modality_config.get('optimizer').get('from_module')
    optimizer_kwargs = modality_config.get('optimizer').get('kwargs')
    if args.lr:
        optimizer_kwargs['lr'] = args.lr
    train_dataset_kwargs = param_config.get('dataset').get('train_kwargs')
    validation_dataset_kwargs = param_config.get('dataset').get(
        'validation_kwargs')
    test_dataset_kwargs = param_config.get('dataset').get('test_kwargs')

    # Load Data
    train_dataset = selected_dataset(modality=modality,
                                     transform=transforms,
                                     **train_dataset_kwargs)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=batch_size,
                              shuffle=shuffle)
    validation_dataset = selected_dataset(modality=modality,
                                          transform=test_transforms,
                                          **validation_dataset_kwargs)
    validation_loader = DataLoader(dataset=validation_dataset,
                                   batch_size=batch_size,
                                   shuffle=shuffle)
    test_dataset = selected_dataset(modality=modality,
                                    transform=test_transforms,
                                    **test_dataset_kwargs)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=shuffle)

    # Instantiate the model
    model_kwargs = modality_config.get('model').get('kwargs')
    if args.dr is not None:
        model_kwargs['dropout_rate'] = args.dr
    model = getattr(
        models, model_class_name)(*modality_config.get('model').get('args'),
                                  **model_kwargs)
    if args.test:
        model.load_state_dict(torch.load(args.saved_state))
    model = model.to(device)

    # Loss and optimizer
    criterion = getattr(importlib.import_module(criterion_from), criterion)()
    optimizer = getattr(importlib.import_module(optimizer_from),
                        optimizer)(model.parameters(), **optimizer_kwargs)

    # Training procedure
    max_val_acc = -1
    max_train_acc = -1
    min_train_loss = -1
    min_val_loss = -1

    if not args.test:
        # Instantiate the TensorBoard writer with the given experiment name or generate one automatically
        experiment = '%s_%s_%s_%s' % (
            selected_dataset.__name__, modality,
            args.param_file.split('/')[-1],
            time.strftime("%Y%m%d_%H%M", time.localtime())
        ) if args.experiment is None else args.experiment
        writer_name = '../logs/%s' % experiment
        writer = SummaryWriter(writer_name)

        # Print parameters
        print_table({
            'param_file': args.param_file,
            'experiment': experiment,
            'tensorboard_folder': writer_name,
            'dataset': selected_dataset.__name__,
            'criterion': type(criterion).__name__,
            'optimizer': type(optimizer).__name__,
            'modality': modality,
            'model': model.name,
            'learning_rate': optimizer_kwargs['lr'],
            'batch_size': batch_size,
            'num_epochs': num_epochs,
        })

        # Start training
        train_accs, val_accs, train_losses, val_losses = train(
            model=model,
            criterion=criterion,
            optimizer=optimizer,
            train_loader=train_loader,
            validation_loader=validation_loader,
            num_epochs=num_epochs,
            device=device,
            experiment=experiment,
            writer=writer)

        # Save last state of model
        save_model(model, '%s_last_state.pt' % experiment)

        max_val_acc = max(val_accs) if len(val_accs) > 0 else max_val_acc
        max_train_acc = max(
            train_accs) if len(train_accs) > 0 else max_train_acc
        min_train_loss = min(
            train_losses) if len(train_losses) > 0 else min_train_loss
        min_val_loss = min(val_losses) if len(val_losses) > 0 else min_val_loss

        cm_image_train = plot_confusion_matrix(
            cm=get_confusion_matrix(train_loader, model, device),
            title='Confusion Matrix - Training',
            normalize=False,
            save=False,
            classes=train_dataset.get_class_names(),
            show_figure=False)
        cm_image_validation = plot_confusion_matrix(
            cm=get_confusion_matrix(validation_loader, model, device),
            title='Confusion Matrix - Validation',
            normalize=False,
            save=False,
            classes=validation_dataset.get_class_names(),
            show_figure=False)
        cm_image_test = plot_confusion_matrix(
            cm=get_confusion_matrix(test_loader, model, device),
            title='Confusion Matrix - Test',
            normalize=False,
            save=False,
            classes=test_dataset.get_class_names(),
            show_figure=False)

        # Add confusion matrices for each dataset, marked at the last step (num_epochs - 1)
        writer.add_images('ConfusionMatrix/Train',
                          cm_image_train,
                          dataformats='CHW',
                          global_step=num_epochs - 1)
        writer.add_images('ConfusionMatrix/Validation',
                          cm_image_validation,
                          dataformats='CHW',
                          global_step=num_epochs - 1)
        writer.add_images('ConfusionMatrix/Test',
                          cm_image_test,
                          dataformats='CHW',
                          global_step=num_epochs - 1)
        print('Best validation accuracy: %f' % max(val_accs))

        writer.add_text('config', json.dumps(param_config, indent=2))
        writer.add_text('args', json.dumps(args.__dict__, indent=2))
        writer.flush()
        writer.close()

    test_accuracy = get_accuracy(test_loader, model, device)
    print('Test accuracy (not based on val): %f' % test_accuracy)

    return {
        'test_acc': test_accuracy,
        'max_train_acc': max_train_acc,
        'max_val_acc': max_val_acc,
        'min_train_loss': min_train_loss,
        'min_val_loss': min_val_loss
    }
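
get_accuracy (like the get_confusion_matrix helper above) evaluates a plain classifier over a loader. A minimal sketch under the assumptions that the loader yields (inputs, labels) batches and that labels may be one-hot:

import torch

def get_accuracy(loader, model, device):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, labels in loader:
            inputs, labels = inputs.to(device), labels.to(device)
            preds = model(inputs).argmax(dim=1)
            if labels.dim() > 1:  # reduce one-hot labels to class indices
                labels = labels.argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.numel()
    return correct / total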
parser.add_argument('--gpu',
                    type=int,
                    default=0,
                    help='Only applicable when cuda gpu is available')
parser.add_argument('--param_file',
                    type=str,
                    default='parameters/utd_mhad/default.yaml')
parser.add_argument('--saved_state', type=str, required=True)
parser.add_argument('--knn', action='store_true')
parser.add_argument('--n_neighbors', type=int, default=2)
args = parser.parse_args()

# Select device
cuda_device = 'cuda:%d' % args.gpu
device = torch.device(cuda_device if torch.cuda.is_available() else 'cpu')

# Load parameters from yaml file.
param_config = load_yaml(args.param_file)

# Assign parameters
modality = args.modality
SelectedDataset = getattr(datasets,
                          param_config.get('dataset').get('class_name'))
_, test_transforms = get_transforms_from_config(
    param_config.get('modalities').get(modality).get('transforms'))
batch_size = param_config.get('modalities').get(modality).get('batch_size')
shuffle = param_config.get('dataset').get('shuffle')
model_class_name = param_config.get('modalities').get(modality).get(
    'model').get('class_name')
train_dataset_kwargs = param_config.get('dataset').get('train_kwargs')
test_dataset_kwargs = param_config.get('dataset').get('test_kwargs')

# Load Data