Example #1
def predict(min_proba):
    # Load classifier along with the label encoder.
    with open(os.path.join(common.PRJ_DIR, common.SVM_MODEL), 'rb') as fp:
        model = pickle.load(fp)
    with open(os.path.join(common.PRJ_DIR, common.LABELS), 'rb') as fp:
        le = pickle.load(fp)

    # Calculate size of radar image data array used for training.
    train_size_z = int((common.R_MAX - common.R_MIN) / common.R_RES) + 1
    train_size_y = int((common.PHI_MAX - common.PHI_MIN) / common.PHI_RES) + 1
    train_size_x = int(
        (common.THETA_MAX - common.THETA_MIN) / common.THETA_RES) + 1
    logger.debug(f'train_size: {train_size_x}, {train_size_y}, {train_size_z}')

    try:
        while True:
            # Scan according to profile and record targets.
            radar.Trigger()

            # Retrieve any targets from the last recording.
            targets = radar.GetSensorTargets()
            if not targets:
                continue

            # Retrieve the last completed triggered recording.
            raw_image, size_x, size_y, size_z, _ = radar.GetRawImage()
            raw_image_np = np.array(raw_image, dtype=np.float32)

            for t, target in enumerate(targets):
                logger.info('**********')
                logger.info(
                    'Target #{}:\nx: {}\ny: {}\nz: {}\namplitude: {}\n'.format(
                        t + 1, target.xPosCm, target.yPosCm, target.zPosCm,
                        target.amplitude))

                i, j, k = common.calculate_matrix_indices(
                    target.xPosCm, target.yPosCm, target.zPosCm, size_x,
                    size_y, size_z)

                # 2D projection of the target in the y-z plane.
                projection_yz = raw_image_np[i, :, :]
                # 2D projection of the target in the x-z plane.
                projection_xz = raw_image_np[:, j, :]
                # 2D projection of the target in the x-y plane.
                projection_xy = raw_image_np[:, :, k]

                # Zoom factors to scale the live projections to the size
                # used during training.
                proj_zoom = calc_proj_zoom(train_size_x, train_size_y,
                                           train_size_z, size_x, size_y,
                                           size_z)

                observation = common.process_samples(
                    [(projection_xz, projection_yz, projection_xy)],
                    proj_mask=PROJ_MASK,
                    proj_zoom=proj_zoom,
                    scale=True)

                # Make a prediction.
                name, prob = classifier(observation, model, le, min_proba)
                logger.info(f'Detected {name} with probability {prob}')
                logger.info('**********')
    except KeyboardInterrupt:
        pass
    finally:
        # Stop and disconnect the radar.
        radar.Stop()
        radar.Disconnect()
        radar.Clean()
        logger.info('Successful radar shutdown.')

    return
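
Note: the classifier() helper called above is defined elsewhere in the
project. A minimal sketch of its likely behavior, assuming the model
exposes predict_proba and le is a fitted sklearn LabelEncoder; the
'unknown' fallback below the min_proba threshold is an assumption.

import numpy as np

def classifier(observation, model, le, min_proba):
    # Sketch: predict the class name and probability of one observation.
    proba = model.predict_proba(np.asarray(observation).reshape(1, -1))[0]
    best = int(np.argmax(proba))
    if proba[best] < min_proba:
        # Below the confidence threshold, report an unknown target.
        return 'unknown', proba[best]
    # Map the encoded class index back to its original string label.
    return le.inverse_transform([best])[0], proba[best]
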
Example #2
def main():
    # Load radar observations and labels.
    with open(path.join(common.PRJ_DIR, common.RADAR_DATA), 'rb') as fp:
        data_pickle = pickle.load(fp)

    print('Loading and scaling samples.')
    processed_samples = common.process_samples(data_pickle['samples'],
                                               proj_mask=PROJ_MASK)
    #print('processed_samples {}'.format(processed_samples))

    # Encode the labels.
    print('Encoding labels.')
    le = LabelEncoder()
    encoded_labels = le.fit_transform(data_pickle['labels'])
    class_names = list(le.classes_)
    print(f'class names: {class_names}')

    # Balance the dataset.
    balanced_labels, balanced_data = balance_classes(encoded_labels,
                                                     processed_samples)

    # Plot the dataset.
    plot_data = balanced_data
    plot_dataset(balanced_labels, plot_data)

    # Split data up into train and test sets.
    (X_train, X_test, y_train,
     y_test) = train_test_split(balanced_data,
                                balanced_labels,
                                test_size=0.20,
                                random_state=RANDOM_SEED,
                                shuffle=True)
    #print('X_train: {} X_test: {} y_train: {} y_test: {}'.format(X_train, X_test, y_train, y_test))

    skf = StratifiedKFold(n_splits=FOLDS)

    # Find best svm classifier, evaluate and then save it.
    best_svm = find_best_svm_estimator(X_train, y_train,
                                       skf.split(X_train, y_train),
                                       RANDOM_SEED)

    evaluate_model(best_svm, X_test, y_test, class_names, SVM_CM)

    print('\n Saving svm model...')
    with open(path.join(common.PRJ_DIR, common.SVM_MODEL), 'wb') as outfile:
        outfile.write(pickle.dumps(best_svm))
    """
    # Find best XGBoost classifier, evaluate and save it. 
    best_xgb = find_best_xgb_estimator(X_train, y_train, skf.split(X_train, y_train),
        PARA_COMB, RANDOM_SEED)

    evaluate_model(best_xgb, X_test, y_test, class_names, XGB_CM)

    print('\n Saving xgb model...')
    with open(path.join(common.PRJ_DIR, common.XGB_MODEL), 'wb') as outfile:
        outfile.write(pickle.dumps(best_xgb))
    """
    # Write the label encoder to disk.
    print('\n Saving label encoder.')
    with open(path.join(common.PRJ_DIR, common.LABELS), 'wb') as outfile:
        outfile.write(pickle.dumps(le))
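
Note: balance_classes() is shared across these examples but not shown.
Example #7's comments say it replicates minority-class samples; a minimal
sketch under that assumption, oversampling with sklearn.utils.resample:

import numpy as np
from sklearn.utils import resample

def balance_classes(labels, data):
    # Sketch: oversample each class to the size of the largest class.
    data, labels = np.asarray(data), np.asarray(labels)
    classes, counts = np.unique(labels, return_counts=True)
    n_max = counts.max()
    balanced_labels, balanced_data = [], []
    for c in classes:
        idx = np.where(labels == c)[0]
        # Resample with replacement up to the majority-class count.
        chosen = resample(idx, replace=True, n_samples=n_max, random_state=0)
        balanced_labels.append(labels[chosen])
        balanced_data.append(data[chosen])
    return np.concatenate(balanced_labels), np.concatenate(balanced_data)
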
Example #3
def svc_fit(train, proj_mask, epochs, folds=5, batch_size=32):
    """ Fit SVM using SVC on data set. 

    Args:
        train (tuple of list): (X, y) train data.
        proj_mask (Namedtuple): Radar projections to use for training.
        epochs (int): Number of times to augment data.
        folds (int, optional): Number of folds for the Stratified K-Folds
            cross-validator. Default=5
        batch_size (int, optional): Augment batch size. Default=32.

    Returns:
        estimator: Estimator that was chosen by grid search.
    """
    def find_best_svm_estimator(X, y, cv, random_seed):
        """Exhaustive search over specified parameter values for svm.

        Returns:
            optimized svm estimator.

        Note:
            https://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf
        """
        print('\n Finding best svm estimator...')
        Cs = [0.01, 0.1, 1, 10, 100]
        gammas = [0.001, 0.01, 0.1, 1, 10]
        param_grid = [{
            'C': Cs,
            'kernel': ['linear']
        }, {
            'C': Cs,
            'gamma': gammas,
            'kernel': ['rbf']
        }]
        init_est = svm.SVC(probability=True,
                           class_weight='balanced',
                           random_state=random_seed,
                           cache_size=1000,
                           verbose=False)
        grid_search = model_selection.GridSearchCV(estimator=init_est,
                                                   param_grid=param_grid,
                                                   verbose=2,
                                                   n_jobs=4,
                                                   cv=cv)
        grid_search.fit(X, y)
        #print('\n All results:')
        #print(grid_search.cv_results_)
        logger.info('\n Best estimator:')
        logger.info(grid_search.best_estimator_)
        logger.info('\n Best score for {}-fold search:'.format(folds))
        logger.info(grid_search.best_score_)
        logger.info('\n Best hyperparameters:')
        logger.info(grid_search.best_params_)
        return grid_search.best_estimator_

    X_train, y_train = train

    # Augment training set.
    if epochs:
        data_gen = DataGenerator(rotation_range=15.0,
                                 zoom_range=0.3,
                                 noise_sd=0.2)
        logger.info('Augmenting data set.')
        logger.info(f'Original number of training samples: {y_train.shape[0]}')

        # Faster to use a list for the operations below.
        y_train = y_train.tolist()

        # Do not mutate original lists.
        xc = X_train.copy()
        yc = y_train.copy()

        for e in range(epochs):
            logger.debug(f'epoch: {e}')
            batch = 0
            for X_batch, y_batch in data_gen.flow(xc,
                                                  yc,
                                                  batch_size=batch_size):
                logger.debug(f'batch: {batch}')
                X_train.extend(X_batch)
                y_train.extend(y_batch)
                batch += 1
                if batch >= len(xc) / batch_size:
                    break

        # Sanity-check that augmentation did not break the [0, 1] scaling.
        max_val = max(np.concatenate(t, axis=None).max() for t in X_train)
        assert abs(max_val - 1.0) < 1e-6, 'scale error'

        # Convert y_train back to np array.
        y_train = np.array(y_train, dtype=np.int8)

        logger.info(
            f'Augmented number of training samples: {y_train.shape[0]}')

    logger.info('Generating feature vectors from radar projections.')
    X_train = common.process_samples(X_train, proj_mask=proj_mask)
    logger.info(f'Feature vector length: {X_train.shape[1]}')

    # Balance classes.
    logger.info('Balancing classes.')
    y_train, X_train = balance_classes(y_train, X_train)

    skf = model_selection.StratifiedKFold(n_splits=folds)

    # Find best classifier.
    logger.info('Finding best classifier.')
    clf = find_best_svm_estimator(X_train, y_train,
                                  skf.split(X_train, y_train), RANDOM_SEED)

    return clf
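
Note: common.process_samples() turns each (xz, yz, xy) projection tuple
into one feature vector. Its implementation is not shown; a plausible
sketch, assuming proj_mask is an iterable of booleans selecting
projections, proj_zoom holds per-projection zoom factors accepted by
scipy.ndimage.zoom, and scale normalizes each vector to unit maximum:

import numpy as np
from scipy import ndimage

def process_samples(samples, proj_mask, proj_zoom=None, scale=False):
    # Sketch: flatten the masked projections of each sample into a
    # single feature vector.
    feature_vectors = []
    for projections in samples:
        if proj_zoom is not None:
            # Resize each projection to the size used during training.
            projections = [ndimage.zoom(p, z)
                           for p, z in zip(projections, proj_zoom)]
        selected = [np.asarray(p).ravel()
                    for p, use in zip(projections, proj_mask) if use]
        fv = np.concatenate(selected)
        if scale:
            fv = fv / fv.max()  # assumed [0, 1] normalization
        feature_vectors.append(fv)
    return np.array(feature_vectors)
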
Example #4
def sgd_fit(train,
            test,
            proj_mask,
            online_learn,
            svm_model,
            epochs,
            folds=5,
            batch_size=32):
    """ Fit SVM using SGD on data set. 

    Args:
        train (tuple of list): (X, y) train data.
        test (tuple of list): (X, y) test data.
        proj_mask (Namedtuple): Radar projections to use for training.
        online_learn (bool): If True perform online learning with data.
        svm_model (str): Name of existing svm model for online learning.
        epochs (int): Number of times to augment data.
        folds (int, optional): Number of folds for the Stratified K-Folds
            cross-validator. Default=5
        batch_size (int, optional): Augment batch size. Default=32.

    Returns:
        estimator: Estimator that was chosen by grid search.
    """
    def find_best_sgd_svm_estimator(X, y, cv, random_seed):
        """Exhaustive search over specified parameter values for svm using sgd.

        Returns:
            optimized svm estimator.
        """
        max_iter = int(max(np.ceil(10**6 / len(X)), 1000))
        small_alphas = [10.0e-08, 10.0e-09, 10.0e-10]
        alphas = [10.0e-04, 10.0e-05, 10.0e-06, 10.0e-07]
        l1_ratios = [0.075, 0.15, 0.30]
        param_grid = [{
            'alpha': alphas,
            'penalty': ['l1', 'l2'],
            'average': [False]
        }, {
            'alpha': alphas,
            'penalty': ['elasticnet'],
            'average': [False],
            'l1_ratio': l1_ratios
        }, {
            'alpha': small_alphas,
            'penalty': ['l1', 'l2'],
            'average': [True]
        }, {
            'alpha': small_alphas,
            'penalty': ['elasticnet'],
            'average': [True],
            'l1_ratio': l1_ratios
        }]
        # loss='log' selects logistic regression so the model supports
        # predict_proba ('log' was renamed 'log_loss' in scikit-learn 1.1).
        init_est = linear_model.SGDClassifier(loss='log',
                                              max_iter=max_iter,
                                              random_state=random_seed,
                                              n_jobs=-1,
                                              warm_start=True)
        grid_search = model_selection.GridSearchCV(estimator=init_est,
                                                   param_grid=param_grid,
                                                   verbose=2,
                                                   n_jobs=-1,
                                                   cv=cv)
        grid_search.fit(X, y)
        #print('\n All results:')
        #print(grid_search.cv_results_)
        logger.info('\n Best estimator:')
        logger.info(grid_search.best_estimator_)
        logger.info('\n Best score for {}-fold search:'.format(folds))
        logger.info(grid_search.best_score_)
        logger.info('\n Best hyperparameters:')
        logger.info(grid_search.best_params_)
        return grid_search.best_estimator_

    X_train, y_train = train
    X_test, y_test = test

    # Make a copy of train set for later use in augmentation.
    if epochs:
        xc = X_train.copy()
        yc = y_train.copy()

    # Generate feature vectors from radar projections.
    logger.info('Generating feature vectors.')
    X_train = common.process_samples(X_train, proj_mask=proj_mask)
    X_test = common.process_samples(X_test, proj_mask=proj_mask)
    logger.info(f'Feature vector length: {X_train.shape[1]}')

    # Balance classes.
    logger.info('Balancing classes.')
    y_train, X_train = balance_classes(y_train, X_train)

    if not online_learn:
        # Find best initial classifier.
        logger.info('Running best fit with new data.')
        skf = model_selection.StratifiedKFold(n_splits=folds)
        clf = find_best_sgd_svm_estimator(X_train, y_train,
                                          skf.split(X_train, y_train),
                                          RANDOM_SEED)
    else:
        # Fit existing classifier with new data.
        logger.info('Running partial fit with new data.')
        with open(os.path.join(common.PRJ_DIR, svm_model), 'rb') as fp:
            clf = pickle.load(fp)
        max_iter = int(max(np.ceil(10**6 / len(X_train)), 1000))
        for _ in range(max_iter):
            clf.partial_fit(X_train, y_train)

    # Augment training set and use to run partial fits on classifier.
    if epochs:
        logger.info(
            f'Running partial fit with augmented data (epochs: {epochs}).')
        y_predicted = clf.predict(X_test)
        logger.debug(
            f'Un-augmented accuracy: {metrics.accuracy_score(y_test, y_predicted)}.'
        )
        data_gen = DataGenerator(rotation_range=5.0,
                                 zoom_range=0.2,
                                 noise_sd=0.1,
                                 balance=True)
        for e in range(epochs):
            logger.debug(f'Augment epoch: {e}.')
            batch = 0
            for X_batch, y_batch in data_gen.flow(xc,
                                                  yc,
                                                  batch_size=batch_size):
                logger.debug(f'Augment batch: {batch}.')
                X_batch = common.process_samples(X_batch, proj_mask=proj_mask)
                y_batch, X_batch = balance_classes(y_batch, X_batch)
                clf.partial_fit(X_batch, y_batch, classes=np.unique(y_train))
                y_predicted = clf.predict(X_test)
                acc = metrics.accuracy_score(y_test, y_predicted)
                logger.debug(f'Augmented accuracy: {acc}.')
                batch += 1
                if batch >= len(xc) / batch_size:
                    break

    return clf
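
Note: the online-learning branch above relies on SGDClassifier.partial_fit,
which must be told every class label on its first call; that is why the
augmentation loop passes classes=np.unique(y_train). A standalone
illustration on toy data:

import numpy as np
from sklearn import linear_model

rng = np.random.RandomState(0)
X, y = rng.rand(8, 4), np.array([0, 1, 2, 0, 1, 2, 0, 1])

clf = linear_model.SGDClassifier(loss='log', random_state=0)
# The first call must declare every class the model will ever see.
clf.partial_fit(X[:4], y[:4], classes=np.unique(y))
# Later mini-batches can be streamed without the classes argument.
clf.partial_fit(X[4:], y[4:])
print(clf.predict(X[:2]))
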
Example #5
    if not args.use_svc:
        logger.info('Using SVM algo: SGDClassifier.')
        clf = sgd_fit(train=(X_train, y_train),
                      test=(X_test, y_test),
                      proj_mask=args.proj_mask,
                      online_learn=args.online_learn,
                      svm_model=args.svm_model,
                      epochs=args.epochs)
    else:
        logger.info('Using SVM algo: SVC.')
        clf = svc_fit(train=(X_train, y_train),
                      proj_mask=args.proj_mask,
                      epochs=args.epochs)

    # Generate feature vectors.
    X_val_fv = common.process_samples(X_val, proj_mask=proj_mask)
    X_test_fv = common.process_samples(X_test, proj_mask=proj_mask)

    logger.info('Calibrating classifier.')
    # cv='prefit' assumes clf is already fitted; fit() below then uses the
    # validation set solely for probability calibration.
    cal_clf = calibration.CalibratedClassifierCV(base_estimator=clf,
                                                 cv='prefit')
    cal_clf.fit(X_val_fv, y_val)

    logger.info('Evaluating final classifier on test set.')
    evaluate_model(cal_clf, X_test_fv, y_test, class_names, args.svm_cm)

    logger.info(f'Saving svm model to: {args.svm_model}.')
    with open(args.svm_model, 'wb') as outfile:
        outfile.write(pickle.dumps(cal_clf))

    # Do not overwrite label encoder if online learning was performed.
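
Note: evaluate_model() appears in several of these examples without a
definition. A plausible sketch, assuming it prints a classification report
and saves a confusion-matrix figure to the given path (cm_path is a
hypothetical parameter name; ConfusionMatrixDisplay needs scikit-learn 1.0+):

import matplotlib.pyplot as plt
from sklearn import metrics

def evaluate_model(model, X_test, y_test, class_names, cm_path):
    # Sketch: report per-class metrics and save a confusion matrix plot.
    y_pred = model.predict(X_test)
    print(metrics.classification_report(y_test, y_pred,
                                        target_names=class_names))
    disp = metrics.ConfusionMatrixDisplay.from_predictions(
        y_test, y_pred, display_labels=class_names)
    disp.figure_.savefig(cm_path)
    plt.close(disp.figure_)
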
Example #6
def main():
    radar.Init()

    # Configure Walabot database install location.
    radar.SetSettingsFolder()

    # Establish communication with walabot.
    try:
        radar.ConnectAny()
    except radar.WalabotError as err:
        print(f'Failed to connect to Walabot.\nerror code: {err.code}')
        exit(1)

    # Set radar scan profile.
    radar.SetProfile(common.RADAR_PROFILE)

    # Set the scan arena in polar coordinates.
    radar.SetArenaR(common.R_MIN, common.R_MAX, common.R_RES)
    radar.SetArenaPhi(common.PHI_MIN, common.PHI_MAX, common.PHI_RES)
    radar.SetArenaTheta(common.THETA_MIN, common.THETA_MAX, common.THETA_RES)

    # Set the radar detection threshold.
    radar.SetThreshold(RADAR_THRESHOLD)

    # Select radar image filtering (MTI = moving target indication).
    filter_type = radar.FILTER_TYPE_MTI if MTI else radar.FILTER_TYPE_NONE
    radar.SetDynamicImageFilter(filter_type)

    # Start the system in preparation for scanning.
    radar.Start()

    # Calibrate to ignore or reduce static signals when not in MTI mode.
    if not MTI:
        common.calibrate()

    try:
        while True:
            # Scan according to profile and record targets.
            radar.Trigger()

            # Retrieve any targets from the last recording.
            targets = radar.GetSensorTargets()
            if not targets:
                continue

            # Retrieve the last completed triggered recording.
            raw_image, size_x, size_y, size_z, _ = radar.GetRawImage()
            raw_image_np = np.array(raw_image, dtype=np.float32)

            for t, target in enumerate(targets):
                print(
                    'Target #{}:\nx: {}\ny: {}\nz: {}\namplitude: {}\n'.format(
                        t + 1, target.xPosCm, target.yPosCm, target.zPosCm,
                        target.amplitude))

                i, j, k = common.calculate_matrix_indices(
                    target.xPosCm, target.yPosCm, target.zPosCm, size_x,
                    size_y, size_z)

                # 2D projection of the target in the y-z plane.
                projection_yz = raw_image_np[i, :, :]
                # 2D projection of the target in the x-z plane.
                projection_xz = raw_image_np[:, j, :]
                # 2D projection of the target in the x-y plane.
                projection_xy = raw_image_np[:, :, k]

                # Keep projection order consistent with training: (xz, yz, xy).
                observation = common.process_samples(
                    [(projection_xz, projection_yz, projection_xy)],
                    proj_mask=PROJ_MASK)

                # Make a prediction.
                name, prob = classifier(observation)
                if name == 'person':
                    color_name = colored(name, 'green')
                elif name == 'dog':
                    color_name = colored(name, 'yellow')
                elif name == 'cat':
                    color_name = colored(name, 'blue')
                else:
                    color_name = colored(name, 'red')
                print(f'Detected {color_name} with probability {prob}\n')
    except KeyboardInterrupt:
        pass
    finally:
        # Stop and disconnect the radar.
        radar.Stop()
        radar.Disconnect()
        radar.Clean()
        print('Successful termination.')
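
Note: common.calculate_matrix_indices() converts a target's Cartesian
position into indices of the raw image array. Its implementation is not
shown, and the Walabot angle conventions below are assumptions; the sketch
only illustrates the linear mapping from arena limits to array indices:

import math

def calculate_matrix_indices(x_cm, y_cm, z_cm, size_x, size_y, size_z):
    # Sketch: convert Cartesian cm into the arena's spherical coordinates.
    r = math.sqrt(x_cm**2 + y_cm**2 + z_cm**2)
    theta = math.degrees(math.atan2(x_cm, z_cm))  # assumed convention
    phi = math.degrees(math.atan2(y_cm, z_cm))    # assumed convention

    def to_index(value, lo, hi, size):
        # Linearly map [lo, hi] onto [0, size - 1] and clamp.
        i = round((value - lo) / (hi - lo) * (size - 1))
        return min(max(i, 0), size - 1)

    return (to_index(theta, common.THETA_MIN, common.THETA_MAX, size_x),
            to_index(phi, common.PHI_MIN, common.PHI_MAX, size_y),
            to_index(r, common.R_MIN, common.R_MAX, size_z))
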
Example #7
def main():
    # Load radar observations and labels.
    with open(os.path.join(common.PRJ_DIR, common.RADAR_DATA), 'rb') as fp:
        data_pickle = pickle.load(fp)

    # Samples are in the form [(xz, yz, xy), ...] and are in range [0, RADAR_MAX].
    samples = data_pickle['samples']
    #max_sample = np.amax([[np.concatenate(t, axis=None)] for t in samples])
    #print(f'samples: {samples}')
    # Scale each feature to the [0, 1] range without breaking the sparsity.
    print('Scaling samples.')
    samples = [[p / common.RADAR_MAX for p in s] for s in samples]
    #max_sample = np.amax([[np.concatenate(t, axis=None)] for t in samples])
    #print(f'sample max: {max_sample}')
    #print(f'scaled samples: {samples}')

    # Encode the labels.
    print('Encoding labels.')
    le = preprocessing.LabelEncoder()
    encoded_labels = le.fit_transform(data_pickle['labels'])
    class_names = list(le.classes_)
    print(f'class names: {class_names}')

    # Split data and labels up into train and test sets.
    print('Splitting data into train and test sets.')
    X_train, X_test, y_train, y_test = model_selection.train_test_split(
        samples,
        encoded_labels,
        test_size=0.20,
        random_state=RANDOM_SEED,
        shuffle=True)
    #print(f'X_train: {X_train} X_test: {X_test} y_train: {y_train} y_test: {y_test}')

    # Augment training set.
    data_gen = DataGenerator(rotation_range=15.0, zoom_range=0.3, noise_sd=0.2)
    print('Augmenting dataset.')
    print(f'X len: {len(X_train)}, y len: {len(y_train)}')

    # Faster to use a list for the operations below.
    y_train = y_train.tolist()

    # Do not mutate original lists.
    xc = X_train.copy()
    yc = y_train.copy()

    for e in range(EPOCHS):
        print(f'epoch: {e}')
        batch = 0
        for X_batch, y_batch in data_gen.flow(xc, yc, batch_size=BATCH_SIZE):
            print(f'batch: {batch}')
            X_train.extend(X_batch)
            y_train.extend(y_batch)
            batch += 1
            if batch >= len(xc) / BATCH_SIZE:
                break

    # Sanity-check that augmentation did not break the [0, 1] scaling.
    max_val = max(np.concatenate(t, axis=None).max() for t in X_train)
    assert abs(max_val - 1.0) < 1e-6, 'scale error'

    print('Generating feature vectors from radar projections.')
    X_train = common.process_samples(X_train, proj_mask=PROJ_MASK)

    # Convert y_train back to np array.
    y_train = np.array(y_train, dtype=np.int8)

    # Balance classes by replicating minority-class samples.
    # Augmentation may not yield a perfect balance, so this fine-tunes it.
    print('Balancing classes.')
    y_train, X_train = balance_classes(y_train, X_train)

    skf = model_selection.StratifiedKFold(n_splits=FOLDS)

    # Find best classifier.
    best_svm = find_best_sgd_svm_estimator(X_train, y_train,
                                           skf.split(X_train, y_train),
                                           RANDOM_SEED)

    print('Evaluating best svm classifier.')
    X_test = common.process_samples(X_test, proj_mask=PROJ_MASK)
    evaluate_model(best_svm, X_test, y_test, class_names, SVM_CM)

    print('\n Saving svm model.')
    with open(os.path.join(common.PRJ_DIR, common.SVM_MODEL), 'wb') as outfile:
        outfile.write(pickle.dumps(best_svm))
    """
    # Find best XGBoost classifier, evaluate and save it. 
    best_xgb = find_best_xgb_estimator(X_train, y_train, skf.split(X_train, y_train),
        PARA_COMB, RANDOM_SEED)

    evaluate_model(best_xgb, X_test, y_test, class_names, XGB_CM)

    print('\n Saving xgb model...')
    with open(os.path.join(common.PRJ_DIR, common.XGB_MODEL), 'wb') as outfile:
        outfile.write(pickle.dumps(best_xgb))
    """
    # Write the label encoder to disk.
    print('\n Saving label encoder.')
    with open(os.path.join(common.PRJ_DIR, common.LABELS), 'wb') as outfile:
        outfile.write(pickle.dumps(le))
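
Note: DataGenerator here is a project-local augmenter, not the similarly
named Keras class. A minimal sketch of the behavior its constructor
arguments and flow() usage suggest; the exact augmentation policy (and
restoring the original shape after zooming) is an assumption:

import numpy as np
from scipy import ndimage

class DataGenerator:
    # Sketch: endlessly yield mini-batches of randomly rotated, zoomed
    # and noised copies of (xz, yz, xy) projection tuples.
    def __init__(self, rotation_range=0.0, zoom_range=0.0, noise_sd=0.0,
                 balance=False):
        self.rotation_range = rotation_range
        self.zoom_range = zoom_range
        self.noise_sd = noise_sd
        self.balance = balance  # class balancing elided in this sketch
        self.rng = np.random.default_rng()

    def _fit_shape(self, a, shape):
        # Crop or zero-pad a 2D array back to the original shape.
        out = np.zeros(shape, dtype=a.dtype)
        r, c = min(a.shape[0], shape[0]), min(a.shape[1], shape[1])
        out[:r, :c] = a[:r, :c]
        return out

    def _augment(self, proj):
        shape = proj.shape
        angle = self.rng.uniform(-self.rotation_range, self.rotation_range)
        proj = ndimage.rotate(proj, angle, reshape=False, mode='nearest')
        zoom = 1.0 + self.rng.uniform(-self.zoom_range, self.zoom_range)
        proj = self._fit_shape(ndimage.zoom(proj, zoom), shape)
        proj += self.rng.normal(0.0, self.noise_sd, proj.shape)
        return np.clip(proj, 0.0, 1.0)  # keep samples in [0, 1]

    def flow(self, X, y, batch_size=32):
        while True:  # endless generator; the caller breaks out
            idx = self.rng.permutation(len(X))
            for s in range(0, len(X), batch_size):
                batch = idx[s:s + batch_size]
                yield ([tuple(self._augment(p) for p in X[i]) for i in batch],
                       [y[i] for i in batch])
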