# Shared imports assumed by the snippets below; the `toolkit` package layout
# is inferred from `toolkit.utils.find_` referenced in the docstrings, and
# `Pipeline` matches the scikit-learn API used throughout. Adjust as needed.
import typing
from time import time

import numpy as np
from sklearn.pipeline import Pipeline

from toolkit import preprocessing, transformers, utils


def get_prediction_pipeline(classifier: transformers.NBClassifier,
                            attributes: list = None,
                            feature_hooks: list = None) -> Pipeline:
    """Build the prediction pipeline using existing classifier.

    *must be fit using `fit_predict` method.*

    :param classifier: pre-trained NBClassifier
    :param attributes: list, attributes for NLTKPreprocessor

        List of attributes which will be extracted from NVD and passed to NLTK
        preprocessor.

    :param feature_hooks: dict, {feature_key: Hook}
        to be used as an argument to `FeatureExtractor`

        Specify features which should be extracted from the given set.
        The hooks are called for each element of the set and return
        corresponding features.
    """

    return Pipeline(steps=[
        ('nltk_preprocessor',
         preprocessing.NLTKPreprocessor(feed_attributes=attributes)),
        ('feature_extractor',
         transformers.FeatureExtractor(
             feature_hooks=feature_hooks,
             # make hooks sharable (useful if training pipeline was used before)
             share_hooks=True)),
        ('classifier', classifier)
    ])
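

# Hypothetical usage sketch (not part of the original module): `trained_clf`
# is a pre-trained NBClassifier and `cves` a list of NVD records; the hook
# list is the same FEATURE_HOOKS used in main() below.
def predict_cves(trained_clf, cves, hooks=None):
    pipeline = get_prediction_pipeline(trained_clf,
                                       attributes=['description'],
                                       feature_hooks=hooks)
    return pipeline.fit_predict(cves)
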
def get_training_pipeline(feature_hooks=None) -> Pipeline:
    """Build the training pipeline from FeatureExtractor and NBClassifier.

    The training pipeline expects preprocessed data as input
    and trains NBClassifier on that data.

    *Must be fit using the `fit_transform` method.*

    :param feature_hooks: list of Hook objects
        to be used as an argument to `FeatureExtractor`

        Specify features which should be extracted from the given set.
        The hooks are called for each element of the set and return
        corresponding features.
    """

    return Pipeline(steps=[
        ('feature_extractor',
         transformers.FeatureExtractor(
             feature_hooks=feature_hooks,
             # make hooks sharable (useful if training pipeline was used before)
             share_hooks=True)),
        ('classifier', transformers.NBClassifier())
    ])
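

# Hypothetical usage sketch: `prep_data` is preprocessed data (e.g. output
# of the NLTK preprocessing step) and `labels` the matching labels, since
# the docstring above requires preprocessed input.
def train_classifier(prep_data, labels, hooks=None):
    pipeline = get_training_pipeline(feature_hooks=hooks)
    return pipeline.fit_transform(X=prep_data, y=labels)
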
def extract_labeled_features(data: typing.Union[list, np.ndarray],
                             attributes: list,
                             feature_hooks: list = None,
                             labeling_func=None) -> tuple:
    """Extract data by concatenating and fitting
    the preprocessing and extraction pipeline.

    :returns: tuple, (featureset, classification labels)
    """

    prep_pipeline = get_preprocessing_pipeline(labeling_func=labeling_func)

    steps, _ = zip(*prep_pipeline.steps)
    fit_params = {
        "%s__feed_attributes" % steps[2]: attributes,
        "%s__output_attributes" % steps[2]: ['label']
    }

    prep_data = prep_pipeline.fit_transform(X=data, **fit_params)
    del data  # the raw input is no longer needed

    # split the data
    prep_data = np.array(prep_data)
    features, labels = prep_data[:, 0], prep_data[:, 1]

    extractor = transformers.FeatureExtractor(feature_hooks=feature_hooks)

    featuresets = extractor.fit_transform(X=features, y=labels)

    return featuresets, labels
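

# Hypothetical usage sketch: `raw_feed` is a list of raw NVD records; the
# attribute choice mirrors the one used in main() below.
def build_featuresets(raw_feed, hooks=None):
    return extract_labeled_features(data=raw_feed,
                                    attributes=['description'],
                                    feature_hooks=hooks)
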
def get_extraction_pipeline(attributes,
                            feature_hooks: list = None) -> Pipeline:
    """Build the extraction pipeline.

    :param attributes: list, attributes for NLTKPreprocessor

        List of attributes which will be extracted from NVD and passed to the
        NLTK preprocessor.

    :param feature_hooks: list of Hook objects
        to be used as an argument to `FeatureExtractor`

        Specify features which should be extracted from the given set.
        The hooks are called for each element of the set and return
        corresponding features.
    """

    return Pipeline(steps=[
        ('nvd_feed_preprocessor',
         preprocessing.NVDFeedPreprocessor(attributes=attributes)),
        ('nltk_preprocessor',
         preprocessing.NLTKPreprocessor(feed_attributes=attributes)),
        ('feature_extractor',
         transformers.FeatureExtractor(
             feature_hooks=feature_hooks,
             # make hooks sharable (useful if training pipeline was used before)
             share_hooks=True)),
    ])
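

# Hypothetical usage sketch: the extraction pipeline is fit on raw NVD
# records and yields featuresets without training a classifier.
def extract_features(raw_feed, hooks=None):
    pipeline = get_extraction_pipeline(attributes=['description'],
                                       feature_hooks=hooks)
    return pipeline.fit_transform(X=raw_feed)
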
Example #5

def main(argv):
    """Run."""
    args = parse_args(argv=argv)

    if args.csv:
        # TODO
        raise NotImplementedError("The feature has not been implemented yet."
                                  " Sorry for the inconvenience.")
    else:
        print("Getting NVD Feed...")
        feed = NVD.from_feeds(feed_names=args.nvd_feeds)
        feed.update()
        data = list(feed.cves())  # cves() returns a generator; materialize it

    cve_dict = {cve.cve_id: cve for cve in data}

    # set up default argument for vendor-product feature hook
    feature_hooks.vendor_product_match_hook.default_kwargs = {
        'cve_dict': cve_dict
    }

    training_pipeline = Pipeline(steps=[
        ('nvd_feed_preprocessor',
         preprocessing.NVDFeedPreprocessor(
             attributes=['cve_id', 'description'])),
        ('label_preprocessor',
         preprocessing.LabelPreprocessor(
             feed_attributes=['project', 'description'],
             output_attributes=['cve_id', 'description'],
             hook=transformers.Hook(
                 key='label_hook', reuse=True, func=utils.find_))),
        ('nltk_preprocessor',
         preprocessing.NLTKPreprocessor(
             feed_attributes=['description'],
             output_attributes=['cve_id', 'label'])),
        ('feature_extractor',
         transformers.FeatureExtractor(
             feature_hooks=FEATURE_HOOKS, share_hooks=True)),
        ('classifier', transformers.NBClassifier())
    ])

    start_time = time()
    print("Training started")

    try:
        classifier = training_pipeline.fit_transform(X=data)
    finally:
        print(f"Training finished in {time() - start_time} seconds")

    if args.export:
        classifier.export(args.export_dir)
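

# Typical entry point for the script above (a sketch; `parse_args` is
# assumed to follow the usual argparse convention of taking `sys.argv[1:]`).
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])
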
Example #6

def extract_labeled_features(data: typing.Union[list, np.ndarray],
                             nvd_attributes: list,
                             nltk_feed_attributes: list = None,
                             feature_hooks: list = None,
                             labeling_func=None,
                             share_hooks=True) -> tuple:
    """Extract labeled features from input data.

     Extracts labeled features by concatenating and fitting the preprocessing
     and extraction pipeline.

     This is a wrapper for simplification of preprocessing and feature extraction.
     For full functionality it is suggested to build custom pipelines.

    :param data: input data to the preprocessing pipeline
    :param nvd_attributes: list, attributes to output by NVDPreprocessor

        The attributes are outputed by NVDPreprocessor and passed
        to FeatureExtractor.

    :param nltk_feed_attributes: list, attributes for NLTKPreprocessor

        List of attributes which will be fed to NLTKPreprocessor.

    :param feature_hooks: List[Hook], hooks used for feature extraction
    :param labeling_func: function used for labeling, passed to LabelPreprocessor
    :param share_hooks: bool, whether to reuse hooks

    :returns: tuple, (featureset, classification labels)
    """
    nltk_feed_attributes = nltk_feed_attributes or []

    prep_pipeline = get_preprocessing_pipeline(nvd_attributes=nvd_attributes,
                                               labeling_func=labeling_func,
                                               share_hooks=share_hooks)

    steps, _ = list(zip(*prep_pipeline.steps))
    fit_params = {
        "%s__feed_attributes" % steps[2]: nltk_feed_attributes,
        "%s__output_attributes" % steps[2]: nvd_attributes + ['label']
    }

    prep_data = prep_pipeline.fit_transform(X=data, **fit_params)

    extractor = transformers.FeatureExtractor(feature_hooks=feature_hooks)

    featuresets = extractor.fit_transform(X=prep_data)

    # the last column of the preprocessed output holds the labels
    return featuresets, np.array(prep_data)[:, -1]
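

# Hypothetical usage sketch: the attribute choices mirror those used in
# main() above; `utils.find_` is the default labeling function per the
# docstring of get_full_training_pipeline below.
def build_labeled_featuresets(raw_feed, hooks=None):
    return extract_labeled_features(data=raw_feed,
                                    nvd_attributes=['cve_id', 'description'],
                                    nltk_feed_attributes=['description'],
                                    feature_hooks=hooks,
                                    labeling_func=utils.find_)
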
Example #7

def get_full_training_pipeline(labeling_func: typing.Callable = None,
                               feature_hooks=None,
                               share_hooks=False) -> Pipeline:
    """Build the full training pipeline with no predefined attributes.

    The pipeline accepts raw data, performs preprocessing and feature
    extraction and trains NBClassifier on that data.

    The customization of feed and output attributes is fully left to the user.
    It is necessary to provide `fit_params` when fitting, as this pipeline
    does not contain any predefined arguments.

    *Must be fit using the `fit_transform` method with `fit_params`.*

    :param labeling_func: callable object to be used for labeling

        The `labeling_func` is used to create a hook for `LabelPreprocessor`
        (see the `LabelPreprocessor` documentation for more info).
        By default the `toolkit.utils.find_` function is used for that purpose.

    :param feature_hooks: list of Hook objects
        to be used as an argument to `FeatureExtractor`

        Specify features which should be extracted from the given set.
        The hooks are called for each element of the set and return
        corresponding features.

    :param share_hooks: boolean, whether to reuse hooks

    :returns: Pipeline
    """
    if labeling_func is None:
        labeling_func = utils.find_

    return Pipeline(steps=[
        ('nvd_feed_preprocessor', preprocessing.NVDFeedPreprocessor()),
        ('label_preprocessor',
         preprocessing.LabelPreprocessor(
             hook=transformers.Hook(
                 key='label_hook', reuse=share_hooks, func=labeling_func))),
        ('nltk_preprocessor', preprocessing.NLTKPreprocessor()),
        ('feature_extractor',
         transformers.FeatureExtractor(feature_hooks=feature_hooks,
                                       share_hooks=True)),
        ('classifier', transformers.NBClassifier())
    ])
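

# Hypothetical usage sketch: since the full pipeline predefines no
# attributes, `fit_params` must route them to the named steps (the
# `<step>__<param>` keys follow the pattern used in
# extract_labeled_features above).
def train_full(raw_feed, hooks=None):
    pipeline = get_full_training_pipeline(feature_hooks=hooks)
    return pipeline.fit_transform(
        X=raw_feed,
        nltk_preprocessor__feed_attributes=['description'],
        nltk_preprocessor__output_attributes=['cve_id', 'label'])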