def __init__(self, clf, genres, data, display_name):
    """Prepare a benchmark run of *clf* over the given dataset.

    Args:
        clf: the classifier object under test.
        genres: ordered sequence of genre labels.
        data: a ``(X, y)`` pair of feature matrix and target labels.
        display_name: human-readable name used when reporting results.
    """
    self.clf = clf
    self.genres = genres
    self.display_name = display_name
    self.logger = get_logger('benchmark')
    # Map each genre label to its positional index in `genres`.
    self.m_genres = dict(zip(genres, range(len(genres))))
    # Unpack the dataset into features and targets.
    self.X, self.y = data
    # Scaler is created here; fitting happens later in the pipeline.
    self.scaler = StandardScaler()
def __init__(self, clf_obj=None, **metadata):
    """Initialize the classifier from keyword metadata.

    Args:
        clf_obj: optional pre-built classifier object handed to
            ``_init_classifier``; when ``None`` the subclass decides
            how to construct one.
        **metadata: classifier metadata. Every name listed in
            ``MANDATORY_CLASSIFIER_METADATA_FIELDS`` must be present;
            names listed in ``OPTIONAL_CLASSIFIER_METADATA_FIELDS``
            (pairs of ``(field, default)``) fall back to their default.

    Raises:
        LookupError: if a mandatory metadata field is missing.
    """
    # Mandatory fields: each becomes an instance attribute.
    for field in self.MANDATORY_CLASSIFIER_METADATA_FIELDS:
        if field not in metadata:
            # Fixed: the original message carried a stray trailing
            # double-quote ('... in metadata"').
            raise LookupError(f'Field "{field}" not found in metadata')
        setattr(self, field, metadata[field])

    # `self.name` is one of the mandatory fields set above.
    self.logger = get_logger(f'classifier ({self.name})')

    # Optional fields: use the declared default when absent.
    for field, default in self.OPTIONAL_CLASSIFIER_METADATA_FIELDS:
        setattr(self, field, metadata.get(field, default))

    self._init_classifier(clf_obj)
def __init__(self, genres, feature_extractor):
    """Create a dataset handler.

    Args:
        genres: sequence of genre labels this dataset covers.
        feature_extractor: object used to turn raw audio into features.
    """
    self.logger = get_logger('dataset')
    self.ft_extractor = feature_extractor
    self.genres = genres
import json
import os.path
# NOTE(review): sklearn.externals.joblib is deprecated in modern
# scikit-learn (import joblib directly) — confirm the pinned version.
from sklearn.externals import joblib
from genrec.classifier import Classifier
from genrec.logger import get_logger
from web.config import *
from web.utils import scandir_filtered

# Module-level logger shared by the helpers below.
logger = get_logger('classifiers')

def _get_valid_dirs():
    """Yield paths of dataset directories next to this module.

    Filters the entries of this file's directory through
    ``scandir_filtered``: only directories pass, and only those whose
    name appears in ``AVAILABLE_DATASETS`` (from ``web.config``).
    Each filter is a ``(predicate, on_reject_callback)`` pair —
    presumably the callback fires when the predicate rejects an entry;
    verify against ``scandir_filtered``.
    """
    dirpath = os.path.dirname(__file__)
    filters = [
        (lambda entry: entry.is_dir(), None),
        (lambda entry: entry.name in AVAILABLE_DATASETS,
         lambda entry: logger.debug(f'Ignoring directory "{entry.path}"'))
    ]
    # Map the surviving entries to their filesystem paths.
    yield from map(lambda entry: entry.path, scandir_filtered(dirpath, filters))

def _get_classifiers(dir):
    # Scans `dir` for serialized classifiers.
    # NOTE(review): definition continues beyond this chunk — the
    # `filters` list is truncated here.
    logger.info(f'Scanning directory "{dir}"...')
    filters = [
def __init__(self, sr=22050, fft_len=512):
    """Configure the feature extractor.

    Args:
        sr: target sample rate in Hz (default 22050).
        fft_len: FFT window length in samples (default 512).
    """
    self.logger = get_logger('ft-extractor')
    self.fft_len = fft_len
    self.target_sr = sr