def get_parser(self, prog_name):
    parser = super().get_parser(prog_name)
    parser.add_argument("--input",
                        type=Path,
                        required=True,
                        help="File containing generated features in netCDF 4 format, conformant to the auDeep "
                             "data model")
    parser.add_argument("--cross-validate",
                        action="store_true",
                        help="Use cross-validation according to the cross-validation setup of the data")
    parser.add_argument("--train-partitions",
                        nargs="+",
                        type=EnumType(Partition),
                        help="Train classifier on the specified partitions (TRAIN, DEVEL, or TEST)")
    parser.add_argument("--eval-partitions",
                        nargs="+",
                        type=EnumType(Partition),
                        help="Evaluate classifier on the specified partitions (TRAIN, DEVEL, or TEST)")
    parser.add_argument("--upsample",
                        action="store_true",
                        help="Balance classes in the training partitions/splits")
    parser.add_argument("--majority-vote",
                        action="store_true",
                        help="Use majority voting to determine the labels of chunked instances")
    parser.add_argument("--repeat",
                        metavar="N",
                        default=1,
                        type=int,
                        help="Repeat evaluation N times and compute the mean accuracy (default 1)")
    return parser
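# The parsers in this section pass EnumType(...) as the argparse `type` converter for
# enum-valued options such as --train-partitions and --cell. The actual helper is defined
# elsewhere in auDeep and is not shown here; the class below is only an illustrative
# sketch, assuming the converter maps case-insensitive member names to enum members and
# reports invalid values through argparse.

import argparse


class EnumType:
    """Illustrative sketch of an argparse type converter for an Enum class."""

    def __init__(self, enum_class):
        self._enum_class = enum_class

    def __call__(self, value: str):
        # Look up the enum member by name, ignoring case (e.g. "train" -> Partition.TRAIN).
        try:
            return self._enum_class[value.upper()]
        except KeyError:
            raise argparse.ArgumentTypeError(
                "invalid choice: %r (choose from %s)"
                % (value, ", ".join(member.name for member in self._enum_class)))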
def get_parser(self, prog_name):
    parser = super().get_parser(prog_name)
    parser.add_argument("--train-input",
                        type=Path,
                        required=True,
                        help="File containing training data in netCDF 4 format, conformant to the auDeep "
                             "data model")
    parser.add_argument("--train-partitions",
                        nargs="+",
                        type=EnumType(Partition),
                        help="Use only the specified partitions of the training data (TRAIN, DEVEL, or TEST)")
    parser.add_argument("--eval-input",
                        type=Path,
                        required=True,
                        help="File containing evaluation data in netCDF 4 format, conformant to the auDeep "
                             "data model")
    parser.add_argument("--eval-partitions",
                        nargs="+",
                        type=EnumType(Partition),
                        help="Use only the specified partitions of the evaluation data (TRAIN, DEVEL, or TEST)")
    parser.add_argument("--upsample",
                        action="store_true",
                        help="Balance classes in the training data")
    parser.add_argument("--majority-vote",
                        action="store_true",
                        help="Use majority voting to determine the labels of chunked instances")
    parser.add_argument("--output",
                        type=Path,
                        required=True,
                        help="Write predictions to the specified file as CSV with tab delimiters")
    return parser
def get_parser(self, prog_name):
    parser = super().get_parser(prog_name)
    parser.add_argument("--num-layers",
                        default=1,
                        type=int,
                        help="The number of layers in the encoder and decoder (default 1)")
    parser.add_argument("--num-units",
                        default=16,
                        type=int,
                        help="The number of RNN cells per layer (default 16)")
    parser.add_argument("--bidirectional-encoder",
                        action="store_true",
                        help="Use a bidirectional encoder (default off)")
    parser.add_argument("--bidirectional-decoder",
                        action="store_true",
                        help="Use a bidirectional decoder (default off)")
    parser.add_argument("--cell",
                        default=CellType.GRU,
                        type=EnumType(CellType),
                        help="The type of the RNN cells (GRU or LSTM, default GRU)")
    parser.add_argument("--keep-prob",
                        default=0.8,
                        type=float,
                        help="Keep activations with the specified probability (default 0.8)")
    return parser
def get_parser(self, prog_name):
    parser = super().get_parser(prog_name)
    parser.add_argument("--input",
                        type=Path,
                        required=True,
                        help="File containing a data set in netCDF 4 format, conformant to the auDeep "
                             "data model.")
    parser.add_argument("--partitions",
                        default=None,
                        nargs="+",
                        type=EnumType(Partition),
                        help="Partitions which should be upsampled (TRAIN, DEVEL, or TEST). If not set, the "
                             "entire data set will be upsampled.")
    parser.add_argument("--output",
                        type=Path,
                        required=True,
                        help="File to which to write the upsampled data set. Data is stored "
                             "in netCDF 4 format according to the auDeep data model.")
    return parser
def get_parser(self, prog_name):
    parser = super().get_parser(prog_name)
    parser.add_argument("--input",
                        type=Path,
                        required=True,
                        help="File containing a data set in netCDF 4 format, conformant to the auDeep "
                             "data model.")
    parser.add_argument("--output",
                        type=Path,
                        required=True,
                        help="File to which to write the modified data set. Data is stored "
                             "in netCDF 4 format according to the auDeep data model.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--add-cv-setup",
                       default=None,
                       type=int,
                       metavar="NUM_FOLDS",
                       help="Randomly generate NUM_FOLDS evenly sized cross-validation folds")
    group.add_argument("--add-partitioning",
                       nargs="+",
                       default=None,
                       type=EnumType(Partition),
                       metavar="PARTITIONS",
                       help="Randomly generate an evenly sized partitioning with the specified partitions")
    group.add_argument("--remove-partitioning",
                       action="store_true",
                       help="Remove partition information")
    group.add_argument("--remove-cv-setup",
                       action="store_true",
                       help="Remove cross-validation information")
    return parser
def get_parser(self, prog_name):
    parser = super().get_parser(prog_name)
    parser.add_argument("--input",
                        type=Path,
                        required=True,
                        help="File containing a data set in netCDF 4 format, conformant to the auDeep "
                             "data model.")
    parser.add_argument("--format",
                        type=EnumType(ExportFormat),
                        required=True,
                        help="The export format (CSV or ARFF)")
    parser.add_argument("--labels-last",
                        action="store_true",
                        help="Append labels to the end of feature vectors, instead of the beginning")
    parser.add_argument("--name",
                        type=str,
                        default=None,
                        help="The name of generated files. By default, the name of the input file is used.")
    parser.add_argument("--output",
                        type=Path,
                        required=True,
                        help="The output base directory. Partitions and cross-validation folds are written to "
                             "separate directories.")
    return parser
def get_parser(self, prog_name):
    parser = super().get_parser(prog_name)
    parser.add_argument("--num-f-layers",
                        default=1,
                        type=int,
                        help="The number of layers in the frequency encoder and decoder (default 1)")
    parser.add_argument("--num-f-units",
                        default=64,
                        type=int,
                        help="The number of RNN cells per layer in the frequency RNNs (default 64)")
    parser.add_argument("--num-t-layers",
                        default=2,
                        type=int,
                        help="The number of layers in the time encoder and decoder (default 2)")
    parser.add_argument("--num-t-units",
                        default=128,
                        type=int,
                        help="The number of RNN cells per layer in the time RNNs (default 128)")
    parser.add_argument("--bidirectional-f-encoder",
                        action="store_true",
                        help="Use a bidirectional frequency encoder (default off)")
    parser.add_argument("--bidirectional-f-decoder",
                        action="store_true",
                        help="Use a bidirectional frequency decoder (default off)")
    parser.add_argument("--bidirectional-t-encoder",
                        action="store_true",
                        help="Use a bidirectional time encoder (default off)")
    parser.add_argument("--bidirectional-t-decoder",
                        action="store_true",
                        help="Use a bidirectional time decoder (default off)")
    parser.add_argument("--cell",
                        default=CellType.GRU,
                        type=EnumType(CellType),
                        help="The type of the RNN cells (GRU or LSTM, default GRU)")
    parser.add_argument("--keep-prob",
                        default=0.8,
                        type=float,
                        help="Keep activations with the specified probability (default 0.8)")
    parser.add_argument("--freq-window-width",
                        default=32,
                        type=int,
                        help="The width of the sliding window on the frequency axis (default 32)")
    parser.add_argument("--freq-window-overlap",
                        default=24,
                        type=int,
                        help="The overlap between windows on the frequency axis (default 24)")
    parser.add_argument("--f-encoder-noise",
                        default=0.0,
                        type=float,
                        help="Replace frequency encoder input time steps by zeros with the specified probability "
                             "(default 0.0)")
    parser.add_argument("--t-encoder-noise",
                        default=0.0,
                        type=float,
                        help="Replace time encoder input time steps by zeros with the specified probability "
                             "(default 0.0)")
    parser.add_argument("--f-feed-previous-prob",
                        default=0.0,
                        type=float,
                        help="Feed output of previous time step instead of correct output to frequency decoder "
                             "with specified probability (default 0.0)")
    parser.add_argument("--t-feed-previous-prob",
                        default=0.0,
                        type=float,
                        help="Feed output of previous time step instead of correct output to time decoder "
                             "with specified probability (default 0.0)")
    return parser
def get_parser(self, prog_name):
    parser = super().get_parser(prog_name)
    parser.add_argument("--basedir",
                        type=Path,
                        required=True,
                        help="The data set base directory")
    parser.add_argument("--parser",
                        type=str,
                        default="audeep.backend.parsers.meta.MetaParser",
                        help="Parser for the data set file structure. Defaults to "
                             "audeep.backend.parsers.meta.MetaParser, which supports several common file "
                             "structures.")
    parser.add_argument("--output",
                        type=Path,
                        default=Path("./spectrograms.nc"),
                        help="The output filename (default './spectrograms.nc'). Data is stored in netCDF 4 format "
                             "according to the auDeep data model.")
    parser.add_argument("--mel-spectrum",
                        default=None,
                        type=int,
                        metavar="FILTERS",
                        help="Generate mel spectrograms with the specified number of filter banks (default off)")
    parser.add_argument("--window-width",
                        default=0.04,
                        type=float,
                        help="The width of the FFT window in seconds (default 0.04)")
    parser.add_argument("--window-overlap",
                        default=0.02,
                        type=float,
                        help="The overlap between FFT windows in seconds (default 0.02)")
    parser.add_argument("--clip-above",
                        metavar="dB",
                        default=None,
                        type=int,
                        help="Clip amplitudes above the specified dB value. Amplitudes are normalized so that 0 dB "
                             "is the highest value.")
    parser.add_argument("--clip-below",
                        metavar="dB",
                        default=None,
                        type=int,
                        help="Clip amplitudes below the specified dB value. Amplitudes are normalized so that 0 dB "
                             "is the highest value.")
    parser.add_argument("--chunk-length",
                        default=None,
                        type=float,
                        help="Split audio files into chunks of the specified length in seconds. Requires the "
                             "--chunk-count option to be set.")
    parser.add_argument("--chunk-count",
                        default=None,
                        type=int,
                        help="Number of chunks per audio file. Excess chunks will be dropped, and an error is "
                             "raised if there are not enough chunks. Requires the --chunk-length option to be set.")
    parser.add_argument("--pretend",
                        default=None,
                        type=int,
                        help="Process only the file at the specified index and display the resulting spectrogram")
    parser.add_argument("--channels",
                        default=ChannelFusion.MEAN,
                        type=EnumType(ChannelFusion),
                        help="Strategy for combining the audio channels. Valid values are \"mean\", \"left\", "
                             "\"right\", and \"diff\".")
    parser.add_argument("--fixed-length",
                        default=None,
                        type=float,
                        help="Ensure that all samples have exactly the specified length in seconds, by cutting or "
                             "padding audio appropriately")
    parser.add_argument("--center-fixed",
                        action="store_true",
                        help="Pad or cut equally at the start and end of samples if a fixed length is set. By "
                             "default, padding or cutting is performed only at the end of samples.")
    return parser
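# The help texts for --chunk-length and --chunk-count above state that the two options
# must be set together. A post-parsing check enforcing that contract could look like the
# sketch below; the function name is hypothetical, and the actual validation in auDeep
# may be implemented differently.

def _check_chunking_options(parsed_args):
    # Exactly one of the two options being set is the invalid case; both unset (no
    # chunking) and both set (chunking enabled) are fine.
    if (parsed_args.chunk_length is None) != (parsed_args.chunk_count is None):
        raise ValueError("--chunk-length and --chunk-count must be set together")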