Example #1
 def test_add_model_file_arg_custom_arg_name(self):
     arg_name, value = 'model_file_test', 'some_model_file.dat'
     cmdline = ['--{}'.format(arg_name), value]
     parser = argparse.ArgumentParser()
     net_args.add_model_file_arg(parser, arg_name=arg_name)
     res = parser.parse_args(cmdline)
     self.assertEqual(getattr(res, arg_name), value)
Example #2
 def test_add_model_file_arg_short_alias(self):
     alias, value = 'm', 'model_file.dat'
     cmdline = ['-{}'.format(alias), value]
     parser = argparse.ArgumentParser()
     net_args.add_model_file_arg(parser, short_alias=alias)
     res = parser.parse_args(cmdline)
     self.assertEqual(res.model_file, value)
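Examples #1 and #2 exercise the arg_name and short_alias options of net_args.add_model_file_arg, and Example #9 below exercises required. A compatible helper could look like the following minimal sketch; it is illustrative only, and the real implementation in net_args may accept further options.

def add_model_file_arg(parser, arg_name='model_file', short_alias=None,
                       required=False):
    # Hypothetical re-implementation for illustration: adds --<arg_name>,
    # optionally with a -<short_alias> shorthand, and stores the value on
    # the parsed namespace under the attribute <arg_name>.
    flags = []
    if short_alias is not None:
        flags.append('-{}'.format(short_alias))
    flags.append('--{}'.format(arg_name))
    parser.add_argument(*flags, dest=arg_name, required=required,
                        help='path to a trained model file')
    return parser

With the defaults this produces a --model_file option stored as res.model_file, which is what Example #2 asserts.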
Example #3
    def __init__(self):
        self.default_logdir = cutils.get_config_for_module(
            "model_trainer")['default']['logdir']
        parser = argparse.ArgumentParser(
            description="Train network using provided dataset")

        # dataset input
        in_aliases = {'dataset name': 'name', 'dataset directory': 'srcdir'}
        dset_args = dargs.DatasetArgs(input_aliases=in_aliases)
        item_args = dargs.ItemTypeArgs()
        atype = dargs.arg_type.INPUT
        group = parser.add_argument_group(title="Input dataset")
        dset_args.add_dataset_arg_double(group, atype)
        item_args.add_item_type_args(group, atype)
        group.add_argument('--test_items_count',
                           type=atypes.int_range(1),
                           help='Number of dataset items to include in the '
                           'test set. Overrides test_items_fraction.')
        group.add_argument('--test_items_fraction',
                           type=float,
                           default=0.1,
                           help='Number of dataset items to include in the '
                           'test set, expressed as a fraction.')
        modes = net_cons.DATASET_SPLIT_MODES
        group.add_argument('--split_mode',
                           choices=modes,
                           required=True,
                           help='Method of splitting the test items subset '
                           'from the input dataset.')

        # network to train
        group = parser.add_argument_group(title="Network configuration")
        net_args.add_network_arg(group, short_alias='n')
        net_args.add_model_file_arg(group, short_alias='m')
        group.add_argument('--tb_dir',
                           default=self.default_logdir,
                           help=('directory to store training logs for '
                                 'tensorboard.'))
        group.add_argument('--save',
                           action='store_true',
                           help=('save the model after training. Model files '
                                 'are saved under tb_dir as net.network_name/'
                                 'net.network_name.tflearn.*'))

        # training settings
        group = parser.add_argument_group(title="Training parameters")
        net_args.add_training_settings_args(group,
                                            num_epochs={
                                                'required': False,
                                                'default': 11,
                                                'short_alias': 'e'
                                            })

        self.parser = parser
        self.dset_args = dset_args
        self.item_args = item_args
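The help texts above state that --test_items_count overrides --test_items_fraction. A minimal sketch of how the trainer might resolve the effective test-set size; the helper name and the exact rounding are assumptions, not taken from the source.

def resolve_test_set_size(num_items, test_items_count=None,
                          test_items_fraction=0.1):
    # An explicit count takes precedence over the fraction, mirroring the
    # 'Overrides test_items_fraction' wording in the help text.
    if test_items_count is not None:
        return min(test_items_count, num_items)
    return int(round(num_items * test_items_fraction))

For example, resolve_test_set_size(1000) gives 100 items, while resolve_test_set_size(1000, test_items_count=50) gives 50.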
Example #4
    def __init__(self):
        self.default_logdir = cutils.get_config_for_module(
            "model_xvalidator")['default']['logdir']
        print("default logdir set to {}".format(self.default_logdir))
        parser = argparse.ArgumentParser(
            description="Perform Kfold cross-validation on a given neural "
            "network with the given dataset.")

        # cross-validation settings
        group = parser.add_argument_group(title="Cross-validation parameters")
        group.add_argument('--num_crossvals',
                           type=atypes.int_range(1),
                           required=True,
                           help='number of cross validations to perform')

        # network to train
        group = parser.add_argument_group(title="Network to use")
        net_args.add_network_arg(group, short_alias='n')
        net_args.add_model_file_arg(group, short_alias='m')

        # training_parameters
        group = parser.add_argument_group(title="Training parameters to use")
        net_args.add_training_settings_args(group,
                                            num_epochs={
                                                'required': False,
                                                'default': 11,
                                                'short_alias': 'e'
                                            })
        group.add_argument('--tb_dir',
                           default=self.default_logdir,
                           help=('directory to store training logs for '
                                 'tensorboard.'))

        # dataset input
        in_aliases = {'dataset name': 'name', 'dataset directory': 'srcdir'}
        dset_args = dargs.DatasetArgs(input_aliases=in_aliases)
        item_args = dargs.ItemTypeArgs()
        atype = dargs.arg_type.INPUT
        group = parser.add_argument_group(title="Input dataset")
        dset_args.add_dataset_arg_double(group, atype)
        item_args.add_item_type_args(group, atype)
        group.add_argument('--test_items_count',
                           type=atypes.int_range(1),
                           help='number of dataset items to include in the '
                           'test set. Overrides test_items_fraction.')
        group.add_argument('--test_items_fraction',
                           type=float,
                           default=0.1,
                           help='number of dataset items to include in the '
                           'test set, expressed as a fraction.')

        self.parser = parser
        self.dset_args = dset_args
        self.item_args = item_args
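--num_crossvals sets how many cross-validation runs are performed. As a rough sketch, each run could take one contiguous fold of the dataset indices as its test split; the actual splitting logic in model_xvalidator may differ and this helper is hypothetical.

def kfold_indices(num_items, num_crossvals):
    # Yield (train_indices, test_indices) for each of num_crossvals folds;
    # the last fold absorbs any remainder when num_items is not divisible.
    indices = list(range(num_items))
    fold_size = num_items // num_crossvals
    for k in range(num_crossvals):
        start = k * fold_size
        stop = num_items if k == num_crossvals - 1 else start + fold_size
        yield indices[:start] + indices[stop:], indices[start:stop]

For 10 items and --num_crossvals 3 this yields test folds of 3, 3 and 4 items.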
Example #5
    def __init__(self):
        parser = argparse.ArgumentParser(
            description="Visualize convolutional filters of trained model")
        parser.add_argument('logdir',
                            help=('Directory to output visualized filter '
                                  'images to.'))

        # trained neural network model
        group = parser.add_argument_group('Neural network settings')
        net_args.add_network_arg(group, short_alias='n')
        net_args.add_model_file_arg(group, short_alias='m', required=True)
        packet_args = cargs.PacketArgs(long_alias='packet_dims',
                                       no_EC_dims=True)
        packet_args.helpstr = 'Dimensions of packets used for input items'
        packet_args.add_packet_arg(group, short_alias='p')

        # visualization settings
        group = parser.add_argument_group('Visualization settings')
        group.add_argument('--start_filter',
                           default=0,
                           type=int,
                           help=('index of first filter to visualize.'))
        group.add_argument('--stop_filter',
                           default=None,
                           type=int,
                           help=('index of the filter after the last filter '
                                 'to visualize.'))
        group.add_argument('--start_depth',
                           default=0,
                           type=int,
                           help=('first depth index of filters to visualize.'))
        group.add_argument('--stop_depth',
                           default=None,
                           type=int,
                           help=('index after the last depth index of filters '
                                 'to visualize.'))

        # misc
        parser.add_argument('--usecpu',
                            action='store_true',
                            help=('Use host CPU instead of the CUDA device. '
                                  'On systems without a dedicated CUDA device '
                                  'and no CUDA-enabled version of tensorflow '
                                  'installed, this flag has no effect.'))

        self.packet_args = packet_args
        self.parser = parser
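--start_filter/--stop_filter and --start_depth/--stop_depth describe half-open index ranges, with None meaning "up to the end". A minimal numpy sketch of selecting the filters to render, assuming weights shaped (height, width, depth, num_filters); the real tensor layout depends on the framework and is an assumption here.

import numpy as np

def select_filters(weights, start_filter=0, stop_filter=None,
                   start_depth=0, stop_depth=None):
    # Slice the requested filter and depth ranges; a stop value of None
    # behaves like the argparse defaults above and keeps everything.
    return weights[:, :, start_depth:stop_depth, start_filter:stop_filter]

w = np.random.rand(5, 5, 3, 32)  # e.g. 32 filters of depth 3
print(select_filters(w, start_filter=4, stop_filter=8).shape)  # (5, 5, 3, 4)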
Example #6
    def __init__(self):
        parser = argparse.ArgumentParser(
            description="Evaluate trained network model with given dataset")
        parser.add_argument('outfile',
                            nargs='?',
                            type=argparse.FileType('w'),
                            default=sys.stdout,
                            help=('name of output TSV to write to. If not '
                                  'provided, output to stdout.'))

        # dataset input
        atype = dargs.arg_type.INPUT
        in_aliases = {'dataset name': 'name', 'dataset directory': 'srcdir'}
        dset_args = dargs.DatasetArgs(input_aliases=in_aliases)
        item_args = dargs.ItemTypeArgs()
        group = parser.add_argument_group(title="Input dataset")
        dset_args.add_dataset_arg_double(group, atype)
        item_args.add_item_type_args(group, atype)
        # slice of dataset items to use for evaluation
        group.add_argument('--start_item',
                           default=0,
                           type=int,
                           help=('index of first dataset item to use for '
                                 'evaluation.'))
        group.add_argument('--stop_item',
                           default=None,
                           type=int,
                           help=('index of the dataset item after the last '
                                 'item to use for evaluation.'))

        # trained neural network model
        group = parser.add_argument_group('Neural network settings')
        net_args.add_network_arg(group, short_alias='n')
        net_args.add_model_file_arg(group, short_alias='m', required=True)

        # misc
        parser.add_argument('--usecpu',
                            action='store_true',
                            help=('Use host CPU instead of the CUDA device. '
                                  'On systems without a dedicated CUDA device '
                                  'and no CUDA-enabled version of tensorflow '
                                  'installed, this flag has no effect.'))

        self.parser = parser
        self.dset_args = dset_args
        self.item_args = item_args
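outfile is opened through argparse.FileType('w') and defaults to sys.stdout, so evaluation results can be written as tab-separated rows to whichever stream argparse provides. A minimal sketch follows; the column names are illustrative, not taken from the source.

import csv
import sys

def write_results(rows, outfile=sys.stdout):
    # Write evaluation rows as TSV to an already-open text stream.
    writer = csv.writer(outfile, delimiter='\t', lineterminator='\n')
    writer.writerow(['item_index', 'expected', 'predicted'])  # assumed header
    writer.writerows(rows)

Called as write_results([(0, 'a', 'a'), (1, 'b', 'a')]) this prints a header and two data rows to stdout, or into the file named by outfile when one is given.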
Example #7
    def __init__(self):
        parser = argparse.ArgumentParser(
            description=('Create classification report in HTML format from '
                         'provided evaluation results in TSV format'))

        # input tsv
        parser.add_argument('infile',
                            nargs='?',
                            type=argparse.FileType('r'),
                            default=sys.stdin,
                            help=('name of input TSV to read from. If not '
                                  'provided, read from stdin.'))

        # output settings
        group = parser.add_argument_group(title="Output settings")
        group.add_argument('--tablesize',
                           type=atypes.int_range(1),
                           help=('Maximum number of table rows per html '
                                 'report file.'))
        group.add_argument('--logdir',
                           help=('Directory to store output logs. If a '
                                 'non-default directory is used, it must '
                                 'exist prior to calling this script.'))
        item_args = dargs.ItemTypeArgs(out_item_prefix='add')
        help = {
            k: 'add image placeholder for {}'.format(desc)
            for k, desc in item_args.item_descriptions.items()
        }
        item_args.add_item_type_args(group, dargs.arg_type.OUTPUT, help=help)

        # meta-information to include in report headers
        g_title = "Meta-information to include in report headers"
        group = parser.add_argument_group(title=g_title)
        nargs.add_network_arg(group, required=False)
        nargs.add_model_file_arg(group, required=False)
        in_aliases = {'dataset name': 'name', 'dataset directory': 'srcdir'}
        dset_args = dargs.DatasetArgs(input_aliases=in_aliases)
        dset_args.add_dataset_arg_double(group,
                                         dargs.arg_type.INPUT,
                                         required=False)

        self.parser = parser
        self.dset_args = dset_args
        self.item_args = item_args
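--tablesize caps the number of table rows per HTML report file. One plausible way the report writer could split the parsed TSV rows into per-file chunks is sketched below; the helper is hypothetical.

def chunk_rows(rows, tablesize=None):
    # Yield lists of at most `tablesize` rows; a single chunk if unlimited.
    rows = list(rows)
    if tablesize is None:
        yield rows
        return
    for start in range(0, len(rows), tablesize):
        yield rows[start:start + tablesize]

With 250 rows and --tablesize 100 this yields chunks of 100, 100 and 50 rows, each of which would be rendered into its own HTML report file.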
Example #8
    def __init__(self):
        parser = argparse.ArgumentParser(
            description="Visualize hidden layer activations of model using "
            "given dataset")
        parser.add_argument('logdir',
                            help=('Directory to output visualized activation '
                                  'images to.'))

        # trained neural network model settings
        group = parser.add_argument_group('Trained model settings')
        net_args.add_network_arg(group, short_alias='n')
        net_args.add_model_file_arg(group, short_alias='m', required=True)

        # input dataset settings
        group = parser.add_argument_group(title="Input dataset")
        in_aliases = {'dataset name': 'name', 'dataset directory': 'srcdir'}
        dset_args = dargs.DatasetArgs(input_aliases=in_aliases)
        dset_args.add_dataset_arg_double(group,
                                         dargs.arg_type.INPUT,
                                         required=True,
                                         dir_default=os.path.curdir)
        item_args = dargs.ItemTypeArgs()
        item_args.add_item_type_args(group, dargs.arg_type.INPUT)
        group.add_argument('--start_item',
                           default=0,
                           type=int,
                           help=('index of first dataset item to use.'))
        group.add_argument('--stop_item',
                           default=None,
                           type=int,
                           help=('index of the dataset item after the last '
                                 'item to use.'))

        # misc
        parser.add_argument('--usecpu',
                            action='store_true',
                            help=('Use host CPU instead of the CUDA device. '
                                  'On systems without a dedicated CUDA device '
                                  'and no CUDA-enabled version of tensorflow '
                                  'installed, this flag has no effect.'))
        self.dset_args = dset_args
        self.item_args = item_args
        self.parser = parser
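One common way a --usecpu flag like the one above can be honoured is to hide CUDA devices from TensorFlow before any session is created; whether these scripts do exactly this is an assumption. Sketch:

import os

def apply_usecpu_flag(usecpu):
    # Hiding CUDA devices forces TensorFlow onto the CPU. On machines
    # without a CUDA device or a CUDA-enabled TensorFlow build this is
    # effectively a no-op, matching the help text above.
    if usecpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'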
Example #9
 def test_model_arg_missing_when_required_raises_error(self):
     parser = argparse.ArgumentParser()
     net_args.add_model_file_arg(parser, required=True)
     # argparse reports an error and exits when a required argument is missing
     with self.assertRaises(SystemExit):
         parser.parse_args([])
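A companion check for the positive case, with the value supplied while required=True, could look like the following; it assumes the helper's default long flag is --model_file, as Example #2 suggests, and is an illustrative addition rather than part of the original test suite.

 def test_model_arg_present_when_required_parses(self):
     parser = argparse.ArgumentParser()
     net_args.add_model_file_arg(parser, required=True)
     # parsing succeeds when the required --model_file value is supplied
     res = parser.parse_args(['--model_file', 'model_file.dat'])
     self.assertEqual(res.model_file, 'model_file.dat')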