def train_aoi(output_path, data_fpath, labels_fpath):
    r"""
    Train the AoI (annotation-of-interest) model on a numpy dataset and
    return the path of the saved model state.

    CommandLine:
        python -m ibeis_cnn.train --test-train_aoi

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.train import *  # NOQA
        >>> result = train_aoi()
        >>> print(result)
    """
    era_size = 256
    batch_size = 16
    max_epochs = era_size * 16
    # Defaults below are overridable from the command line via ut.argparse_dict
    hyperparams = ut.argparse_dict({
        'era_size': era_size,
        'learning_rate': .01,
        'rate_schedule': 0.75,
        'momentum': .9,
        'weight_decay': 0.0001,
        'augment_on': True,
        'augment_on_validate': True,
        'whiten_on': False,
        'max_epochs': max_epochs,
        'stopping_patience': max_epochs,
        'class_weight': None,
    })

    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.get_numpy_dataset2('aoi', data_fpath, labels_fpath,
                                             output_path)
    X_train, y_train = dataset.subset('train')
    X_valid, y_valid = dataset.subset('valid')
    print('dataset.training_dpath = %r' % (dataset.training_dpath, ))

    # The network consumes the raw feature vector plus 4 extra values
    # (presumably bbox coordinates appended to each sample — TODO confirm)
    num_features = dataset.data_shape[0] + 4

    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    model = AoIModel(input_shape=(batch_size, num_features),
                     training_dpath=dataset.training_dpath, **hyperparams)

    ut.colorprint('[netrun] Initialize architecture', 'yellow')
    model.output_dims = 1
    # None in the leading axis lets the net accept any batch size at runtime
    model.input_shape = (None, num_features)
    model.batch_size = batch_size
    model.init_arch()

    ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
    if model.has_saved_state():
        model.load_model_state()

    ut.colorprint('[netrun] Training Requested', 'yellow')
    # parse training arguments
    config = ut.argparse_dict(dict(
        era_size=era_size,
        max_epochs=max_epochs,
        show_confusion=False,
    ))
    model.monitor_config.update(**config)

    print('\n[netrun] Model Info')
    model.print_layer_info()

    ut.colorprint('[netrun] Begin training', 'yellow')
    model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    return model.save_model_state()
def train_classifier(output_path, data_fpath, labels_fpath):
    r"""
    Train the classifier model on a numpy dataset and return the path of
    the saved model state.

    CommandLine:
        python -m ibeis_cnn.train --test-train_classifier

    Example:
        >>> # DISABLE_DOCTEST
        >>> from ibeis_cnn.train import *  # NOQA
        >>> result = train_classifier()
        >>> print(result)
    """
    era_size = 16
    max_epochs = 256
    # Defaults below are overridable from the command line via ut.argparse_dict
    hyperparams = ut.argparse_dict({
        'era_size': era_size,
        'batch_size': 128,
        'learning_rate': .01,
        'rate_schedule': 0.75,
        'momentum': .9,
        'weight_decay': 0.0001,
        'augment_on': True,
        'whiten_on': True,
        'max_epochs': max_epochs,
    })

    ut.colorprint('[netrun] Ensuring Dataset', 'yellow')
    dataset = ingest_data.get_numpy_dataset2('classifier', data_fpath,
                                             labels_fpath, output_path)
    X_train, y_train = dataset.subset('train')
    X_valid, y_valid = dataset.subset('valid')
    print('dataset.training_dpath = %r' % (dataset.training_dpath,))

    ut.colorprint('[netrun] Architecture Specification', 'yellow')
    model = ClassifierModel(
        data_shape=dataset.data_shape,
        training_dpath=dataset.training_dpath,
        **hyperparams)

    ut.colorprint('[netrun] Init encoder and convert labels', 'yellow')
    if hasattr(model, 'init_encoder'):
        model.init_encoder(y_train)

    # typo fixed: was 'archchitecture' (now matches train_aoi's message)
    ut.colorprint('[netrun] Initialize architecture', 'yellow')
    model.init_arch()

    ut.colorprint('[netrun] * Initializing new weights', 'lightgray')
    if model.has_saved_state():
        model.load_model_state()

    ut.colorprint('[netrun] Training Requested', 'yellow')
    # parse training arguments
    config = ut.argparse_dict(dict(
        monitor=True,
        monitor_updates=True,
        show_confusion=True,
        era_size=era_size,
        max_epochs=max_epochs,
    ))
    model.monitor_config.update(**config)

    if getattr(model, 'encoder', None) is not None:
        # Map raw labels to encoder indices. A dict gives O(1) lookup per
        # label instead of the O(k) list.index scan per element.
        class_to_idx = {class_: idx
                        for idx, class_ in enumerate(model.encoder.classes_)}
        y_train = np.array([class_to_idx[_] for _ in y_train])
        y_valid = np.array([class_to_idx[_] for _ in y_valid])

    print('\n[netrun] Model Info')
    model.print_layer_info()

    ut.colorprint('[netrun] Begin training', 'yellow')
    model.fit(X_train, y_train, X_valid=X_valid, y_valid=y_valid)

    model_path = model.save_model_state()
    return model_path