def main(experiment_name, list_experiments=False, gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB."""
    if list_experiments:
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        print 'You can add to the DB with: '\
            'python prepare_experiments.py --experiment=%s' % \
            exps[0].values()[0]
        return

    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()

    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name,
        log=log,
        config=config)  # Update config w/ DB params
    dataset_module = py_utils.import_module(
        model_dir=config.dataset_info,
        dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    train_data, train_means = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[1],  # TODO: SEARCH FOR INDEX.
        log=log)
    val_data, val_means = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[0],
        log=log)

    # Initialize output folders
    dir_list = {
        'checkpoints': os.path.join(config.checkpoints, condition_label),
        'summaries': os.path.join(config.summaries, condition_label),
        'condition_evaluations': os.path.join(
            config.condition_evaluations, condition_label),
        'experiment_evaluations': os.path.join(  # DEPRECATED
            config.experiment_evaluations, experiment_label),
        'visualization': os.path.join(config.visualizations, condition_label),
        'weights': os.path.join(
            config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]

    # Prepare data loaders on the cpu
    config.data_augmentations = py_utils.flatten_list(
        config.data_augmentations,
        log)
    with tf.device('/cpu:0'):
        train_images, train_labels = data_loader.inputs(
            dataset=train_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=config.data_augmentations,
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle)
        val_images, val_labels = data_loader.inputs(
            dataset=val_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=config.data_augmentations,
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle)
    log.info('Created tfrecord dataloader tensors.')

    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join(
                'models',
                'structs',
                experiment_name).replace(os.path.sep, '.'))
    except IOError:
        print 'Could not find the model structure: %s' % experiment_name

    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure,
        exp_params=exp_params)

    # Prepare model on GPU
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:
            # Training model
            if len(dataset_module.output_size) > 1:
                log.warning(
                    'Found > 1 dimension for your output size. '
                    'Converting to a scalar.')
                dataset_module.output_size = np.prod(
                    dataset_module.output_size)

            if hasattr(model_dict, 'output_structure'):
                # Use specified output layer
                output_structure = model_dict.output_structure
            else:
                output_structure = None
            model = model_utils.model_class(
                mean=train_means,
                training=True,
                output_size=dataset_module.output_size)
            train_scores, model_summary = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            log.info('Built training model.')
            log.debug(
                json.dumps(model_summary, indent=4),
                verbose=0)
            print_model_architecture(model_summary)

            # Prepare the loss function
            train_loss, _ = loss_utils.loss_interpreter(
                logits=train_scores,
                labels=train_labels,
                loss_type=config.loss_function,
                dataset_module=dataset_module)

            # Add weight decay if requested
            if len(model.regularizations) > 0:
                train_loss = loss_utils.wd_loss(
                    regularizations=model.regularizations,
                    loss=train_loss,
                    wd_penalty=config.regularization_strength)
            train_op = loss_utils.optimizer_interpreter(
                loss=train_loss,
                lr=config.lr,
                optimizer=config.optimizer,
                constraints=config.optimizer_constraints,
                model=model)
            log.info('Built training loss function.')

            train_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,
                pred=train_scores,
                labels=train_labels)  # training accuracy
            if int(train_images.get_shape()[-1]) <= 3:
                tf.summary.image('train images', train_images)
            tf.summary.scalar('training loss', train_loss)
            tf.summary.scalar('training accuracy', train_accuracy)
            log.info('Added training summaries.')

            # Validation model
            scope.reuse_variables()
            val_model = model_utils.model_class(
                mean=val_means,
                training=True,
                output_size=dataset_module.output_size)
            val_scores, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            log.info('Built validation model.')

            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_scores,
                labels=val_labels,
                loss_type=config.loss_function,
                dataset_module=dataset_module)
            val_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,
                pred=val_scores,
                labels=val_labels)  # validation accuracy
            if int(train_images.get_shape()[-1]) <= 3:
                tf.summary.image('val images', val_images)
            tf.summary.scalar('validation loss', val_loss)
            tf.summary.scalar('validation accuracy', val_accuracy)
            log.info('Added validation summaries.')

    # Set up summaries and saver
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()))
    summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph)

    # Set up exemplar threading
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_accuracy': train_accuracy,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores
    }
    val_dict = {
        'val_loss': val_loss,
        'val_accuracy': val_accuracy,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
    }

    # Start training loop
    np.save(
        os.path.join(
            dir_list['condition_evaluations'],
            'training_config_file'),
        config)
    log.info('Starting training')
    output_dict = training.training_loop(
        config=config,
        db=db,
        coord=coord,
        sess=sess,
        summary_op=summary_op,
        summary_writer=summary_writer,
        saver=saver,
        threads=threads,
        summary_dir=dir_list['summaries'],
        checkpoint_dir=dir_list['checkpoints'],
        weight_dir=dir_list['weights'],
        train_dict=train_dict,
        val_dict=val_dict,
        train_model=model,
        val_model=val_model,
        exp_params=exp_params)
    log.info('Finished training.')

    model_name = config.model_struct.replace('/', '_')
    py_utils.save_npys(
        data=output_dict,
        model_name=model_name,
        output_string=dir_list['experiment_evaluations'])
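

# --- Hedged usage sketch (assumption, not part of the original worker) ---
# One way the main() worker above could be launched from the command line,
# mirroring its keyword arguments. The flag names here are illustrative; the
# project's own CLI (e.g. the prepare_experiments.py script referenced in the
# printout) may differ.
def _run_main_from_cli():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--experiment', dest='experiment_name', type=str, default=None,
        help='Name of the experiment to pull from the DB.')
    parser.add_argument(
        '--list_experiments', dest='list_experiments', action='store_true',
        help='List experiments that are initialized in the DB.')
    parser.add_argument(
        '--gpu', dest='gpu_device', type=str, default='/gpu:0',
        help='Device string for model placement.')
    args = parser.parse_args()
    main(**vars(args))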
def main(
        experiment_name,
        list_experiments=False,
        load_and_evaluate_ckpt=None,
        placeholder_data=None,
        grad_images=False,
        gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB."""
    if list_experiments:
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        if len(exps) == 0:
            print 'No experiments found.'
        else:
            print 'You can add to the DB with: '\
                'python prepare_experiments.py --experiment=%s' % \
                exps[0].values()[0]
        return

    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()

    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    assert experiment_name is not None, 'Empty experiment name.'
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config.load_and_evaluate_ckpt = load_and_evaluate_ckpt
    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        from ops import evaluation
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name,
        log=log,
        config=config)  # Update config w/ DB params
    dataset_module = py_utils.import_module(
        model_dir=config.dataset_info,
        dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    train_key = [k for k in dataset_module.folds.keys() if 'train' in k]
    if not len(train_key):
        train_key = 'train'
    else:
        train_key = train_key[0]
    train_data, train_means_image, train_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=train_key,
        log=log)
    val_key = [k for k in dataset_module.folds.keys() if 'val' in k]
    if not len(val_key):
        val_key = 'train'
    else:
        val_key = val_key[0]
    val_data, val_means_image, val_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=val_key,
        log=log)

    # Initialize output folders
    dir_list = {
        'checkpoints': os.path.join(
            config.checkpoints, condition_label),
        'summaries': os.path.join(
            config.summaries, condition_label),
        'condition_evaluations': os.path.join(
            config.condition_evaluations, condition_label),
        'experiment_evaluations': os.path.join(  # DEPRECATED
            config.experiment_evaluations, experiment_label),
        'visualization': os.path.join(
            config.visualizations, condition_label),
        'weights': os.path.join(
            config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]

    # Prepare data loaders on the cpu
    if all(isinstance(i, list) for i in config.data_augmentations):
        if config.data_augmentations:
            config.data_augmentations = py_utils.flatten_list(
                config.data_augmentations,
                log)
    if load_and_evaluate_ckpt is not None:
        config.epochs = 1
        config.train_shuffle = False
        config.val_shuffle = False
    with tf.device('/cpu:0'):
        if placeholder_data:
            placeholder_shape = placeholder_data['train_image_shape']
            placeholder_dtype = placeholder_data['train_image_dtype']
            original_train_images = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='train_images')
            placeholder_shape = placeholder_data['train_label_shape']
            placeholder_dtype = placeholder_data['train_label_dtype']
            original_train_labels = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='train_labels')
            placeholder_shape = placeholder_data['val_image_shape']
            placeholder_dtype = placeholder_data['val_image_dtype']
            original_val_images = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='val_images')
            placeholder_shape = placeholder_data['val_label_shape']
            placeholder_dtype = placeholder_data['val_label_dtype']
            original_val_labels = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='val_labels')

            # Apply augmentations
            (train_images,
             train_labels) = data_loader.placeholder_image_augmentations(
                images=original_train_images,
                model_input_image_size=dataset_module.model_input_image_size,
                labels=original_train_labels,
                data_augmentations=config.data_augmentations,
                batch_size=config.batch_size)
            (val_images,
             val_labels) = data_loader.placeholder_image_augmentations(
                images=original_val_images,
                model_input_image_size=dataset_module.model_input_image_size,
                labels=original_val_labels,
                data_augmentations=config.data_augmentations,
                batch_size=config.batch_size)

            # Store in the placeholder dict
            placeholder_data['train_images'] = original_train_images
            placeholder_data['train_labels'] = original_train_labels
            placeholder_data['val_images'] = original_val_images
            placeholder_data['val_labels'] = original_val_labels
        else:
            train_images, train_labels = data_loader.inputs(
                dataset=train_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=config.data_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_train,
                resize_output=config.resize_output)
            if hasattr(config, 'val_augmentations'):
                val_augmentations = config.val_augmentations
            else:
                val_augmentations = config.data_augmentations
            val_images, val_labels = data_loader.inputs(
                dataset=val_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=val_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_val,
                resize_output=config.resize_output)
    log.info('Created tfrecord dataloader tensors.')

    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join(
                'models',
                'structs',
                experiment_name).replace(os.path.sep, '.'))
    except IOError:
        print 'Could not find the model structure: %s in folder %s' % (
            struct_name,
            experiment_name)

    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure,
        exp_params=exp_params)

    # Prepare variables for the models
    if len(dataset_module.output_size) == 2:
        log.warning(
            'Found > 1 dimension for your output size. '
            'Converting to a scalar.')
        dataset_module.output_size = np.prod(
            dataset_module.output_size)

    if hasattr(model_dict, 'output_structure'):
        # Use specified output layer
        output_structure = model_dict.output_structure
    else:
        output_structure = None

    # Correct number of output neurons if needed
    if config.dataloader_override and\
            'weights' in output_structure[-1].keys():
        output_neurons = output_structure[-1]['weights'][0]
        size_check = output_neurons != dataset_module.output_size
        fc_check = output_structure[-1]['layers'][0] == 'fc'
        if size_check and fc_check:
            output_structure[-1]['weights'][0] = dataset_module.output_size
            log.warning('Adjusted output neurons from %s to %s.' % (
                output_neurons,
                dataset_module.output_size))

    # Prepare model on GPU
    if not hasattr(dataset_module, 'input_normalization'):
        dataset_module.input_normalization = None
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:
            # Training model
            model = model_utils.model_class(
                mean=train_means_image,
                training=True,
                output_size=dataset_module.output_size,
                input_normalization=dataset_module.input_normalization)
            train_scores, model_summary, _ = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            if grad_images:
                oh_dims = int(train_scores.get_shape()[-1])
                target_scores = tf.one_hot(train_labels, oh_dims) * train_scores
                train_gradients = tf.gradients(target_scores, train_images)[0]
            log.info('Built training model.')
            log.debug(
                json.dumps(model_summary, indent=4),
                verbose=0)
            print_model_architecture(model_summary)

            # Normalize labels on GPU if needed
            if 'normalize_labels' in exp_params.keys():
                if exp_params['normalize_labels'] == 'zscore':
                    train_labels -= train_means_label['mean']
                    train_labels /= train_means_label['std']
                    val_labels -= train_means_label['mean']
                    val_labels /= train_means_label['std']
                    log.info('Z-scoring labels.')
                elif exp_params['normalize_labels'] == 'mean':
                    train_labels -= train_means_label['mean']
                    val_labels -= val_means_label['mean']
                    log.info('Mean-centering labels.')

            # Check the shapes of labels and scores
            if not isinstance(train_scores, list):
                if len(
                        train_scores.get_shape()) != len(
                            train_labels.get_shape()):
                    train_shape = train_scores.get_shape().as_list()
                    label_shape = train_labels.get_shape().as_list()
                    val_shape = val_scores.get_shape().as_list()
                    val_label_shape = val_labels.get_shape().as_list()
                    if len(train_shape) == 2 and \
                            len(label_shape) == 1 and \
                            train_shape[-1] == 1:
                        train_labels = tf.expand_dims(train_labels, axis=-1)
                        val_labels = tf.expand_dims(val_labels, axis=-1)
                    elif len(label_shape) == 2 and \
                            len(train_shape) == 1 and \
                            label_shape[-1] == 1:
                        train_scores = tf.expand_dims(train_scores, axis=-1)
                        val_scores = tf.expand_dims(val_scores, axis=-1)

            # Prepare the loss function
            train_loss, _ = loss_utils.loss_interpreter(
                logits=train_scores,  # TODO
                labels=train_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(train_loss, list):
                for lidx, tl in enumerate(train_loss):
                    tf.summary.scalar('training_loss_%s' % lidx, tl)
                train_loss = tf.add_n(train_loss)
            else:
                tf.summary.scalar('training_loss', train_loss)

            # Add weight decay if requested
            if len(model.regularizations) > 0:
                train_loss = loss_utils.wd_loss(
                    regularizations=model.regularizations,
                    loss=train_loss,
                    wd_penalty=config.regularization_strength)
            assert config.lr is not None, 'No learning rate.'
            # TODO: Make a QC function
            if config.lr > 1:
                old_lr = config.lr
                config.lr = loss_utils.create_lr_schedule(
                    train_batch=config.batch_size,
                    num_training=config.lr)
                config.optimizer = 'momentum'
                log.info('Forcing momentum classifier.')
            else:
                old_lr = None
            train_op = loss_utils.optimizer_interpreter(
                loss=train_loss,
                lr=config.lr,
                optimizer=config.optimizer,
                constraints=config.optimizer_constraints,
                model=model)
            log.info('Built training loss function.')

            # Add a score for the training set
            train_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO: Attach to exp cnfg
                pred=train_scores,  # TODO
                labels=train_labels)

            # Add aux scores if requested
            train_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    train_aux[m] = eval_metrics.metric_interpreter(
                        metric=m,
                        pred=train_scores,
                        labels=train_labels)  # [0]  # TODO: Fix for multiloss

            # Prepare remaining tensorboard summaries
            if config.tensorboard_images:
                if len(train_images.get_shape()) == 4:
                    tf_fun.image_summaries(train_images, tag='Training images')
                if (np.asarray(
                        train_labels.get_shape().as_list()) > 1).sum() > 2:
                    tf_fun.image_summaries(
                        train_labels,
                        tag='Training_targets')
                    tf_fun.image_summaries(
                        train_scores,
                        tag='Training_predictions')
            if isinstance(train_accuracy, list):
                for tidx, ta in enumerate(train_accuracy):
                    tf.summary.scalar('training_accuracy_%s' % tidx, ta)
            else:
                tf.summary.scalar('training_accuracy', train_accuracy)
            if config.pr_curve:
                if isinstance(train_scores, list):
                    for pidx, train_score in enumerate(train_scores):
                        train_label = train_labels[:, pidx]
                        pr_summary.op(
                            tag='training_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(
                                    train_score,
                                    axis=-1),
                                tf.float32),
                            labels=tf.cast(train_label, tf.bool),
                            display_name='training_precision_recall_%s' % pidx)
                else:
                    pr_summary.op(
                        tag='training_pr',
                        predictions=tf.cast(
                            tf.argmax(
                                train_scores,
                                axis=-1),
                            tf.float32),
                        labels=tf.cast(train_labels, tf.bool),
                        display_name='training_precision_recall')
            log.info('Added training summaries.')

        with tf.variable_scope('cnn', tf.AUTO_REUSE) as scope:
            # Validation model
            scope.reuse_variables()
            val_model = model_utils.model_class(
                mean=train_means_image,  # Normalize with train data
                training=False,
                output_size=dataset_module.output_size,
                input_normalization=dataset_module.input_normalization)
            val_scores, _, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            if grad_images:
                oh_dims = int(val_scores.get_shape()[-1])
                target_scores = tf.one_hot(val_labels, oh_dims) * val_scores
                val_gradients = tf.gradients(target_scores, val_images)[0]
            log.info('Built validation model.')

            # Check the shapes of labels and scores
            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_scores,
                labels=val_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(val_loss, list):
                for lidx, tl in enumerate(val_loss):
                    tf.summary.scalar('validation_loss_%s' % lidx, tl)
                val_loss = tf.add_n(val_loss)
            else:
                tf.summary.scalar('validation_loss', val_loss)

            # Add a score for the validation set
            val_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO
                pred=val_scores,
                labels=val_labels)

            # Add aux scores if requested
            val_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    val_aux[m] = eval_metrics.metric_interpreter(
                        metric=m,
                        pred=val_scores,
                        labels=val_labels)  # [0]  # TODO: Fix for multiloss

            # Prepare tensorboard summaries
            if config.tensorboard_images:
                if len(val_images.get_shape()) == 4:
                    tf_fun.image_summaries(
                        val_images,
                        tag='Validation')
                if (np.asarray(
                        val_labels.get_shape().as_list()) > 1).sum() > 2:
                    tf_fun.image_summaries(
                        val_labels,
                        tag='Validation_targets')
                    tf_fun.image_summaries(
                        val_scores,
                        tag='Validation_predictions')
            if isinstance(val_accuracy, list):
                for vidx, va in enumerate(val_accuracy):
                    tf.summary.scalar('validation_accuracy_%s' % vidx, va)
            else:
                tf.summary.scalar('validation_accuracy', val_accuracy)
            if config.pr_curve:
                if isinstance(val_scores, list):
                    for pidx, val_score in enumerate(val_scores):
                        val_label = val_labels[:, pidx]
                        pr_summary.op(
                            tag='validation_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(
                                    val_score,
                                    axis=-1),
                                tf.float32),
                            labels=tf.cast(val_label, tf.bool),
                            display_name='validation_precision_recall_%s' % pidx)
                else:
                    pr_summary.op(
                        tag='validation_pr',
                        predictions=tf.cast(
                            tf.argmax(
                                val_scores,
                                axis=-1),
                            tf.float32),
                        labels=tf.cast(val_labels, tf.bool),
                        display_name='validation_precision_recall')
            log.info('Added validation summaries.')

    # Set up summaries and saver
    if not hasattr(config, 'max_to_keep'):
        config.max_to_keep = None
    saver = tf.train.Saver(
        var_list=tf.global_variables(),
        max_to_keep=config.max_to_keep)
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()))
    summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph)

    # Set up exemplar threading
    if placeholder_data:
        coord, threads = None, None
    else:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores
    }
    val_dict = {
        'val_loss': val_loss,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
    }
    if grad_images:
        train_dict['train_gradients'] = train_gradients
        val_dict['val_gradients'] = val_gradients

    if isinstance(train_accuracy, list):
        for tidx, (ta, va) in enumerate(zip(train_accuracy, val_accuracy)):
            train_dict['train_accuracy_%s' % tidx] = ta
            val_dict['val_accuracy_%s' % tidx] = va
    else:
        train_dict['train_accuracy_0'] = train_accuracy
        val_dict['val_accuracy_0'] = val_accuracy

    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        del train_dict['train_op']

    if hasattr(dataset_module, 'aux_scores'):
        # Attach auxiliary scores to tensor dicts
        for m in dataset_module.aux_scores:
            train_dict['train_aux_%s' % m] = train_aux[m]
            val_dict['val_aux_%s' % m] = val_aux[m]

    # Start training loop
    if old_lr is not None:
        config.lr = old_lr
    np.save(
        os.path.join(
            dir_list['condition_evaluations'],
            'training_config_file'),
        config)
    log.info('Starting training')
    if load_and_evaluate_ckpt is not None:
        return evaluation.evaluation_loop(
            config=config,
            db=db,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            threads=threads,
            summary_dir=dir_list['summaries'],
            checkpoint_dir=dir_list['checkpoints'],
            weight_dir=dir_list['weights'],
            train_dict=train_dict,
            val_dict=val_dict,
            train_model=model,
            val_model=val_model,
            exp_params=exp_params,
            placeholder_data=placeholder_data)
    else:
        output_dict = training.training_loop(
            config=config,
            db=db,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            threads=threads,
            summary_dir=dir_list['summaries'],
            checkpoint_dir=dir_list['checkpoints'],
            weight_dir=dir_list['weights'],
            train_dict=train_dict,
            val_dict=val_dict,
            train_model=model,
            val_model=val_model,
            exp_params=exp_params)
    log.info('Finished training.')

    model_name = config.model_struct.replace('/', '_')
    if output_dict is not None:
        py_utils.save_npys(
            data=output_dict,
            model_name=model_name,
            output_string=dir_list['experiment_evaluations'])
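

# --- Hedged usage sketch (assumption) ---
# Shape/dtype keys that the main() above reads from `placeholder_data` when
# the placeholder path is used. The concrete shapes below are illustrative
# only (a 4-D NHWC image batch and a 1-D label batch); main() later writes
# the created tf.placeholder handles back into this dict under
# 'train_images', 'train_labels', 'val_images' and 'val_labels'.
example_placeholder_data = {
    'train_image_shape': [None, 224, 224, 3],  # hypothetical NHWC batch
    'train_image_dtype': tf.float32,
    'train_label_shape': [None],
    'train_label_dtype': tf.int64,
    'val_image_shape': [None, 224, 224, 3],
    'val_image_dtype': tf.float32,
    'val_label_shape': [None],
    'val_label_dtype': tf.int64,
}
# e.g. main('my_experiment', placeholder_data=example_placeholder_data,
#           load_and_evaluate_ckpt='/path/to/model.ckpt')  # illustrative paths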
def build_model(
        exp_params,
        config,
        log,
        dt_string,
        gpu_device,
        cpu_device,
        use_db=True,
        add_config=None,
        placeholders=False,
        checkpoint=None,
        test=False,
        map_out=None,
        num_batches=None,
        tensorboard_images=False):
    """Standard model building routines."""
    config = py_utils.add_to_config(d=exp_params, config=config)
    if not hasattr(config, 'force_path'):
        config.force_path = False
    exp_label = '%s_%s_%s' % (
        exp_params['model'],
        exp_params['experiment'],
        py_utils.get_dt_stamp())
    directories = py_utils.prepare_directories(config, exp_label)
    dataset_module = py_utils.import_module(
        pre_path=config.dataset_classes,
        module=config.train_dataset)
    train_dataset_module = dataset_module.data_processing()
    if not config.force_path:
        (train_data, _, _) = py_utils.get_data_pointers(
            dataset=train_dataset_module.output_name,
            base_dir=config.tf_records,
            local_dir=config.local_tf_records,
            cv='train')
    else:
        train_data = train_dataset_module.train_path
    dataset_module = py_utils.import_module(
        pre_path=config.dataset_classes,
        module=config.val_dataset)
    val_dataset_module = dataset_module.data_processing()
    if not config.force_path:
        val_data, _, _ = py_utils.get_data_pointers(
            dataset=val_dataset_module.output_name,
            base_dir=config.tf_records,
            local_dir=config.local_tf_records,
            cv='val')
    else:
        val_data = train_dataset_module.val_path
    # val_means_image, val_means_label = None, None

    # Create data tensors
    if hasattr(train_dataset_module, 'aux_loss'):
        train_aux_loss = train_dataset_module.aux_loss
    else:
        train_aux_loss = None
    with tf.device(cpu_device):
        if placeholders and not test:
            # Train with placeholders
            (pl_train_images,
             pl_train_labels,
             pl_val_images,
             pl_val_labels,
             train_images,
             train_labels,
             val_images,
             val_labels) = get_placeholders(
                train_dataset=train_dataset_module,
                val_dataset=val_dataset_module,
                config=config)
            train_module_data = train_dataset_module.get_data()
            val_module_data = val_dataset_module.get_data()
            placeholders = {
                'train': {
                    'images': train_module_data[0]['train'],
                    'labels': train_module_data[1]['train']
                },
                'val': {
                    'images': val_module_data[0]['val'],
                    'labels': val_module_data[1]['val']
                },
            }
            train_aux, val_aux = None, None
        elif placeholders and test:
            test_dataset_module = train_dataset_module
            # Test with placeholders
            (pl_test_images,
             pl_test_labels,
             test_images,
             test_labels) = get_placeholders_test(
                test_dataset=test_dataset_module,
                config=config)
            test_module_data = test_dataset_module.get_data()
            placeholders = {
                'test': {
                    'images': test_module_data[0]['test'],
                    'labels': test_module_data[1]['test']
                },
            }
            train_aux, val_aux = None, None
        else:
            train_images, train_labels, train_aux = data_loader.inputs(
                dataset=train_data,
                batch_size=config.train_batch_size,
                model_input_image_size=train_dataset_module.model_input_image_size,
                tf_dict=train_dataset_module.tf_dict,
                data_augmentations=config.train_augmentations,
                num_epochs=config.epochs,
                aux=train_aux_loss,
                tf_reader_settings=train_dataset_module.tf_reader,
                shuffle=config.shuffle_train)
            if hasattr(val_dataset_module, 'val_model_input_image_size'):
                val_dataset_module.model_input_image_size = \
                    val_dataset_module.val_model_input_image_size
            val_images, val_labels, val_aux = data_loader.inputs(
                dataset=val_data,
                batch_size=config.val_batch_size,
                model_input_image_size=val_dataset_module.model_input_image_size,
                tf_dict=val_dataset_module.tf_dict,
                data_augmentations=config.val_augmentations,
                num_epochs=None,
                tf_reader_settings=val_dataset_module.tf_reader,
                shuffle=config.shuffle_val)

    # Build training and val models
    model_spec = py_utils.import_module(
        module=config.model,
        pre_path=config.model_classes)
    if hasattr(train_dataset_module, 'force_output_size'):
        train_dataset_module.output_size = train_dataset_module.force_output_size
    if hasattr(val_dataset_module, 'force_output_size'):
        val_dataset_module.output_size = val_dataset_module.force_output_size
    if hasattr(config, 'loss_function'):
        train_loss_function = config.loss_function
        val_loss_function = config.loss_function
    else:
        train_loss_function = config.train_loss_function
        val_loss_function = config.val_loss_function

    # Route test vs train/val
    h_check = [
        x for x in tf.trainable_variables()
        if 'homunculus' in x.name or 'humonculus' in x.name]
    if not hasattr(config, 'default_restore'):
        config.default_restore = False
    if test:
        assert len(gpu_device) == 1, 'Testing only works with 1 gpu.'
        gpu_device = gpu_device[0]
        with tf.device(gpu_device):
            if not placeholders:
                test_images = val_images
                test_labels = val_labels
                test_dataset_module = val_dataset_module
            test_logits, test_vars = model_spec.build_model(
                data_tensor=test_images,
                reuse=None,
                training=False,
                output_shape=test_dataset_module.output_size)
            if test_logits.dtype is not tf.float32:
                test_logits = tf.cast(test_logits, tf.float32)

            # Derive loss
            if not hasattr(config, 'test_loss_function'):
                test_loss_function = val_loss_function
            else:
                test_loss_function = config.test_loss_function
            test_loss = losses.derive_loss(
                labels=test_labels,
                logits=test_logits,
                loss_type=test_loss_function)

            # Derive score
            test_score = losses.derive_score(
                labels=test_labels,
                logits=test_logits,
                loss_type=test_loss_function,
                score_type=config.score_function)

        # Initialize model
        (sess,
         saver,
         summary_op,
         summary_writer,
         coord,
         threads,
         restore_saver) = initialize_tf(
            config=config,
            placeholders=placeholders,
            ckpt=checkpoint,
            default_restore=config.default_restore,
            directories=directories)

        if placeholders:
            proc_images = test_images
            proc_labels = test_labels
            test_images = pl_test_images
            test_labels = pl_test_labels
        _, H, W, _ = test_vars['model_output_y'].shape
        jacobian = tf.gradients(
            test_logits,
            test_vars['model_output_x'])[0]
        # g.batch_jacobian(test_vars['model_output_x'], test_images)
        test_dict = {
            'test_loss': test_loss,
            'test_score': test_score,
            'test_images': test_images,
            'test_labels': test_labels,
            'test_logits': test_logits,
            'test_jacobian': jacobian
        }
        if placeholders:
            test_dict['test_proc_images'] = proc_images
            test_dict['test_proc_labels'] = proc_labels
        if len(h_check):
            test_dict['homunculus'] = h_check[0]
        if isinstance(test_vars, dict):
            for k, v in test_vars.iteritems():
                test_dict[k] = v
        else:
            test_dict['activity'] = test_vars
    else:
        train_losses, val_losses, tower_grads, norm_updates = [], [], [], []
        train_scores, val_scores = [], []
        train_image_list, train_label_list = [], []
        val_image_list, val_label_list = [], []
        train_reuse = None
        if not hasattr(config, 'lr_schedule'):
            config.lr_schedule = None
        if hasattr(config, 'loss_function'):
            train_loss_function = config.loss_function
            val_loss_function = config.loss_function
        else:
            train_loss_function = config.train_loss_function
            val_loss_function = config.val_loss_function

        # Prepare loop
        if not placeholders:
            train_batch_queue = tf_fun.get_batch_queues(
                images=train_images,
                labels=train_labels,
                gpu_device=gpu_device)
            val_batch_queue = tf_fun.get_batch_queues(
                images=val_images,
                labels=val_labels,
                gpu_device=gpu_device)
        config.lr = optimizers.get_lr_schedule(
            lr=config.lr,
            lr_schedule=config.lr_schedule)
        opt = optimizers.get_optimizers(
            optimizer=config.optimizer,
            lr=config.lr,
            dtype=train_images.dtype)
        with tf.device(cpu_device):
            global_step = tf.train.get_or_create_global_step()
        for i, gpu in enumerate(gpu_device):
            # rs = tf.AUTO_REUSE if i > 0 else None
            with tf.device(gpu):
                with tf.name_scope('tower_%d' % i) as scope:
                    # Prepare tower data
                    if placeholders:
                        # Multi-gpu: will have to split
                        # train_images per gpu by hand
                        train_image_batch = train_images
                        val_image_batch = val_images
                        train_label_batch = train_labels
                        val_label_batch = val_labels
                    else:
                        (train_image_batch,
                         train_label_batch) = train_batch_queue.dequeue()
                        (val_image_batch,
                         val_label_batch) = val_batch_queue.dequeue()
                    train_image_list += [train_image_batch]
                    train_label_list += [train_label_batch]
                    val_image_list += [val_image_batch]
                    val_label_list += [val_label_batch]

                    # Build models
                    train_logits, train_vars = model_spec.build_model(
                        data_tensor=train_image_batch,
                        reuse=train_reuse,
                        training=True,
                        output_shape=train_dataset_module.output_size)
                    num_training_vars = len(tf.trainable_variables())
                    val_logits, val_vars = model_spec.build_model(
                        data_tensor=val_image_batch,
                        reuse=True,
                        training=False,
                        output_shape=val_dataset_module.output_size)
                    num_validation_vars = len(tf.trainable_variables())
                    assert num_training_vars == num_validation_vars, \
                        'Found a different # of train and val variables.'
                    train_reuse = True

                    # Derive losses
                    if train_logits.dtype is not tf.float32:
                        train_logits = tf.cast(train_logits, tf.float32)
                    if val_logits.dtype is not tf.float32:
                        val_logits = tf.cast(val_logits, tf.float32)
                    train_loss = losses.derive_loss(
                        labels=train_label_batch,
                        logits=train_logits,
                        images=train_image_batch,
                        loss_type=train_loss_function)
                    val_loss = losses.derive_loss(
                        labels=val_label_batch,
                        logits=val_logits,
                        images=val_image_batch,
                        loss_type=val_loss_function)

                    # Derive scores
                    train_score = losses.derive_score(
                        labels=train_labels,
                        logits=train_logits,
                        loss_type=train_loss_function,
                        score_type=config.score_function)
                    val_score = losses.derive_score(
                        labels=val_labels,
                        logits=val_logits,
                        loss_type=val_loss_function,
                        score_type=config.score_function)

                    # Add aux losses if requested
                    if hasattr(model_spec, 'weight_decay'):
                        wd = (model_spec.weight_decay() * tf.add_n(
                            [tf.nn.l2_loss(v)
                             for v in tf.trainable_variables()
                             if 'batch_normalization' not in v.name and
                             'horizontal' not in v.name and
                             'mu' not in v.name and
                             'beta' not in v.name and
                             'intercept' not in v.name]))
                        tf.summary.scalar('weight_decay', wd)
                        train_loss += wd
                    if hasattr(model_spec, 'bsds_weight_decay'):
                        wd = (model_spec.bsds_weight_decay()['l2'] * tf.add_n(
                            [tf.nn.l2_loss(v)
                             for v in tf.trainable_variables()
                             if 'horizontal' not in v.name and
                             'norm' not in v.name]))
                        tf.summary.scalar('weight_decay_readout', wd)
                        train_loss += wd
                        wd = (model_spec.bsds_weight_decay()['l1'] * tf.add_n(
                            [tf.reduce_sum(tf.abs(v))
                             for v in tf.trainable_variables()
                             if 'horizontal' in v.name]))
                        tf.summary.scalar('weight_decay_horizontal', wd)
                        train_loss += wd
                    if hasattr(model_spec, 'orthogonal'):
                        weights = [
                            v for v in tf.trainable_variables()
                            if 'horizontal' in v.name]
                        assert len(weights), \
                            'No horizontal weights for orthogonal.'
                        wd = model_spec.orthogonal() * tf.add_n(
                            [tf_fun.orthogonal(w) for w in weights])
                        tf.summary.scalar('weight_decay', wd)
                        train_loss += wd
                    if hasattr(model_spec, 'laplace'):
                        weights = [
                            v for v in tf.trainable_variables()
                            if 'horizontal' in v.name]
                        assert len(weights), \
                            'No horizontal weights for laplace.'
                        wd = model_spec.laplace() * tf.add_n(
                            [tf_fun.laplace(w) for w in weights])
                        tf.summary.scalar('weight_decay', wd)
                        train_loss += wd

                    # Derive auxiliary losses
                    if hasattr(config, 'aux_loss'):
                        aux_loss_type, scale = config.aux_loss.items()[0]
                        for k, v in train_vars.iteritems():
                            # if k in train_dataset_module.aux_loss.keys():
                            #     (aux_loss_type,
                            #      scale) = train_dataset_module.aux_loss[k]
                            train_loss += (losses.derive_loss(
                                labels=train_labels,
                                logits=v,
                                loss_type=aux_loss_type) * scale)

                    # Gather everything
                    train_losses += [train_loss]
                    val_losses += [val_loss]
                    train_scores += [train_score]
                    val_scores += [val_score]

                    # Compute and store gradients
                    with tf.variable_scope(
                            tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
                        grads = opt.compute_gradients(train_loss)
                    optimizers.check_grads(grads)
                    tower_grads += [grads]

                    # Gather normalization variables
                    norm_updates += [
                        tf.get_collection(
                            tf.GraphKeys.UPDATE_OPS, scope=scope)]

        # Recompute and optimize gradients
        grads = optimizers.average_gradients(tower_grads)
        if hasattr(config, 'clip_gradients') and config.clip_gradients:
            grads = optimizers.apply_grad_clip(grads, config.clip_gradients)
        op_vars = []
        if hasattr(config, 'exclusion_lr') and hasattr(config, 'exclusion_scope'):
            grads_0 = [
                x for x in grads if config.exclusion_scope not in x[1].name]
            grads_1 = [
                x for x in grads if config.exclusion_scope in x[1].name]
            op_vars_0 = optimizers.apply_gradients(
                opt=opt,
                grads=grads_0,
                global_step=global_step)
            opt_1 = optimizers.get_optimizers(
                optimizer=config.optimizer,
                lr=config.exclusion_lr,
                dtype=train_images.dtype)
            op_vars_1 = optimizers.apply_gradients(
                opt=opt_1,
                grads=grads_1,
                global_step=global_step)
            op_vars += [op_vars_0]
            op_vars += [op_vars_1]
        else:
            op_vars += [
                optimizers.apply_gradients(
                    opt=opt,
                    grads=grads,
                    global_step=global_step)]
        if not hasattr(config, 'variable_moving_average'):
            config.variable_moving_average = False
        if config.variable_moving_average:
            variable_averages = tf.train.ExponentialMovingAverage(
                config.variable_moving_average, global_step)
            op_vars += [variable_averages.apply(tf.trainable_variables())]
        if len(norm_updates):
            op_vars += [tf.group(*norm_updates)]
        train_op = tf.group(*op_vars)

        # Summarize losses and scores
        train_loss = tf.reduce_mean(train_losses)
        val_loss = tf.reduce_mean(val_losses)
        train_score = tf.reduce_mean(train_scores)
        val_score = tf.reduce_mean(val_scores)
        if len(train_image_list) > 1:
            train_image_list = tf.stack(train_image_list, axis=0)
            train_label_list = tf.stack(train_label_list, axis=0)
        else:
            train_image_list = train_image_list[0]
            train_label_list = train_label_list[0]
        if len(val_image_list) > 1:
            val_image_list = tf.stack(val_image_list, axis=0)
            val_label_list = tf.stack(val_label_list, axis=0)
        else:
            val_image_list = val_image_list[0]
            val_label_list = val_label_list[0]
        tf.summary.scalar('train_loss', train_loss)
        tf.summary.scalar('val_loss', val_loss)
        if tensorboard_images:
            tf.summary.image('train_images', train_images)
            tf.summary.image('val_images', val_images)

        # Initialize model
        (sess,
         saver,
         summary_op,
         summary_writer,
         coord,
         threads,
         restore_saver) = initialize_tf(
            config=config,
            placeholders=placeholders,
            ckpt=checkpoint,
            default_restore=config.default_restore,
            directories=directories)

        # Create dictionaries of important training and validation information
        if placeholders:
            proc_train_images = train_images
            proc_train_labels = train_labels
            proc_val_images = val_images
            proc_val_labels = val_labels
            train_images = pl_train_images
            train_labels = pl_train_labels
            val_images = pl_val_images
            val_labels = pl_val_labels
        train_dict = {
            'train_loss': train_loss,
            'train_score': train_score,
            'train_images': train_image_list,
            'train_labels': train_label_list,
            'train_logits': train_logits,
            'train_op': train_op
        }
        if placeholders:
            train_dict['proc_train_images'] = proc_train_images
            train_dict['proc_train_labels'] = proc_train_labels
        if train_aux is not None:
            train_dict['train_aux'] = train_aux
        if tf.contrib.framework.is_tensor(config.lr):
            train_dict['lr'] = config.lr
        else:
            train_dict['lr'] = tf.constant(config.lr)
        if isinstance(train_vars, dict):
            for k, v in train_vars.iteritems():
                train_dict[k] = v
        else:
            train_dict['activity'] = train_vars
        if hasattr(config, 'save_gradients') and config.save_gradients:
            grad = tf.gradients(train_logits, train_images)[0]
            if grad is not None:
                train_dict['gradients'] = grad
            else:
                log.warning('Could not calculate train gradients.')
        val_dict = {
            'val_loss': val_loss,
            'val_score': val_score,
            'val_images': val_image_list,
            'val_logits': val_logits,
            'val_labels': val_label_list,
        }
        if placeholders:
            val_dict['proc_val_images'] = proc_val_images
            val_dict['proc_val_labels'] = proc_val_labels
        if val_aux is not None:
            val_dict['aux'] = val_aux
        if isinstance(val_vars, dict):
            for k, v in val_vars.iteritems():
                val_dict[k] = v
        else:
            val_dict['activity'] = val_vars
        if hasattr(config, 'save_gradients') and config.save_gradients:
            grad = tf.gradients(val_logits, val_images)[0]
            if grad is not None:
                val_dict['gradients'] = grad
            else:
                log.warning('Could not calculate val gradients.')
        if len(h_check):
            val_dict['homunculus'] = h_check[0]

    # Add optional info to the config
    if add_config is not None:
        extra_list = add_config.split(',')
        for eidx, extra in enumerate(extra_list):
            setattr(config, 'extra_%s' % eidx, extra)

    # Count parameters
    num_params = tf_fun.count_parameters(var_list=tf.trainable_variables())
    print 'Model has approximately %s trainable params.' % num_params
    if test:
        return training.test_loop(
            log=log,
            config=config,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            restore_saver=restore_saver,
            directories=directories,
            test_dict=test_dict,
            exp_label=exp_label,
            num_params=num_params,
            checkpoint=checkpoint,
            num_batches=num_batches,
            save_weights=config.save_weights,
            save_checkpoints=config.save_checkpoints,
            save_activities=config.save_activities,
            save_gradients=config.save_gradients,
            map_out=map_out,
            placeholders=placeholders)
    else:
        # Start training loop
        training.training_loop(
            log=log,
            config=config,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            restore_saver=restore_saver,
            threads=threads,
            directories=directories,
            train_dict=train_dict,
            val_dict=val_dict,
            exp_label=exp_label,
            num_params=num_params,
            checkpoint=checkpoint,
            use_db=use_db,
            save_weights=config.save_weights,
            save_checkpoints=config.save_checkpoints,
            save_activities=config.save_activities,
            save_gradients=config.save_gradients,
            placeholders=placeholders)
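

# --- Hedged sketch (assumption) ---
# build_model() above relies on optimizers.average_gradients() to merge the
# per-tower gradients in `tower_grads` before a single apply_gradients call.
# This standalone version shows the standard TF1 pattern such a routine would
# follow; it is illustrative, not the project's actual implementation.
def average_gradients_sketch(tower_grads):
    """Average [(grad, var), ...] lists collected from each GPU tower."""
    averaged = []
    for grads_and_vars in zip(*tower_grads):  # same variable across towers
        var = grads_and_vars[0][1]
        grads = [g for g, _ in grads_and_vars if g is not None]
        if not grads:
            # No tower produced a gradient for this variable
            averaged.append((None, var))
            continue
        stacked = tf.stack(grads, axis=0)
        averaged.append((tf.reduce_mean(stacked, axis=0), var))
    return averaged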
def model_builder(
        params,
        config,
        model_spec,
        gpu_device,
        cpu_device,
        placeholders=False,
        tensorboard_images=False):
    """Standard model building routines."""
    config = py_utils.add_to_config(
        d=params,
        config=config)
    exp_label = '%s_%s' % (params['exp_name'], py_utils.get_dt_stamp())
    directories = py_utils.prepare_directories(config, exp_label)
    dataset_module = py_utils.import_module(
        model_dir=config.dataset_info,
        dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    train_key = [k for k in dataset_module.folds.keys() if 'train' in k]
    if not len(train_key):
        train_key = 'train'
    else:
        train_key = train_key[0]
    (train_data,
     train_means_image,
     train_means_label) = py_utils.get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=train_key)
    val_key = [k for k in dataset_module.folds.keys() if 'val' in k]
    if not len(val_key):
        val_key = 'train'
    else:
        val_key = val_key[0]
    if hasattr(config, 'val_dataset'):
        val_dataset = config.val_dataset
    else:
        val_dataset = config.dataset
    val_data, val_means_image, val_means_label = py_utils.get_data_pointers(
        dataset=val_dataset,
        base_dir=config.tf_records,
        cv=val_key)

    # Create data tensors
    with tf.device(cpu_device):
        if placeholders:
            (train_images,
             train_labels,
             val_images,
             val_labels) = get_placeholders(dataset_module, config)
            placeholders = dataset_module.get_data()
        else:
            train_images, train_labels = data_loader.inputs(
                dataset=train_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=config.data_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_train)
            val_images, val_labels = data_loader.inputs(
                dataset=val_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=config.val_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_val)

    # Build training and val models
    with tf.device(gpu_device):
        train_logits, train_hgru_act = model_spec(
            data_tensor=train_images,
            reuse=None,
            training=True)
        val_logits, val_hgru_act = model_spec(
            data_tensor=val_images,
            reuse=tf.AUTO_REUSE,
            training=False)

    # Derive loss
    loss_type = None
    if hasattr(config, 'loss_type'):
        loss_type = config.loss_type
    train_loss = losses.derive_loss(
        labels=train_labels,
        logits=train_logits,
        loss_type=loss_type)
    val_loss = losses.derive_loss(
        labels=val_labels,
        logits=val_logits,
        loss_type=loss_type)
    if hasattr(config, 'metric_type'):
        metric_type = config.metric_type
    else:
        metric_type = 'accuracy'
    if metric_type == 'pearson':
        train_accuracy = metrics.pearson_score(
            labels=train_labels,
            pred=train_logits,
            REDUCTION=tf.reduce_mean)
        val_accuracy = metrics.pearson_score(
            labels=val_labels,
            pred=val_logits,
            REDUCTION=tf.reduce_mean)
    else:
        train_accuracy = metrics.class_accuracy(
            labels=train_labels,
            logits=train_logits)
        val_accuracy = metrics.class_accuracy(
            labels=val_labels,
            logits=val_logits)
    tf.summary.scalar('train_accuracy', train_accuracy)
    tf.summary.scalar('val_accuracy', val_accuracy)
    if tensorboard_images:
        tf.summary.image('train_images', train_images)
        tf.summary.image('val_images', val_images)

    # Build optimizer
    train_op = optimizers.get_optimizer(
        train_loss,
        config['lr'],
        config['optimizer'])

    # Initialize tf variables
    saver = tf.train.Saver(
        var_list=tf.global_variables())
    summary_op = tf.summary.merge_all()
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()))
    summary_writer = tf.summary.FileWriter(
        directories['summaries'],
        sess.graph)
    if not placeholders:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    else:
        coord, threads = None, None

    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_accuracy': train_accuracy
    }
    if isinstance(train_hgru_act, dict):
        for k, v in train_hgru_act.iteritems():
            train_dict[k] = v
    else:
        train_dict['activity'] = train_hgru_act
    val_dict = {
        'val_loss': val_loss,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_accuracy': val_accuracy,
    }
    if isinstance(val_hgru_act, dict):
        for k, v in val_hgru_act.iteritems():
            val_dict[k] = v
    else:
        val_dict['activity'] = val_hgru_act

    # Count parameters
    num_params = np.sum(
        [np.prod(x.get_shape().as_list())
         for x in tf.trainable_variables()])
    print 'Model has approximately %s trainable params.' % num_params

    # Create datastructure for saving data
    ds = data_structure.data(
        batch_size=config.batch_size,
        validation_iters=config.validation_iters,
        num_validation_evals=config.num_validation_evals,
        shuffle_val=config.shuffle_val,
        lr=config.lr,
        loss_function=config.loss_function,
        optimizer=config.optimizer,
        model_name=config.model_name,
        dataset=config.dataset,
        num_params=num_params,
        output_directory=config.results)

    # Start training loop
    training.training_loop(
        config=config,
        coord=coord,
        sess=sess,
        summary_op=summary_op,
        summary_writer=summary_writer,
        saver=saver,
        threads=threads,
        directories=directories,
        train_dict=train_dict,
        val_dict=val_dict,
        exp_label=exp_label,
        data_structure=ds,
        placeholders=placeholders)
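

# --- Hedged sketch (assumption) ---
# model_builder() above can score regression targets with
# metrics.pearson_score. This is a minimal TF1 stand-in showing the
# computation that metric implies (sample Pearson correlation over a
# flattened batch); the project's metrics module may reduce differently.
def pearson_score_sketch(labels, pred):
    x = tf.reshape(tf.cast(pred, tf.float32), [-1])
    y = tf.reshape(tf.cast(labels, tf.float32), [-1])
    xm = x - tf.reduce_mean(x)  # center predictions
    ym = y - tf.reduce_mean(y)  # center targets
    cov = tf.reduce_mean(xm * ym)
    denom = tf.sqrt(
        tf.reduce_mean(tf.square(xm)) * tf.reduce_mean(tf.square(ym)))
    return cov / (denom + 1e-12)  # epsilon guards against zero variance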
def extract_vgg_features(
        cm_type='contextual_vector_vd',
        layer_name='pool3',
        output_type='sparse_pool',
        project_name=None,
        model_type='vgg16',
        timesteps=5,
        dtype=tf.float32):
    """Main extraction and training script."""
    assert project_name is not None, 'Need a project name.'

    # 1. Get file paths and load config
    config = Config()
    config.cm_type = cm_type
    project_path = config.projects[project_name]

    # 2. Assert the model is there and load neural data.
    print 'Loading preprocessed data...'
    data = np.load(os.path.join(project_path, '%s.npz' % project_name))
    neural_data = data['data_matrix']
    if config.round_neural_data:
        neural_data = np.round(neural_data)
    # TODO: across_session_data_matrix is subtracted version
    images = data['all_images'].astype(np.float32)

    # Remove zeroed columns from neural data
    channel_check = np.abs(neural_data).sum(0) > 0
    neural_data = neural_data[:, channel_check]
    # TODO: create AUX dict with each channel's X/Y
    output_aux = {'loss': config.loss_type}  # None
    rfs = rf_sizes.get_eRFs(model_type)[layer_name]

    # 3. Create an output directory if necessary and save a timestamped numpy.
    model_description = '%s_%s_%s_%s_%s_%s' % (
        cm_type,
        layer_name,
        output_type,
        project_name,
        model_type,
        timesteps)
    dt_stamp = '%s_%s' % (
        model_description,
        str(datetime.now()).replace(
            ' ', '_').replace(':', '_').replace('-', '_'))
    project_dir = os.path.join(config.results, project_name)
    out_dir = os.path.join(project_dir, dt_stamp)
    checkpoint_dir = os.path.join(out_dir, 'checkpoints')
    dirs = [config.results, config.summaries, out_dir]
    [py_utils.make_dir(x) for x in dirs]
    print '-' * 60
    print 'Training model: ' + out_dir
    print '-' * 60

    # 4. Prepare data on CPU
    neural_shape = list(neural_data.shape)
    num_neurons = neural_shape[-1]
    with tf.device('/cpu:0'):
        train_images = tf.placeholder(
            dtype=dtype,
            name='train_images',
            shape=[config.train_batch_size] + config.img_shape)
        train_neural = tf.placeholder(
            dtype=dtype,
            name='train_neural',
            shape=[config.train_batch_size] + [num_neurons])
        val_images = tf.placeholder(
            dtype=dtype,
            name='val_images',
            shape=[config.val_batch_size] + config.img_shape)
        val_neural = tf.placeholder(
            dtype=dtype,
            name='val_neural',
            shape=[config.val_batch_size] + [num_neurons])

    # 5. Prepare model on GPU
    with tf.device('/gpu:0'):
        with tf.variable_scope('cnn') as scope:
            vgg = vgg16.Vgg16(vgg16_npy_path=config.vgg16_weight_path)
            train_mode = tf.get_variable(name='training', initializer=False)
            vgg.build(
                train_images,
                output_shape=1000,  # hardcode
                train_mode=train_mode,
                final_layer=layer_name)

            # Select a layer
            activities = vgg[layer_name]

            # Feature reduce with a 1x1 conv
            if config.reduce_features is not None:
                vgg, activities, reduce_weights = ff.pool_ff_interpreter(
                    self=vgg,
                    it_neuron_op='1x1conv',
                    act=activities,
                    it_name='feature_reduce',
                    out_channels=config.reduce_features,
                    aux=None)
            else:
                reduce_weights = None

            # Add con-model if requested
            if cm_type is not None and cm_type != 'none':
                norms = normalizations.normalizations()
                activities, cm_weights, _ = norms[cm_type](
                    x=activities,
                    r_in=rfs['r_in'],
                    j_in=rfs['j_in'],
                    timesteps=timesteps,
                    lesions=config.lesions,
                    train=True)
            else:
                cm_weights = None

            # Create output layer for N-recording channels
            activities = tf.nn.dropout(activities, 0.5)
            vgg, output_activities, output_weights = ff.pool_ff_interpreter(
                self=vgg,
                it_neuron_op=output_type,
                act=activities,
                it_name='output',
                out_channels=num_neurons,
                aux=output_aux)

            # Prepare the loss function
            loss, _ = loss_utils.loss_interpreter(
                logits=output_activities,
                labels=train_neural,
                loss_type=config.loss_type)

            # Add feature-reduction WD
            if config.reduce_features is not None and reduce_weights is not None:
                loss += loss_utils.add_wd(
                    weights=reduce_weights,
                    wd_dict=config.wd_types)

            # Add contextual model WD
            if config.cm_wd_types is not None and cm_weights is not None:
                loss += loss_utils.add_wd(
                    weights=cm_weights,
                    wd_dict=config.cm_wd_types)

            # Add WD to output layer
            if config.wd_types is not None:
                loss += loss_utils.add_wd(
                    weights=output_weights,
                    wd_dict=config.wd_types)

            # Finetune the learning rates
            train_op = loss_utils.optimizer_interpreter(
                loss=loss,
                lr=config.lr,
                optimizer=config.optimizer)

            # Calculate metrics
            train_accuracy = eval_metrics.metric_interpreter(
                metric=config.metric,
                pred=output_activities,
                labels=train_neural)

            # Add summaries for debugging
            tf.summary.image('train images', train_images)
            tf.summary.image('validation images', val_images)
            tf.summary.scalar('loss', loss)
            tf.summary.scalar('training accuracy', train_accuracy)

            # Setup validation op
            scope.reuse_variables()

            # Validation graph is the same as training except no batchnorm
            val_vgg = vgg16.Vgg16(vgg16_npy_path=config.vgg16_weight_path)
            val_vgg.build(
                val_images,
                output_shape=1000,
                final_layer=layer_name)

            # Select a layer
            val_activities = val_vgg[layer_name]

            # Add feature reduction if requested
            if config.reduce_features is not None:
                val_vgg, val_activities, _ = ff.pool_ff_interpreter(
                    self=val_vgg,
                    it_neuron_op=config.reduce_type,
                    act=val_activities,
                    it_name='feature_reduce',
                    out_channels=config.reduce_features,
                    aux=None)
            else:
                reduce_weights = None

            # Add con-model if requested
            if cm_type is not None and cm_type != 'none':
                val_activities, _, _ = norms[cm_type](
                    x=val_activities,
                    r_in=rfs['r_in'],
                    j_in=rfs['j_in'],
                    timesteps=timesteps,
                    lesions=config.lesions,
                    train=False)

            # Create output layer for N-recording channels
            val_vgg, val_output_activities, _ = ff.pool_ff_interpreter(
                self=val_vgg,
                it_neuron_op=output_type,
                act=val_activities,
                it_name='output',
                out_channels=num_neurons,
                aux=output_aux)

            # Prepare the loss function
            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_output_activities,
                labels=val_neural,
                loss_type=config.loss_type,
                max_spikes=config.max_spikes)

            # Calculate metrics
            val_accuracy = eval_metrics.metric_interpreter(
                metric=config.metric,
                pred=val_output_activities,
                labels=val_neural)
            tf.summary.scalar('validation loss', val_loss)
            tf.summary.scalar('validation accuracy', val_accuracy)

    # Set up summaries and saver
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(tf.global_variables_initializer())
    summary_dir = os.path.join(config.summaries, dt_stamp)
    summary_writer = tf.summary.FileWriter(summary_dir, sess.graph)

    # Start training loop
    train_vars = {
        'images': train_images,
        'neural_data': train_neural,
        'loss': loss,
        'score': train_accuracy,
        'train_op': train_op
    }
    if cm_weights is not None:
        for k, v in cm_weights.iteritems():
            train_vars[k] = v
    val_vars = {
        'images': val_images,
        'neural_data': val_neural,
        'loss': val_loss,
        'score': val_accuracy,
    }
    extra_params = {
        'cm_type': cm_type,
        'layer_name': layer_name,
        'output_type': output_type,
        'project_name': project_name,
        'model_type': model_type,
        'lesions': config.lesions,
        'timesteps': timesteps
    }
    np.savez(
        os.path.join(out_dir, 'training_config_file'),
        config=config,
        extra_params=extra_params)
    train_cv_out, val_cv_out, weights = training.training_loop(
        config=config,
        neural_data=neural_data,
        images=images,
        target_size=config.img_shape[:2],
        sess=sess,
        train_vars=train_vars,
        val_vars=val_vars,
        summary_op=summary_op,
        summary_writer=summary_writer,
        checkpoint_dir=checkpoint_dir,
        summary_dir=summary_dir,
        saver=saver)
    np.savez(
        os.path.join(out_dir, 'data'),
        config=config,
        extra_params=extra_params,
        train_cv_out=train_cv_out,
        val_cv_out=val_cv_out,
        weight=weights)
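

# --- Hedged usage sketch (assumption) ---
# extract_vgg_features() hands its placeholders to training.training_loop,
# which handles batching and cross-validation internally. For reference, a
# hand-rolled single step against those placeholders would look roughly like
# the helper below; the batch slicing is illustrative only.
def _single_feed_step_sketch(sess, train_vars, images, neural_data, batch_size):
    """Run one training step by feeding the train placeholders directly."""
    batch_images = images[:batch_size]
    batch_neural = neural_data[:batch_size]
    _, step_loss = sess.run(
        [train_vars['train_op'], train_vars['loss']],
        feed_dict={
            train_vars['images']: batch_images,
            train_vars['neural_data']: batch_neural})
    return step_loss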