def main(experiment_name, list_experiments=False, gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB."""
    if list_experiments:
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        print 'You can add to the DB with: '\
            'python prepare_experiments.py --experiment=%s' % \
            exps[0].values()[0]
        return

    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()

    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name,
        log=log,
        config=config)  # Update config w/ DB params
    dataset_module = py_utils.import_module(
        model_dir=config.dataset_info,
        dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    train_data, train_means = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[1],  # TODO: SEARCH FOR INDEX.
        log=log)
    val_data, val_means = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[0],
        log=log)

    # Initialize output folders
    dir_list = {
        'checkpoints': os.path.join(config.checkpoints, condition_label),
        'summaries': os.path.join(config.summaries, condition_label),
        'condition_evaluations': os.path.join(
            config.condition_evaluations, condition_label),
        'experiment_evaluations': os.path.join(  # DEPRECATED
            config.experiment_evaluations, experiment_label),
        'visualization': os.path.join(config.visualizations, condition_label),
        'weights': os.path.join(
            config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]

    # Prepare data loaders on the cpu
    config.data_augmentations = py_utils.flatten_list(
        config.data_augmentations,
        log)
    with tf.device('/cpu:0'):
        train_images, train_labels = data_loader.inputs(
            dataset=train_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=config.data_augmentations,
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle)
        val_images, val_labels = data_loader.inputs(
            dataset=val_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=config.data_augmentations,
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle)
    log.info('Created tfrecord dataloader tensors.')

    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join(
                'models',
                'structs',
                experiment_name).replace(os.path.sep, '.'))
    except IOError:
        print 'Could not find the model structure: %s' % experiment_name
        raise  # model_dict is required below; re-raise instead of continuing

    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure,
        exp_params=exp_params)

    # Prepare model on GPU
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:
            # Training model
            if len(dataset_module.output_size) > 1:
                log.warning(
                    'Found > 1 dimension for your output size. '
                    'Converting to a scalar.')
                dataset_module.output_size = np.prod(
                    dataset_module.output_size)

            if hasattr(model_dict, 'output_structure'):
                # Use specified output layer
                output_structure = model_dict.output_structure
            else:
                output_structure = None
            model = model_utils.model_class(
                mean=train_means,
                training=True,
                output_size=dataset_module.output_size)
            train_scores, model_summary = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            log.info('Built training model.')
            log.debug(
                json.dumps(model_summary, indent=4),
                verbose=0)
            print_model_architecture(model_summary)

            # Prepare the loss function
            train_loss, _ = loss_utils.loss_interpreter(
                logits=train_scores,
                labels=train_labels,
                loss_type=config.loss_function,
                dataset_module=dataset_module)

            # Add weight decay if requested
            if len(model.regularizations) > 0:
                train_loss = loss_utils.wd_loss(
                    regularizations=model.regularizations,
                    loss=train_loss,
                    wd_penalty=config.regularization_strength)
            train_op = loss_utils.optimizer_interpreter(
                loss=train_loss,
                lr=config.lr,
                optimizer=config.optimizer,
                constraints=config.optimizer_constraints,
                model=model)
            log.info('Built training loss function.')
            train_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,
                pred=train_scores,
                labels=train_labels)  # training accuracy
            if int(train_images.get_shape()[-1]) <= 3:
                tf.summary.image('train images', train_images)
            tf.summary.scalar('training loss', train_loss)
            tf.summary.scalar('training accuracy', train_accuracy)
            log.info('Added training summaries.')

            # Validation model
            scope.reuse_variables()
            val_model = model_utils.model_class(
                mean=val_means,
                # NOTE: the original passed training=True here; validation
                # should disable training-only ops (dropout, BN updates).
                training=False,
                output_size=dataset_module.output_size)
            val_scores, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            log.info('Built validation model.')
            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_scores,
                labels=val_labels,
                loss_type=config.loss_function,
                dataset_module=dataset_module)
            val_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,
                pred=val_scores,
                labels=val_labels)  # validation accuracy
            if int(val_images.get_shape()[-1]) <= 3:
                tf.summary.image('val images', val_images)
            tf.summary.scalar('validation loss', val_loss)
            tf.summary.scalar('validation accuracy', val_accuracy)
            log.info('Added validation summaries.')

    # Set up summaries and saver
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()))
    summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph)

    # Set up exemplar threading
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_accuracy': train_accuracy,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores
    }
    val_dict = {
        'val_loss': val_loss,
        'val_accuracy': val_accuracy,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
    }

    # Start training loop
    np.save(
        os.path.join(
            dir_list['condition_evaluations'],
            'training_config_file'),
        config)
    log.info('Starting training')
    output_dict = training.training_loop(
        config=config,
        db=db,
        coord=coord,
        sess=sess,
        summary_op=summary_op,
        summary_writer=summary_writer,
        saver=saver,
        threads=threads,
        summary_dir=dir_list['summaries'],
        checkpoint_dir=dir_list['checkpoints'],
        weight_dir=dir_list['weights'],
        train_dict=train_dict,
        val_dict=val_dict,
        train_model=model,
        val_model=val_model,
        exp_params=exp_params)
    log.info('Finished training.')

    model_name = config.model_struct.replace('/', '_')
    py_utils.save_npys(
        data=output_dict,
        model_name=model_name,
        output_string=dir_list['experiment_evaluations'])
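

# --- Sketch (not part of the original module): a minimal CLI entry point for
# main() above. The flag names here are assumptions chosen to mirror the
# function signature; prepare_experiments.py, referenced in the help text
# printed by main(), is the companion script for adding experiments.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='Run a tensorflow experiment worker against the DB.')
    parser.add_argument(
        '--experiment', dest='experiment_name', type=str, default=None,
        help='Name of the experiment to run (None pulls one from the DB).')
    parser.add_argument(
        '--list_experiments', dest='list_experiments', action='store_true',
        help='List initialized experiments in the DB and exit.')
    parser.add_argument(
        '--gpu', dest='gpu_device', type=str, default='/gpu:0',
        help='Device string for model placement.')
    args = parser.parse_args()
    main(**vars(args))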
def main(experiment_name,
         list_experiments=False,
         load_and_evaluate_ckpt=None,
         config_file=None,
         ckpt_file=None,
         gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB."""
    if list_experiments:
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        if len(exps) == 0:
            print 'No experiments found.'
        else:
            print 'You can add to the DB with: '\
                'python prepare_experiments.py --experiment=%s' % \
                exps[0].values()[0]
        return

    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()

    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config.load_and_evaluate_ckpt = load_and_evaluate_ckpt
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name,
        log=log,
        config=config)  # Update config w/ DB params
    if config_file is not None:
        # Restore a saved config. NOTE: the original loaded config_file
        # unconditionally, which fails when it is left at its None default.
        config = np.load(config_file).item()
    dataset_module = py_utils.import_module(
        model_dir=config.dataset_info,
        dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    train_data, train_means_image, train_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[1],  # TODO: SEARCH FOR INDEX.
        log=log)
    val_data, val_means_image, val_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[0],
        log=log)

    # Initialize output folders
    dir_list = {
        'checkpoints': os.path.join(config.checkpoints, condition_label),
        'summaries': os.path.join(config.summaries, condition_label),
        'condition_evaluations': os.path.join(
            config.condition_evaluations, condition_label),
        'experiment_evaluations': os.path.join(  # DEPRECATED
            config.experiment_evaluations, experiment_label),
        'visualization': os.path.join(config.visualizations, condition_label),
        'weights': os.path.join(
            config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]

    # Prepare data loaders on the cpu
    if all(isinstance(i, list) for i in config.data_augmentations):
        if config.data_augmentations:
            config.data_augmentations = py_utils.flatten_list(
                config.data_augmentations,
                log)
    # Evaluation worker: a single pass over the data, no shuffling
    config.epochs = 1
    config.shuffle = False
    with tf.device('/cpu:0'):
        train_images, train_labels = data_loader.inputs(
            dataset=train_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=config.data_augmentations,
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle_train,
            resize_output=config.resize_output)
        if hasattr(config, 'val_augmentations'):
            val_augmentations = config.val_augmentations
        else:
            val_augmentations = config.data_augmentations
        val_images, val_labels = data_loader.inputs(
            dataset=val_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            # NOTE: hardcoded in the original; the val_augmentations
            # computed above goes unused here.
            data_augmentations=['resize_and_crop'],
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle_val,
            resize_output=config.resize_output)
    log.info('Created tfrecord dataloader tensors.')

    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join(
                'models',
                'structs',
                experiment_name).replace(os.path.sep, '.'))
    except IOError:
        print 'Could not find the model structure: %s in folder %s' % (
            struct_name,
            experiment_name)
        raise  # model_dict is required below; re-raise instead of continuing

    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure,
        exp_params=exp_params)

    # Prepare model on GPU
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:
            # Normalize labels if needed
            if 'normalize_labels' in exp_params.keys():
                if exp_params['normalize_labels'] == 'zscore':
                    train_labels -= train_means_label['mean']
                    train_labels /= train_means_label['std']
                    log.info('Z-scoring labels.')
                elif exp_params['normalize_labels'] == 'mean':
                    train_labels -= train_means_label['mean']
                    log.info('Mean-centering labels.')

            # Training model
            if len(dataset_module.output_size) == 2:
                log.warning(
                    'Found > 1 dimension for your output size. '
                    'Converting to a scalar.')
                dataset_module.output_size = np.prod(
                    dataset_module.output_size)

            if hasattr(model_dict, 'output_structure'):
                # Use specified output layer
                output_structure = model_dict.output_structure
            else:
                output_structure = None
            model = model_utils.model_class(
                mean=train_means_image,
                training=True,
                output_size=dataset_module.output_size)
            train_scores, model_summary = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            eval_graph = tf.Graph()
            with eval_graph.as_default():
                with eval_graph.gradient_override_map({'selu': 'GradLRP'}):
                    # NOTE: gradient overrides only affect ops created inside
                    # eval_graph; these gradients are built from
                    # default-graph tensors.
                    train_grad_images = tf.gradients(
                        train_scores[0] * tf.cast(train_labels, tf.float32),
                        train_images)[0]
            log.info('Built training model.')
            log.debug(
                json.dumps(model_summary, indent=4),
                verbose=0)
            print_model_architecture(model_summary)

            # Check the shapes of labels and scores
            if not isinstance(train_scores, list):
                if len(train_scores.get_shape()) != len(
                        train_labels.get_shape()):
                    train_shape = train_scores.get_shape().as_list()
                    label_shape = train_labels.get_shape().as_list()
                    if len(train_shape) == 2 and len(
                            label_shape) == 1 and train_shape[-1] == 1:
                        train_labels = tf.expand_dims(train_labels, axis=-1)
                    elif len(label_shape) == 2 and len(
                            train_shape) == 1 and label_shape[-1] == 1:
                        # NOTE: the original elif repeated the first branch's
                        # condition, making this branch unreachable; the
                        # mirrored check is the apparent intent.
                        train_scores = tf.expand_dims(train_scores, axis=-1)

            # Prepare the loss function
            train_loss, _ = loss_utils.loss_interpreter(
                logits=train_scores,  # TODO
                labels=train_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(train_loss, list):
                for lidx, tl in enumerate(train_loss):
                    tf.summary.scalar('training_loss_%s' % lidx, tl)
                train_loss = tf.add_n(train_loss)
            else:
                tf.summary.scalar('training_loss', train_loss)

            # Add weight decay if requested
            if len(model.regularizations) > 0:
                train_loss = loss_utils.wd_loss(
                    regularizations=model.regularizations,
                    loss=train_loss,
                    wd_penalty=config.regularization_strength)
            train_op = loss_utils.optimizer_interpreter(
                loss=train_loss,
                lr=config.lr,
                optimizer=config.optimizer,
                constraints=config.optimizer_constraints,
                model=model)
            log.info('Built training loss function.')

            # Add a score for the training set
            train_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO: Attach to exp cnfg
                pred=train_scores,  # TODO
                labels=train_labels)

            # Add aux scores if requested
            train_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    train_aux[m] = eval_metrics.metric_interpreter(
                        metric=m,
                        pred=train_scores,
                        labels=train_labels)[0]  # TODO: Fix for multiloss

            # Prepare remaining tensorboard summaries
            if len(train_images.get_shape()) == 4:
                tf_fun.image_summaries(train_images, tag='Training images')
            if len(train_labels.get_shape()) > 2:
                tf_fun.image_summaries(train_labels, tag='Training_targets')
                tf_fun.image_summaries(
                    train_scores,
                    tag='Training_predictions')
            if isinstance(train_accuracy, list):
                for tidx, ta in enumerate(train_accuracy):
                    tf.summary.scalar('training_accuracy_%s' % tidx, ta)
            else:
                tf.summary.scalar('training_accuracy', train_accuracy)
            if config.pr_curve:
                if isinstance(train_scores, list):
                    for pidx, train_score in enumerate(train_scores):
                        train_label = train_labels[:, pidx]
                        pr_summary.op(
                            tag='training_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(train_score, axis=-1),
                                tf.float32),
                            labels=tf.cast(train_label, tf.bool),
                            display_name='training_precision_recall_%s' % pidx)
                else:
                    pr_summary.op(
                        tag='training_pr',
                        predictions=tf.cast(
                            tf.argmax(train_scores, axis=-1),
                            tf.float32),
                        labels=tf.cast(train_labels, tf.bool),
                        display_name='training_precision_recall')
            log.info('Added training summaries.')

            # Validation model
            scope.reuse_variables()
            val_model = model_utils.model_class(
                mean=train_means_image,  # Normalize with train data
                training=False,
                output_size=dataset_module.output_size)
            val_scores, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            eval_graph = tf.Graph()
            with eval_graph.as_default():
                with eval_graph.gradient_override_map({'selu': 'GradLRP'}):
                    # NOTE: same caveat as the training gradients above.
                    val_grad_images = tf.gradients(
                        val_scores[0] * tf.cast(val_labels, tf.float32),
                        val_images)[0]
            log.info('Built validation model.')

            # Check the shapes of labels and scores
            if not isinstance(val_scores, list):
                if len(val_scores.get_shape()) != len(val_labels.get_shape()):
                    val_shape = val_scores.get_shape().as_list()
                    val_label_shape = val_labels.get_shape().as_list()
                    if len(val_shape) == 2 and len(
                            val_label_shape) == 1 and val_shape[-1] == 1:
                        val_labels = tf.expand_dims(val_labels, axis=-1)
                    elif len(val_label_shape) == 2 and len(
                            val_shape) == 1 and val_label_shape[-1] == 1:
                        # Mirrored check; see the training branch above.
                        val_scores = tf.expand_dims(val_scores, axis=-1)
            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_scores,
                labels=val_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(val_loss, list):
                for lidx, tl in enumerate(val_loss):
                    tf.summary.scalar('validation_loss_%s' % lidx, tl)
                val_loss = tf.add_n(val_loss)
            else:
                tf.summary.scalar('validation_loss', val_loss)

            # Add a score for the validation set
            val_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO
                pred=val_scores,
                labels=val_labels)

            # Add aux scores if requested
            val_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    val_aux[m] = eval_metrics.metric_interpreter(
                        metric=m,
                        pred=val_scores,
                        labels=val_labels)[0]  # TODO: Fix for multiloss

            # Prepare tensorboard summaries
            if len(val_images.get_shape()) == 4:
                tf_fun.image_summaries(val_images, tag='Validation')
            if len(val_labels.get_shape()) > 2:
                tf_fun.image_summaries(val_labels, tag='Validation_targets')
                tf_fun.image_summaries(
                    val_scores,
                    tag='Validation_predictions')
            if isinstance(val_accuracy, list):
                for vidx, va in enumerate(val_accuracy):
                    tf.summary.scalar('validation_accuracy_%s' % vidx, va)
            else:
                tf.summary.scalar('validation_accuracy', val_accuracy)
            if config.pr_curve:
                if isinstance(val_scores, list):
                    for pidx, val_score in enumerate(val_scores):
                        val_label = val_labels[:, pidx]
                        pr_summary.op(
                            tag='validation_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(val_score, axis=-1),
                                tf.float32),
                            labels=tf.cast(val_label, tf.bool),
                            display_name='validation_precision_recall_%s' %
                            pidx)
                else:
                    pr_summary.op(
                        tag='validation_pr',
                        predictions=tf.cast(
                            tf.argmax(val_scores, axis=-1),
                            tf.float32),
                        labels=tf.cast(val_labels, tf.bool),
                        display_name='validation_precision_recall')
            log.info('Added validation summaries.')

    # Set up summaries and saver
    saver = tf.train.Saver(tf.global_variables())

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()))

    # Set up exemplar threading
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores,
        'train_grad_images': train_grad_images
    }
    val_dict = {
        'val_loss': val_loss,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
        'val_grad_images': val_grad_images
    }
    if isinstance(train_accuracy, list):
        for tidx, (ta, va) in enumerate(zip(train_accuracy, val_accuracy)):
            train_dict['train_accuracy_%s' % tidx] = ta
            val_dict['val_accuracy_%s' % tidx] = va
    else:
        train_dict['train_accuracy_0'] = train_accuracy
        val_dict['val_accuracy_0'] = val_accuracy

    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        del train_dict['train_op']

    if hasattr(dataset_module, 'aux_scores'):
        # Attach auxiliary scores to tensor dicts. NOTE: the original
        # checked for 'aux_score' (singular), which never matches the
        # attribute used below.
        for m in dataset_module.aux_scores:
            train_dict['train_aux_%s' % m] = train_aux[m]
            val_dict['val_aux_%s' % m] = val_aux[m]

    # Start evaluation loop
    checkpoint_dir = dir_list['checkpoints']
    step = 0
    train_losses, train_accs, train_aux, timesteps = {}, {}, {}, {}
    val_scores, val_aux, val_labels, val_grads = {}, {}, {}, {}
    train_images, val_images = {}, {}
    train_scores, train_labels = {}, {}
    train_aux_check = np.any(['aux_score' in k for k in train_dict.keys()])
    val_aux_check = np.any(['aux_score' in k for k in val_dict.keys()])

    # Restore model
    saver.restore(sess, ckpt_file)

    # Start evaluation
    try:
        while not coord.should_stop():
            start_time = time.time()
            train_vars = sess.run(train_dict.values())
            it_train_dict = {
                k: v for k, v in zip(train_dict.keys(), train_vars)}
            duration = time.time() - start_time
            train_losses[step] = it_train_dict['train_loss']
            train_accs[step] = it_train_dict['train_accuracy_0']
            train_images[step] = it_train_dict['train_images']
            train_labels[step] = it_train_dict['train_labels']
            train_scores[step] = it_train_dict['train_scores']
            timesteps[step] = duration
            if train_aux_check:
                # Loop through to find aux scores
                it_train_aux = {
                    itk: itv
                    for itk, itv in it_train_dict.iteritems()
                    if 'aux_score' in itk}
                train_aux[step] = it_train_aux
            assert not np.isnan(
                it_train_dict['train_loss']).any(), \
                'Model diverged with loss = NaN'
            if step % config.validation_iters == 0:
                it_val_scores, it_val_labels, it_val_aux = [], [], []
                it_val_grads, it_val_ims = [], []
                for num_vals in range(config.num_validation_evals):
                    # Validation accuracy as the average of n batches
                    val_vars = sess.run(val_dict.values())
                    it_val_dict = {
                        k: v for k, v in zip(val_dict.keys(), val_vars)}
                    it_val_labels += [it_val_dict['val_labels']]
                    it_val_scores += [it_val_dict['val_scores']]
                    it_val_grads += [it_val_dict['val_grad_images']]
                    it_val_ims += [it_val_dict['val_images']]
                    if val_aux_check:
                        iva = {
                            itk: itv
                            for itk, itv in it_val_dict.iteritems()
                            if 'aux_score' in itk}
                        it_val_aux += [iva]
                val_scores[step] = it_val_scores
                val_labels[step] = it_val_labels
                val_aux[step] = it_val_aux
                # NOTE: the original swapped images and gradients in these
                # two assignments; fixed to match the np.savez below.
                val_images[step] = it_val_ims
                val_grads[step] = it_val_grads

            # End iteration
            step += 1
    except tf.errors.OutOfRangeError:
        print 'Done with evaluation for %d epochs, %d steps.' % (
            config.epochs, step)
        print 'Saved to: %s' % checkpoint_dir
    finally:
        coord.request_stop()
        coord.join(threads)
        sess.close()

    np.savez(
        'val_imgs_grads',
        val_images=val_images,
        val_grads=val_grads,
        val_labels=val_labels,
        val_scores=val_scores)
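

# --- Sketch (not part of the original module): the fetch-dict pattern used in
# the evaluation loop above, reduced to a runnable example. sess.run() is
# handed dict.values() and the outputs are re-zipped against dict.keys(); in
# CPython 2 this is safe because keys() and values() enumerate in the same
# order as long as the dict is not mutated between the two calls.
import tensorflow as tf

fetches = {'two': tf.constant(2.0), 'three': tf.constant(3.0)}
with tf.Session() as demo_sess:
    fetched = demo_sess.run(fetches.values())
    out = {k: v for k, v in zip(fetches.keys(), fetched)}
    assert out['two'] == 2.0 and out['three'] == 3.0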
def main(
        experiment_name,
        list_experiments=False,
        load_and_evaluate_ckpt=None,
        placeholder_data=None,
        grad_images=False,
        gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB."""
    if list_experiments:
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        if len(exps) == 0:
            print 'No experiments found.'
        else:
            print 'You can add to the DB with: '\
                'python prepare_experiments.py --experiment=%s' % \
                exps[0].values()[0]
        return

    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()

    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    assert experiment_name is not None, 'Empty experiment name.'
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config.load_and_evaluate_ckpt = load_and_evaluate_ckpt
    if load_and_evaluate_ckpt is not None:
        # Evaluating a checkpoint: import the evaluation loop
        from ops import evaluation
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name,
        log=log,
        config=config)  # Update config w/ DB params
    dataset_module = py_utils.import_module(
        model_dir=config.dataset_info,
        dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    train_key = [k for k in dataset_module.folds.keys() if 'train' in k]
    if not len(train_key):
        train_key = 'train'
    else:
        train_key = train_key[0]
    train_data, train_means_image, train_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=train_key,
        log=log)
    val_key = [k for k in dataset_module.folds.keys() if 'val' in k]
    if not len(val_key):
        val_key = 'train'
    else:
        val_key = val_key[0]
    val_data, val_means_image, val_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=val_key,
        log=log)

    # Initialize output folders
    dir_list = {
        'checkpoints': os.path.join(
            config.checkpoints, condition_label),
        'summaries': os.path.join(
            config.summaries, condition_label),
        'condition_evaluations': os.path.join(
            config.condition_evaluations, condition_label),
        'experiment_evaluations': os.path.join(  # DEPRECATED
            config.experiment_evaluations, experiment_label),
        'visualization': os.path.join(
            config.visualizations, condition_label),
        'weights': os.path.join(
            config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]

    # Prepare data loaders on the cpu
    if all(isinstance(i, list) for i in config.data_augmentations):
        if config.data_augmentations:
            config.data_augmentations = py_utils.flatten_list(
                config.data_augmentations,
                log)
    if load_and_evaluate_ckpt is not None:
        config.epochs = 1
        config.train_shuffle = False
        config.val_shuffle = False
    with tf.device('/cpu:0'):
        if placeholder_data:
            placeholder_shape = placeholder_data['train_image_shape']
            placeholder_dtype = placeholder_data['train_image_dtype']
            original_train_images = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='train_images')
            placeholder_shape = placeholder_data['train_label_shape']
            placeholder_dtype = placeholder_data['train_label_dtype']
            original_train_labels = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='train_labels')
            placeholder_shape = placeholder_data['val_image_shape']
            placeholder_dtype = placeholder_data['val_image_dtype']
            original_val_images = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='val_images')
            placeholder_shape = placeholder_data['val_label_shape']
            placeholder_dtype = placeholder_data['val_label_dtype']
            original_val_labels = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='val_labels')

            # Apply augmentations
            train_images, train_labels = \
                data_loader.placeholder_image_augmentations(
                    images=original_train_images,
                    model_input_image_size=dataset_module.model_input_image_size,
                    labels=original_train_labels,
                    data_augmentations=config.data_augmentations,
                    batch_size=config.batch_size)
            val_images, val_labels = \
                data_loader.placeholder_image_augmentations(
                    images=original_val_images,
                    model_input_image_size=dataset_module.model_input_image_size,
                    labels=original_val_labels,
                    data_augmentations=config.data_augmentations,
                    batch_size=config.batch_size)

            # Store in the placeholder dict
            placeholder_data['train_images'] = original_train_images
            placeholder_data['train_labels'] = original_train_labels
            placeholder_data['val_images'] = original_val_images
            placeholder_data['val_labels'] = original_val_labels
        else:
            train_images, train_labels = data_loader.inputs(
                dataset=train_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=config.data_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_train,
                resize_output=config.resize_output)
            if hasattr(config, 'val_augmentations'):
                val_augmentations = config.val_augmentations
            else:
                val_augmentations = config.data_augmentations
            val_images, val_labels = data_loader.inputs(
                dataset=val_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=val_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_val,
                resize_output=config.resize_output)
    log.info('Created tfrecord dataloader tensors.')

    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join(
                'models',
                'structs',
                experiment_name).replace(os.path.sep, '.'))
    except IOError:
        print 'Could not find the model structure: %s in folder %s' % (
            struct_name,
            experiment_name)
        raise  # model_dict is required below; re-raise instead of continuing

    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure,
        exp_params=exp_params)

    # Prepare variables for the models
    if len(dataset_module.output_size) == 2:
        log.warning(
            'Found > 1 dimension for your output size. '
            'Converting to a scalar.')
        dataset_module.output_size = np.prod(
            dataset_module.output_size)

    if hasattr(model_dict, 'output_structure'):
        # Use specified output layer
        output_structure = model_dict.output_structure
    else:
        output_structure = None

    # Correct number of output neurons if needed
    if config.dataloader_override and\
            'weights' in output_structure[-1].keys():
        output_neurons = output_structure[-1]['weights'][0]
        size_check = output_neurons != dataset_module.output_size
        fc_check = output_structure[-1]['layers'][0] == 'fc'
        if size_check and fc_check:
            output_structure[-1]['weights'][0] = dataset_module.output_size
            log.warning('Adjusted output neurons from %s to %s.' % (
                output_neurons,
                dataset_module.output_size))

    # Prepare model on GPU
    if not hasattr(dataset_module, 'input_normalization'):
        dataset_module.input_normalization = None
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:
            # Training model
            model = model_utils.model_class(
                mean=train_means_image,
                training=True,
                output_size=dataset_module.output_size,
                input_normalization=dataset_module.input_normalization)
            train_scores, model_summary, _ = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            if grad_images:
                oh_dims = int(train_scores.get_shape()[-1])
                target_scores = tf.one_hot(
                    train_labels, oh_dims) * train_scores
                train_gradients = tf.gradients(
                    target_scores, train_images)[0]
            log.info('Built training model.')
            log.debug(
                json.dumps(model_summary, indent=4),
                verbose=0)
            print_model_architecture(model_summary)

            # Normalize labels on GPU if needed
            if 'normalize_labels' in exp_params.keys():
                if exp_params['normalize_labels'] == 'zscore':
                    train_labels -= train_means_label['mean']
                    train_labels /= train_means_label['std']
                    val_labels -= train_means_label['mean']
                    val_labels /= train_means_label['std']
                    log.info('Z-scoring labels.')
                elif exp_params['normalize_labels'] == 'mean':
                    train_labels -= train_means_label['mean']
                    val_labels -= val_means_label['mean']
                    log.info('Mean-centering labels.')

            # Check the shapes of labels and scores.
            # NOTE: val_scores/val_labels are referenced here although the
            # validation model is only built below; if ranks mismatch this
            # block raises a NameError in the original as well.
            if not isinstance(train_scores, list):
                if len(train_scores.get_shape()) != len(
                        train_labels.get_shape()):
                    train_shape = train_scores.get_shape().as_list()
                    label_shape = train_labels.get_shape().as_list()
                    val_shape = val_scores.get_shape().as_list()
                    val_label_shape = val_labels.get_shape().as_list()
                    if len(train_shape) == 2 and len(
                            label_shape) == 1 and train_shape[-1] == 1:
                        train_labels = tf.expand_dims(train_labels, axis=-1)
                        val_labels = tf.expand_dims(val_labels, axis=-1)
                    elif len(label_shape) == 2 and len(
                            train_shape) == 1 and label_shape[-1] == 1:
                        # NOTE: the original elif repeated the first branch's
                        # condition, making this branch unreachable; the
                        # mirrored check is the apparent intent.
                        train_scores = tf.expand_dims(train_scores, axis=-1)
                        val_scores = tf.expand_dims(val_scores, axis=-1)

            # Prepare the loss function
            train_loss, _ = loss_utils.loss_interpreter(
                logits=train_scores,  # TODO
                labels=train_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(train_loss, list):
                for lidx, tl in enumerate(train_loss):
                    tf.summary.scalar('training_loss_%s' % lidx, tl)
                train_loss = tf.add_n(train_loss)
            else:
                tf.summary.scalar('training_loss', train_loss)

            # Add weight decay if requested
            if len(model.regularizations) > 0:
                train_loss = loss_utils.wd_loss(
                    regularizations=model.regularizations,
                    loss=train_loss,
                    wd_penalty=config.regularization_strength)
            assert config.lr is not None, 'No learning rate.'
            # TODO: Make a QC function
            if config.lr > 1:
                old_lr = config.lr
                config.lr = loss_utils.create_lr_schedule(
                    train_batch=config.batch_size,
                    num_training=config.lr)
                config.optimizer = 'momentum'
                log.info('Forcing momentum classifier.')
            else:
                old_lr = None
            train_op = loss_utils.optimizer_interpreter(
                loss=train_loss,
                lr=config.lr,
                optimizer=config.optimizer,
                constraints=config.optimizer_constraints,
                model=model)
            log.info('Built training loss function.')

            # Add a score for the training set
            train_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO: Attach to exp cnfg
                pred=train_scores,  # TODO
                labels=train_labels)

            # Add aux scores if requested
            train_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    train_aux[m] = eval_metrics.metric_interpreter(
                        metric=m,
                        pred=train_scores,
                        labels=train_labels)  # [0]  TODO: Fix for multiloss

            # Prepare remaining tensorboard summaries
            if config.tensorboard_images:
                if len(train_images.get_shape()) == 4:
                    tf_fun.image_summaries(
                        train_images,
                        tag='Training images')
                if (np.asarray(
                        train_labels.get_shape().as_list()) > 1).sum() > 2:
                    tf_fun.image_summaries(
                        train_labels,
                        tag='Training_targets')
                    tf_fun.image_summaries(
                        train_scores,
                        tag='Training_predictions')
            if isinstance(train_accuracy, list):
                for tidx, ta in enumerate(train_accuracy):
                    tf.summary.scalar('training_accuracy_%s' % tidx, ta)
            else:
                tf.summary.scalar('training_accuracy', train_accuracy)
            if config.pr_curve:
                if isinstance(train_scores, list):
                    for pidx, train_score in enumerate(train_scores):
                        train_label = train_labels[:, pidx]
                        pr_summary.op(
                            tag='training_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(train_score, axis=-1),
                                tf.float32),
                            labels=tf.cast(train_label, tf.bool),
                            display_name='training_precision_recall_%s' % pidx)
                else:
                    pr_summary.op(
                        tag='training_pr',
                        predictions=tf.cast(
                            tf.argmax(train_scores, axis=-1),
                            tf.float32),
                        labels=tf.cast(train_labels, tf.bool),
                        display_name='training_precision_recall')
            log.info('Added training summaries.')

        # NOTE: the original passed tf.AUTO_REUSE positionally, where
        # variable_scope expects default_name; the keyword form is intended.
        with tf.variable_scope('cnn', reuse=tf.AUTO_REUSE) as scope:
            # Validation model
            scope.reuse_variables()
            val_model = model_utils.model_class(
                mean=train_means_image,  # Normalize with train data
                training=False,
                output_size=dataset_module.output_size,
                input_normalization=dataset_module.input_normalization)
            val_scores, _, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            if grad_images:
                oh_dims = int(val_scores.get_shape()[-1])
                target_scores = tf.one_hot(val_labels, oh_dims) * val_scores
                val_gradients = tf.gradients(target_scores, val_images)[0]
            log.info('Built validation model.')

            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_scores,
                labels=val_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(val_loss, list):
                for lidx, tl in enumerate(val_loss):
                    tf.summary.scalar('validation_loss_%s' % lidx, tl)
                val_loss = tf.add_n(val_loss)
            else:
                tf.summary.scalar('validation_loss', val_loss)

            # Add a score for the validation set
            val_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO
                pred=val_scores,
                labels=val_labels)

            # Add aux scores if requested
            val_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    val_aux[m] = eval_metrics.metric_interpreter(
                        metric=m,
                        pred=val_scores,
                        labels=val_labels)  # [0]  TODO: Fix for multiloss

            # Prepare tensorboard summaries
            if config.tensorboard_images:
                if len(val_images.get_shape()) == 4:
                    tf_fun.image_summaries(val_images, tag='Validation')
                if (np.asarray(
                        val_labels.get_shape().as_list()) > 1).sum() > 2:
                    tf_fun.image_summaries(
                        val_labels,
                        tag='Validation_targets')
                    tf_fun.image_summaries(
                        val_scores,
                        tag='Validation_predictions')
            if isinstance(val_accuracy, list):
                for vidx, va in enumerate(val_accuracy):
                    tf.summary.scalar('validation_accuracy_%s' % vidx, va)
            else:
                tf.summary.scalar('validation_accuracy', val_accuracy)
            if config.pr_curve:
                if isinstance(val_scores, list):
                    for pidx, val_score in enumerate(val_scores):
                        val_label = val_labels[:, pidx]
                        pr_summary.op(
                            tag='validation_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(val_score, axis=-1),
                                tf.float32),
                            labels=tf.cast(val_label, tf.bool),
                            display_name='validation_precision_recall_%s' %
                            pidx)
                else:
                    pr_summary.op(
                        tag='validation_pr',
                        predictions=tf.cast(
                            tf.argmax(val_scores, axis=-1),
                            tf.float32),
                        labels=tf.cast(val_labels, tf.bool),
                        display_name='validation_precision_recall')
            log.info('Added validation summaries.')

    # Set up summaries and saver
    if not hasattr(config, 'max_to_keep'):
        config.max_to_keep = None
    saver = tf.train.Saver(
        var_list=tf.global_variables(),
        max_to_keep=config.max_to_keep)
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()))
    summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph)

    # Set up exemplar threading
    if placeholder_data:
        coord, threads = None, None
    else:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores
    }
    val_dict = {
        'val_loss': val_loss,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
    }
    if grad_images:
        train_dict['train_gradients'] = train_gradients
        val_dict['val_gradients'] = val_gradients
    if isinstance(train_accuracy, list):
        for tidx, (ta, va) in enumerate(zip(train_accuracy, val_accuracy)):
            train_dict['train_accuracy_%s' % tidx] = ta
            val_dict['val_accuracy_%s' % tidx] = va
    else:
        train_dict['train_accuracy_0'] = train_accuracy
        val_dict['val_accuracy_0'] = val_accuracy

    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        del train_dict['train_op']

    if hasattr(dataset_module, 'aux_scores'):
        # Attach auxiliary scores to tensor dicts. NOTE: the original
        # checked for 'aux_score' (singular), which never matches the
        # attribute used below.
        for m in dataset_module.aux_scores:
            train_dict['train_aux_%s' % m] = train_aux[m]
            val_dict['val_aux_%s' % m] = val_aux[m]

    # Start training loop
    if old_lr is not None:
        config.lr = old_lr
    np.save(
        os.path.join(
            dir_list['condition_evaluations'],
            'training_config_file'),
        config)
    log.info('Starting training')
    if load_and_evaluate_ckpt is not None:
        return evaluation.evaluation_loop(
            config=config,
            db=db,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            threads=threads,
            summary_dir=dir_list['summaries'],
            checkpoint_dir=dir_list['checkpoints'],
            weight_dir=dir_list['weights'],
            train_dict=train_dict,
            val_dict=val_dict,
            train_model=model,
            val_model=val_model,
            exp_params=exp_params,
            placeholder_data=placeholder_data)
    else:
        output_dict = training.training_loop(
            config=config,
            db=db,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            threads=threads,
            summary_dir=dir_list['summaries'],
            checkpoint_dir=dir_list['checkpoints'],
            weight_dir=dir_list['weights'],
            train_dict=train_dict,
            val_dict=val_dict,
            train_model=model,
            val_model=val_model,
            exp_params=exp_params)
    log.info('Finished training.')

    model_name = config.model_struct.replace('/', '_')
    if output_dict is not None:
        py_utils.save_npys(
            data=output_dict,
            model_name=model_name,
            output_string=dir_list['experiment_evaluations'])
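

# --- Sketch (not part of the original module): the 'zscore' label
# normalization applied in main() above, reduced to numpy. Train-set
# statistics are reused for the validation labels so both splits live in the
# same normalized space. The statistics below are hypothetical.
import numpy as np

train_means_label = {'mean': 2.0, 'std': 0.5}  # hypothetical statistics
train_labels = np.array([1.0, 2.0, 3.0])
val_labels = np.array([2.0, 2.5])
train_labels = (train_labels - train_means_label['mean']) / \
    train_means_label['std']
val_labels = (val_labels - train_means_label['mean']) / \
    train_means_label['std']
# train_labels -> [-2., 0., 2.]; val_labels -> [0., 1.]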
def plot_fits(
        experiment='760_cells_2017_11_04_16_29_09',
        query_db=False,
        num_models=3,
        template_exp='ALLEN_selected_cells_1',
        process_pnodes=False,
        allen_dir='/home/drew/Documents/Allen_Brain_Observatory',
        output_dir='tests/ALLEN_files',
        stimulus_type='tfrecord',
        top_n=1,
        grad='lrp',
        target_layer='conv1_1',  # conv1_1, sep_conv1_1, dog1_1
        target_model='conv2d'):  # conv2d, sep_conv2d, dog
    """Plot fits across the RF.

    experiment: Name of the Allen experiment you're plotting.
    query_db: Use data from the DB versus data in numpys.
    num_models: The number of architectures you're testing.
    template_exp: The name of the contextual_circuit model template used.
    """
    sys.path.append(allen_dir)
    from allen_config import Allen_Brain_Observatory_Config
    if process_pnodes:
        from pnodes_declare_datasets_loop import query_hp_hist, sel_exp_query
    else:
        from declare_datasets_loop import query_hp_hist, sel_exp_query

    config = Config()
    main_config = Allen_Brain_Observatory_Config()
    db_config = credentials.postgresql_connection()
    files = glob(
        os.path.join(
            allen_dir,
            main_config.multi_exps,
            experiment,
            '*.npz'))
    assert len(files), 'Couldn\'t find files.'
    out_data, xs, ys = [], [], []
    perfs, model_types, exps, arg_perf = [], [], [], []
    count = 0
    for f in files:
        data = np.load(f)
        d = {
            'x': data['rf_data'].item()['on_center_x'],
            'y': data['rf_data'].item()['on_center_y'],
            # x: files['dataset_method'].item()['x_min'],
            # y: files['dataset_method'].item()['y_min'],
        }
        exp_name = {
            'experiment_name': data['dataset_method'].item()[
                'experiment_name']}
        if query_db:
            perf = query_hp_hist(
                exp_name['experiment_name'],
                db_config=db_config)
            if perf is None:
                print 'No fits for: %s' % exp_name['experiment_name']
            else:
                raise NotImplementedError
                d['perf'] = perf
                d['max_val'] = np.max(perf)
                out_data += [d]
                xs += [np.round(d['x'])]
                ys += [np.round(d['y'])]
                perfs += [np.max(d['perf'])]
                count += 1
        else:
            data_files = glob(
                os.path.join(
                    main_config.ccbp_exp_evals,
                    exp_name['experiment_name'],
                    '*val_losses.npy'))  # Scores has preds, labels has GT
            for gd in data_files:
                mt = gd.split(
                    os.path.sep)[-1].split(
                        template_exp + '_')[-1].split('_' + 'val')[0]
                it_data = np.load(gd).item()
                sinds = np.asarray(it_data.keys())[np.argsort(it_data.keys())]
                sit_data = [it_data[idx] for idx in sinds]
                d['perf'] = sit_data
                d['max_val'] = np.max(sit_data)
                d['max_idx'] = np.argmax(sit_data)
                d['mt'] = mt
                out_data += [d]
                xs += [np.round(d['x'])]
                ys += [np.round(d['y'])]
                perfs += [np.max(sit_data)]
                arg_perf += [np.argmax(sit_data)]
                exps += [gd.split(os.path.sep)[-2]]
                model_types += [mt]
                count += 1

    # Package as a df
    xs = np.round(np.asarray(xs)).astype(int)
    ys = np.round(np.asarray(ys)).astype(int)
    perfs = np.asarray(perfs)
    arg_perf = np.asarray(arg_perf)
    exps = np.asarray(exps)
    model_types = np.asarray(model_types)

    # Filter to only keep top-scoring values at each x/y (dirty trick)
    fxs, fys, fperfs, fmodel_types, fexps, fargs = [], [], [], [], [], []
    xys = np.vstack((xs, ys)).transpose()
    cxy = np.ascontiguousarray(  # Unique rows
        xys).view(
        np.dtype((np.void, xys.dtype.itemsize * xys.shape[1])))
    _, idx = np.unique(cxy, return_index=True)
    uxys = xys[idx]
    scores = []
    for xy in uxys:
        sel_idx = (xys == xy).sum(axis=-1) == 2
        sperfs = perfs[sel_idx]
        sexps = exps[sel_idx]
        sargs = arg_perf[sel_idx]
        sel_mts = model_types[sel_idx]
        # Only keep top conv/sep spots. NOTE: the original applied the two
        # masks sequentially, misaligning the second mask against the
        # already-filtered array; combining them is the apparent intent.
        keep = (sel_mts != 'dog') & (sel_mts != 'DoG')
        sperfs = sperfs[keep]
        scores += [sperfs.mean() / sperfs.std()]
    best_fits = np.argmax(np.asarray(scores))
    xs = np.asarray([uxys[best_fits][0]])
    ys = np.asarray([uxys[best_fits][1]])
    sel_idx = (xys == uxys[best_fits]).sum(axis=-1) == 2
    perfs = np.asarray(perfs[sel_idx])
    exps = np.asarray(exps[sel_idx])
    model_types = np.asarray(model_types[sel_idx])
    umt, model_types_inds = np.unique(model_types, return_inverse=True)

    # Get weights for the top-n fitting models of each type
    it_perfs = perfs[model_types == target_model]
    it_exps = exps[model_types == target_model]
    # it_args = arg_perf[model_types == target_model]
    sorted_perfs = np.argsort(it_perfs)[::-1][:top_n]
    for idx in sorted_perfs:
        perf = sel_exp_query(
            experiment_name=it_exps[idx],
            model=target_model,
            db_config=db_config)
        # perf_steps = np.argsort([v['training_step'] for v in perf])[::-1]
        perf_steps = [v['validation_loss'] for v in perf]
        max_score = np.max(perf_steps)
        arg_perf_steps = np.argmax(perf_steps)
        sel_model = perf[arg_perf_steps]  # perf_steps[it_args[idx]]]
        print 'Using %s' % sel_model
        model_file = sel_model['ckpt_file'].split('.')[0]
        model_ckpt = '%s.ckpt-%s' % (
            model_file,
            model_file.split(os.path.sep)[-1].split('_')[-1])
        model_meta = '%s.meta' % model_ckpt

        # Pull stimuli
        stim_dir = os.path.join(
            main_config.tf_record_output,
            sel_model['experiment_name'])
        stim_files = glob(stim_dir + '*')
        stim_meta_file = [x for x in stim_files if 'meta' in x][0]
        # stim_val_data = [x for x in stim_files if 'val.tfrecords' in x][0]
        stim_val_data = [x for x in stim_files if 'train.tfrecords' in x][0]
        stim_val_mean = [x for x in stim_files if 'train_means' in x][0]
        assert stim_meta_file is not None
        assert stim_val_data is not None
        assert stim_val_mean is not None
        stim_meta_data = np.load(stim_meta_file).item()
        rf_stim_meta_data = stim_meta_data['rf_data']
        stim_mean_data = np.load(
            stim_val_mean).items()[0][1].item()['image']['mean']

        # Store sparse noise for reference
        sparse_rf_on = {
            'center_x': rf_stim_meta_data.get('on_center_x', None),
            'center_y': rf_stim_meta_data.get('on_center_y', None),
            'width_x': rf_stim_meta_data.get('on_width_x', None),
            'width_y': rf_stim_meta_data.get('on_width_y', None),
            'distance': rf_stim_meta_data.get('on_distance', None),
            'area': rf_stim_meta_data.get('on_area', None),
            'rotation': rf_stim_meta_data.get('on_rotation', None),
        }
        sparse_rf_off = {
            'center_x': rf_stim_meta_data.get('off_center_x', None),
            'center_y': rf_stim_meta_data.get('off_center_y', None),
            'width_x': rf_stim_meta_data.get('off_width_x', None),
            'width_y': rf_stim_meta_data.get('off_width_y', None),
            'distance': rf_stim_meta_data.get('off_distance', None),
            'area': rf_stim_meta_data.get('off_area', None),
            'rotation': rf_stim_meta_data.get('off_rotation', None),
        }
        sparse_rf = {'on': sparse_rf_on, 'off': sparse_rf_off}

        # Pull responses
        dataset_module = py_utils.import_module(
            model_dir=config.dataset_info,
            dataset=sel_model['experiment_name'])
        dataset_module = dataset_module.data_processing()
        with tf.device('/cpu:0'):
            if stimulus_type == 'sparse_noise':
                pass
            elif stimulus_type == 'drifting_grating':
                pass
            elif stimulus_type == 'tfrecord':
                val_images, val_labels = data_loader.inputs(
                    dataset=stim_val_data,
                    batch_size=1,
                    model_input_image_size=dataset_module.model_input_image_size,
                    tf_dict=dataset_module.tf_dict,
                    data_augmentations=[None],  # dataset_module.preprocess,
                    num_epochs=1,
                    tf_reader_settings=dataset_module.tf_reader,
                    shuffle=False)

        # Mean normalize
        log = logger.get(os.path.join(output_dir, 'sta_logs', target_model))
        data_dir = os.path.join(output_dir, 'data', target_model)
        py_utils.make_dir(data_dir)
        sys.path.append(
            os.path.join('models', 'structs', sel_model['experiment_name']))
        model_dict = __import__(target_model)
        if hasattr(model_dict, 'output_structure'):
            # Use specified output layer
            output_structure = model_dict.output_structure
        else:
            output_structure = None
        model = model_utils.model_class(
            mean=stim_mean_data,
            training=True,  # FIXME
            output_size=dataset_module.output_size)
        with tf.device('/gpu:0'):
            with tf.variable_scope('cnn') as scope:
                val_scores, model_summary = model.build(
                    data=val_images,
                    layer_structure=model_dict.layer_structure,
                    output_structure=output_structure,
                    log=log,
                    tower_name='cnn')
                if grad == 'vanilla':
                    grad_image = tf.gradients(model.output, val_images)[0]
                elif grad == 'lrp':
                    eval_graph = tf.Graph()
                    with eval_graph.as_default():
                        with eval_graph.gradient_override_map(
                                {'Relu': 'GradLRP'}):
                            grad_image = tf.gradients(
                                model.output, val_images)[0]
                elif grad == 'cam':
                    eval_graph = tf.Graph()
                    with eval_graph.as_default():
                        with eval_graph.gradient_override_map(
                                {'Relu': 'GuidedRelu'}):
                            grad_image = tf.gradients(
                                model.output, val_images)[0]
                else:
                    raise NotImplementedError
        print(json.dumps(model_summary, indent=4))

        # Set up summaries and saver
        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        # Initialize the graph
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        # Need to initialize both of these if supplying num_epochs to inputs
        sess.run(
            tf.group(
                tf.global_variables_initializer(),
                tf.local_variables_initializer()))
        saver.restore(sess, model_ckpt)

        # Set up exemplar threading
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        if target_model == 'conv2d':
            fname = [
                x for x in tf.global_variables()
                if 'conv1_1_filters:0' in x.name]
        elif target_model == 'sep_conv2d':
            fname = [
                x for x in tf.global_variables()
                if 'sep_conv1_1_filters:0' in x.name]
        elif target_model == 'dog' or target_model == 'DoG':
            fname = [
                x for x in tf.global_variables()
                if 'dog1_1_filters:0' in x.name]
        else:
            raise NotImplementedError
        val_tensors = {
            'images': val_images,
            'labels': val_labels,
            'filts': fname,
            'responses': model.output,  # model[target_layer],
            'grads': grad_image
        }
        all_images, all_preds, all_grads, all_responses = [], [], [], []
        step = 0
        try:
            while not coord.should_stop():
                val_vals = sess.run(val_tensors.values())
                val_dict = {
                    k: v for k, v in zip(val_tensors.keys(), val_vals)}
                all_images += [val_dict['images']]
                all_responses += [val_dict['responses']]
                all_preds += [val_dict['labels'].squeeze()]
                all_grads += [val_dict['grads'].squeeze()]
                print 'Finished step %s' % step
                step += 1
        except tf.errors.OutOfRangeError:
            # NOTE: the original used a bare except; queue runners signal
            # epoch exhaustion with OutOfRangeError.
            print 'Finished tfrecords'
        finally:
            coord.request_stop()
            coord.join(threads)
            sess.close()

        # Process and save data
        # if target_model != 'dog':
        #     filters = val_dict['filts'][0].squeeze().transpose(2, 0, 1)
        all_images = np.concatenate(all_images).squeeze()
        all_grads = np.asarray(all_grads)
        all_preds = np.asarray(all_preds).reshape(-1, 1)
        all_responses = np.asarray(all_responses).squeeze()
        np.savez(
            os.path.join(data_dir, 'data'),
            images=all_images,
            pred=all_preds,
            # filters=filters,
            grads=all_grads)
        # if target_model != 'dog':
        #     save_mosaic(
        #         maps=filters,  # [0].squeeze().transpose(2, 0, 1),
        #         output=os.path.join(data_dir, '%s_filters' % target_layer),
        #         rc=8,
        #         cc=4,
        #         title='%s filters' % (
        #             target_layer))
    print 'Complete.'
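

# --- Sketch (not part of the original module): how tf.RegisterGradient and
# Graph.gradient_override_map are normally paired in TF1. The 'GuidedRelu'
# rule below is a minimal guided-backprop gradient written for illustration;
# the repo presumably registers its own 'GuidedRelu'/'GradLRP' elsewhere.
# Note the override only affects ops *created* inside the override context,
# which is why the fresh-graph overrides in plot_fits() above do not alter
# gradients of ops that already live in the default graph.
import numpy as np
import tensorflow as tf


@tf.RegisterGradient('GuidedRelu')
def _guided_relu_grad(op, grad):
    # Pass gradients only where both the incoming gradient and the
    # forward activation are positive.
    return tf.where(
        op.outputs[0] > 0.,
        tf.where(grad > 0., grad, tf.zeros_like(grad)),
        tf.zeros_like(grad))


g = tf.Graph()
with g.as_default(), g.gradient_override_map({'Relu': 'GuidedRelu'}):
    x = tf.placeholder(tf.float32, [None, 4])
    y = tf.nn.relu(x)  # created inside the override, so its grad is guided
    dx = tf.gradients(tf.reduce_sum(y), x)[0]
with tf.Session(graph=g) as s:
    print s.run(dx, feed_dict={x: np.array([[-1., 2., -3., 4.]])})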
def plot_fits( experiment='760_cells_2017_11_04_16_29_09', query_db=False, template_exp='ALLEN_selected_cells_1', process_pnodes=False, allen_dir='/home/drew/Documents/Allen_Brain_Observatory', output_dir='tests/ALLEN_files', stimulus_dir='/media/data_cifs/AllenData/DataForTrain/all_stimulus_template', stimulus_type='tfrecord', top_n=100, recalc=False, preload_stim=False, target_layer='conv1_1', # conv1_1, sep_conv1_1, dog1_1 target_model='conv2d'): # conv2d, sep_conv2d, dog """Plot fits across the RF. experiment: Name of Allen experiment you're plotting. query_db: Use data from DB versus data in Numpys. num_models: The number of architectures you're testing. template_exp: The name of the contextual_circuit model template used.""" sys.path.append(allen_dir) from allen_config import Allen_Brain_Observatory_Config if process_pnodes: from pnodes_declare_datasets_loop import query_hp_hist, sel_exp_query else: from declare_datasets_loop import query_hp_hist, sel_exp_query config = Config() main_config = Allen_Brain_Observatory_Config() db_config = credentials.postgresql_connection() files = glob( os.path.join(allen_dir, main_config.multi_exps, experiment, '*.npz')) assert len(files), 'Couldn\'t find files.' out_data, xs, ys = [], [], [] perfs, model_types, exps, arg_perf = [], [], [], [] count = 0 for f in files: data = np.load(f) d = { 'x': data['rf_data'].item()['on_center_x'], 'y': data['rf_data'].item()['on_center_y'], # x: files['dataset_method'].item()['x_min'], # y: files['dataset_method'].item()['y_min'], } exp_name = { 'experiment_name': data['dataset_method'].item()['experiment_name'] } if query_db: perf = query_hp_hist(exp_name['experiment_name'], db_config=db_config) if perf is None: print 'No fits for: %s' % exp_name['experiment_name'] else: raise NotImplementedError d['perf'] = perf d['max_val'] = np.max(perf) out_data += [d] xs += [np.round(d['x'])] ys += [np.round(d['y'])] perfs += [np.max(d['perf'])] count += 1 else: data_files = glob( os.path.join( main_config.ccbp_exp_evals, exp_name['experiment_name'], '*val_losses.npy')) # Scores has preds, labels has GT score_files = glob( os.path.join( main_config.ccbp_exp_evals, exp_name['experiment_name'], '*val_scores.npy')) # Scores has preds, labels has GT lab_files = glob( os.path.join( main_config.ccbp_exp_evals, exp_name['experiment_name'], '*val_labels.npy')) # Scores has preds, labels has GT for gd, sd, ld in zip(data_files, score_files, lab_files): mt = gd.split(os.path.sep)[-1].split(template_exp + '_')[-1].split('_' + 'val')[0] if not recalc: it_data = np.load(gd).item() else: lds = np.load(ld).item() sds = np.load(sd).item() it_data = { k: np.corrcoef(lds[k], sds[k])[0, 1] for k in sds.keys() } sinds = np.asarray(it_data.keys())[np.argsort(it_data.keys())] sit_data = [it_data[idx] for idx in sinds] d['perf'] = sit_data d['max_val'] = np.max(sit_data) d['max_idx'] = np.argmax(sit_data) d['mt'] = mt out_data += [d] xs += [np.round(d['x'])] ys += [np.round(d['y'])] perfs += [np.max(sit_data)] arg_perf += [np.argmax(sit_data)] exps += [gd.split(os.path.sep)[-2]] model_types += [mt] count += 1 # Package as a df xs = np.round(np.asarray(xs)).astype(int) ys = np.round(np.asarray(ys)).astype(int) perfs = np.asarray(perfs) arg_perf = np.asarray(arg_perf) exps = np.asarray(exps) model_types = np.asarray(model_types) # Filter to only keep top-scoring values at each x/y (dirty trick) fxs, fys, fperfs, fmodel_types, fexps, fargs = [], [], [], [], [], [] xys = np.vstack((xs, ys)).transpose() cxy = np.ascontiguousarray( # Unique rows 
xys).view(np.dtype((np.void, xys.dtype.itemsize * xys.shape[1]))) _, idx = np.unique(cxy, return_index=True) uxys = xys[idx] for xy in uxys: sel_idx = (xys == xy).sum(axis=-1) == 2 sperfs = perfs[sel_idx] sexps = exps[sel_idx] sargs = arg_perf[sel_idx] sel_mts = model_types[sel_idx] bp = np.argmax(sperfs) fxs += [xy[0]] fys += [xy[1]] fperfs += [sperfs[bp]] fargs += [sargs[bp]] fmodel_types += [sel_mts[bp]] fexps += [sexps[bp]] xs = np.asarray(fxs) ys = np.asarray(fys) perfs = np.asarray(fperfs) arg_perf = np.asarray(fargs) exps = np.asarray(fexps) model_types = np.asarray(fmodel_types) umt, model_types_inds = np.unique(model_types, return_inverse=True) # Get weights for the top-n fitting models of each type it_perfs = perfs[model_types == target_model] it_exps = exps[model_types == target_model] # it_args = arg_perf[model_types == target_model] sorted_perfs = np.argsort(it_perfs)[::-1][:top_n] perf = sel_exp_query(experiment_name=it_exps[sorted_perfs[0]], model=target_model, db_config=db_config) dummy_sel_model = perf[-1] print 'Using %s' % dummy_sel_model model_file = dummy_sel_model['ckpt_file'].split('.')[0] model_ckpt = '%s.ckpt-%s' % (model_file, model_file.split( os.path.sep)[-1].split('_')[-1]) model_meta = '%s.meta' % model_ckpt # Pull responses dataset_module = py_utils.import_module( model_dir=config.dataset_info, dataset=dummy_sel_model['experiment_name']) dataset_module = dataset_module.data_processing() with tf.device('/cpu:0'): val_images = tf.placeholder(tf.float32, shape=[1] + [x for x in dataset_module.im_size]) # Pull stimuli stim_dir = os.path.join(main_config.tf_record_output, dummy_sel_model['experiment_name']) stim_files = glob(stim_dir + '*') stim_meta_file = [x for x in stim_files if 'meta' in x][0] stim_val_data = [x for x in stim_files if 'train.tfrecords' in x][0] stim_val_mean = [x for x in stim_files if 'train_means' in x][0] assert stim_meta_file is not None assert stim_val_data is not None assert stim_val_mean is not None stim_meta_data = np.load(stim_meta_file).item() rf_stim_meta_data = stim_meta_data['rf_data'] stim_mean_data = np.load( stim_val_mean).items()[0][1].item()['image']['mean'] # Mean normalize log = logger.get(os.path.join(output_dir, 'sta_logs', target_model)) data_dir = os.path.join(output_dir, 'data', target_model) py_utils.make_dir(data_dir) sys.path.append( os.path.join('models', 'structs', dummy_sel_model['experiment_name'])) model_dict = __import__(target_model) if hasattr(model_dict, 'output_structure'): # Use specified output layer output_structure = model_dict.output_structure else: output_structure = None model = model_utils.model_class( mean=stim_mean_data, training=True, # FIXME output_size=dataset_module.output_size) with tf.device('/gpu:0'): with tf.variable_scope('cnn') as scope: val_scores, model_summary = model.build( data=val_images, layer_structure=model_dict.layer_structure, output_structure=output_structure, log=log, tower_name='cnn') grad_image = tf.gradients(model.output, val_images)[0] print(json.dumps(model_summary, indent=4)) # Set up summaries and saver saver = tf.train.Saver(tf.global_variables()) summary_op = tf.summary.merge_all() # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run( tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())) all_filters = [] all_rfs = [] max_scores = [] for idx in sorted_perfs: perf = sel_exp_query(experiment_name=it_exps[idx], 
    for idx in sorted_perfs:
        perf = sel_exp_query(
            experiment_name=it_exps[idx],
            model=target_model,
            db_config=db_config)
        # NOTE: takes the max over 'validation_loss'; this assumes the field
        # stores a score where higher is better.
        perf_steps = [v['validation_loss'] for v in perf]
        max_score = np.max(perf_steps)
        arg_perf_steps = np.argmax(perf_steps)
        sel_model = perf[arg_perf_steps]
        print 'Using %s' % sel_model
        model_file = sel_model['ckpt_file'].split('.')[0]
        model_ckpt = '%s.ckpt-%s' % (
            model_file,
            model_file.split(os.path.sep)[-1].split('_')[-1])
        model_meta = '%s.meta' % model_ckpt

        # Store sparse-noise RF fits for reference
        stim_dir = os.path.join(
            main_config.tf_record_output,
            sel_model['experiment_name'])
        stim_files = glob(stim_dir + '*')
        stim_meta_file = [x for x in stim_files if 'meta' in x][0]
        stim_val_data = [x for x in stim_files if 'train.tfrecords' in x][0]
        stim_val_mean = [x for x in stim_files if 'train_means' in x][0]
        assert stim_meta_file is not None
        assert stim_val_data is not None
        assert stim_val_mean is not None
        stim_meta_data = np.load(stim_meta_file).item()
        rf_stim_meta_data = stim_meta_data['rf_data']
        stim_mean_data = np.load(
            stim_val_mean).items()[0][1].item()['image']['mean']
        rf_stim_meta_data = rf_stim_meta_data.values()[0][0]
        sparse_rf_on = {
            'center_x': rf_stim_meta_data.get('on_center_x', None),
            'center_y': rf_stim_meta_data.get('on_center_y', None),
            'width_x': rf_stim_meta_data.get('on_width_x', None),
            'width_y': rf_stim_meta_data.get('on_width_y', None),
            'distance': rf_stim_meta_data.get('on_distance', None),
            'area': rf_stim_meta_data.get('on_area', None),
            'rotation': rf_stim_meta_data.get('on_rotation', None),
        }
        sparse_rf_off = {
            'center_x': rf_stim_meta_data.get('off_center_x', None),
            'center_y': rf_stim_meta_data.get('off_center_y', None),
            'width_x': rf_stim_meta_data.get('off_width_x', None),
            'width_y': rf_stim_meta_data.get('off_width_y', None),
            'distance': rf_stim_meta_data.get('off_distance', None),
            'area': rf_stim_meta_data.get('off_area', None),
            'rotation': rf_stim_meta_data.get('off_rotation', None),
        }
        sparse_rf = {'on': sparse_rf_on, 'off': sparse_rf_off}

        # Select the first-layer filter variable for the target model
        if target_model == 'conv2d':
            fname = [
                x for x in tf.global_variables()
                if 'conv1_1_filters:0' in x.name]
        elif target_model == 'sep_conv2d':
            fname = [
                x for x in tf.global_variables()
                if 'sep_conv1_1_filters:0' in x.name]
        elif target_model in ('dog', 'DoG'):
            fname = [
                x for x in tf.global_variables()
                if 'dog1_1_filters:0' in x.name]
        else:
            raise NotImplementedError
        saver.restore(sess, model_ckpt)
        all_filters += [sess.run(fname)]
        all_rfs += [sparse_rf]
        max_scores += [max_score]
    np.savez(
        'tests/ALLEN_files/filters/%s_%s_recalc_%s' % (
            experiment, target_model, recalc),
        rfs=all_rfs,
        perf=max_scores,
        filters=all_filters)
    print 'SAVED'
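

# The x/y de-duplication above relies on a NumPy trick: view each row of a
# contiguous int array as one opaque np.void record so np.unique compares
# whole rows at once. A minimal sketch of the same trick; this helper is
# ours, not part of the pipeline, and on NumPy >= 1.13 you could instead
# call np.unique(arr, axis=0) directly.


def _unique_rows_demo():
    """Demonstrate the np.void row-view used for x/y de-duplication."""
    xys = np.asarray([[1, 2], [1, 2], [3, 4]])
    cxy = np.ascontiguousarray(xys).view(
        np.dtype((np.void, xys.dtype.itemsize * xys.shape[1])))
    _, idx = np.unique(cxy, return_index=True)
    print xys[idx]  # -> [[1 2], [3 4]]
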
def plot_fits_sta(  # NOTE: renamed; the source defined a second plot_fits that shadowed the one above
        experiment='760_cells_2017_11_04_16_29_09',
        query_db=False,
        num_models=3,
        template_exp='ALLEN_selected_cells_1',
        process_pnodes=False,
        allen_dir='/home/drew/Documents/Allen_Brain_Observatory',
        output_dir='tests/ALLEN_files',
        stimulus_dir='/media/data_cifs/AllenData/DataForTrain/all_stimulus_template',
        stimulus_type='tfrecord',
        top_n=0,
        preload_stim=False,
        target_layer='conv1_1',  # conv1_1, sep_conv1_1, dog1_1
        target_model='conv2d'):  # conv2d, sep_conv2d, dog
    """Plot fits across the RF.

    experiment: Name of the Allen experiment you're plotting.
    query_db: Use data from the DB versus data in numpys.
    num_models: The number of architectures you're testing.
    template_exp: Name of the contextual_circuit model template used.
    """
    sys.path.append(allen_dir)
    from allen_config import Allen_Brain_Observatory_Config
    if process_pnodes:
        from pnodes_declare_datasets_loop import query_hp_hist, sel_exp_query
    else:
        from declare_datasets_loop import query_hp_hist, sel_exp_query
    config = Config()
    main_config = Allen_Brain_Observatory_Config()
    db_config = credentials.postgresql_connection()
    files = glob(
        os.path.join(
            allen_dir,
            main_config.multi_exps,
            experiment,
            '*.npz'))
    assert len(files), 'Couldn\'t find files.'
    out_data, xs, ys = [], [], []
    perfs, model_types, exps, arg_perf = [], [], [], []
    count = 0
    for f in files:
        data = np.load(f)
        d = {
            'x': data['rf_data'].item()['on_center_x'],
            'y': data['rf_data'].item()['on_center_y'],
            # 'x': data['dataset_method'].item()['x_min'],
            # 'y': data['dataset_method'].item()['y_min'],
        }
        exp_name = {
            'experiment_name': data['dataset_method'].item()[
                'experiment_name']
        }
        if query_db:
            perf = query_hp_hist(
                exp_name['experiment_name'],
                db_config=db_config)
            if perf is None:
                print 'No fits for: %s' % exp_name['experiment_name']
            else:
                # The DB branch is unfinished; this raise makes the block
                # below unreachable until it is implemented.
                raise NotImplementedError
                d['perf'] = perf
                d['max_val'] = np.max(perf)
                out_data += [d]
                xs += [np.round(d['x'])]
                ys += [np.round(d['y'])]
                perfs += [np.max(d['perf'])]
                count += 1
        else:
            data_files = glob(
                os.path.join(
                    main_config.ccbp_exp_evals,
                    exp_name['experiment_name'],
                    '*val_losses.npy'))  # Losses index the per-cell fits
            for gd in data_files:
                mt = gd.split(
                    os.path.sep)[-1].split(
                        template_exp + '_')[-1].split('_val')[0]
                it_data = np.load(gd).item()
                sinds = np.asarray(it_data.keys())[np.argsort(it_data.keys())]
                sit_data = [it_data[idx] for idx in sinds]
                d['perf'] = sit_data
                d['max_val'] = np.max(sit_data)
                d['max_idx'] = np.argmax(sit_data)
                d['mt'] = mt
                out_data += [d]
                xs += [np.round(d['x'])]
                ys += [np.round(d['y'])]
                perfs += [np.max(sit_data)]
                arg_perf += [np.argmax(sit_data)]
                exps += [gd.split(os.path.sep)[-2]]
                model_types += [mt]
                count += 1

    # Package as arrays
    xs = np.round(np.asarray(xs)).astype(int)
    ys = np.round(np.asarray(ys)).astype(int)
    perfs = np.asarray(perfs)
    arg_perf = np.asarray(arg_perf)
    exps = np.asarray(exps)
    model_types = np.asarray(model_types)

    # Filter to only keep the top-scoring model at each x/y (dirty trick)
    fxs, fys, fperfs, fmodel_types, fexps, fargs = [], [], [], [], [], []
    xys = np.vstack((xs, ys)).transpose()
    cxy = np.ascontiguousarray(  # View rows as void records: unique rows
        xys).view(np.dtype((np.void, xys.dtype.itemsize * xys.shape[1])))
    _, idx = np.unique(cxy, return_index=True)
    uxys = xys[idx]
    for xy in uxys:
        sel_idx = (xys == xy).sum(axis=-1) == 2
        sperfs = perfs[sel_idx]
        sexps = exps[sel_idx]
        sargs = arg_perf[sel_idx]
        sel_mts = model_types[sel_idx]
        bp = np.argmax(sperfs)
        fxs += [xy[0]]
        fys += [xy[1]]
        fperfs += [sperfs[bp]]
        fargs += [sargs[bp]]
        fmodel_types += [sel_mts[bp]]
        fexps += [sexps[bp]]
    xs = np.asarray(fxs)
    ys = np.asarray(fys)
    perfs = np.asarray(fperfs)
    arg_perf = np.asarray(fargs)
    exps = np.asarray(fexps)
    model_types = np.asarray(fmodel_types)
    umt, model_types_inds = np.unique(model_types, return_inverse=True)

    # Get weights for the single model at rank top_n of the target type
    it_perfs = perfs[model_types == target_model]
    it_exps = exps[model_types == target_model]
    # it_args = arg_perf[model_types == target_model]
    # sorted_perfs = np.argsort(it_perfs)[::-1][:top_n]
    sorted_perfs = [np.argsort(it_perfs)[::-1][top_n]]
    for idx in sorted_perfs:
        perf = sel_exp_query(
            experiment_name=it_exps[idx],
            model=target_model,
            db_config=db_config)
        # NOTE: takes the max over 'validation_loss'; this assumes the field
        # stores a score where higher is better.
        perf_steps = [v['validation_loss'] for v in perf]
        max_score = np.max(perf_steps)
        arg_perf_steps = np.argmax(perf_steps)
        sel_model = perf[arg_perf_steps]
        print 'Using %s' % sel_model
        model_file = sel_model['ckpt_file'].split('.')[0]
        model_ckpt = '%s.ckpt-%s' % (
            model_file,
            model_file.split(os.path.sep)[-1].split('_')[-1])
        model_meta = '%s.meta' % model_ckpt

        # Pull stimuli
        stim_dir = os.path.join(
            main_config.tf_record_output,
            sel_model['experiment_name'])
        stim_files = glob(stim_dir + '*')
        stim_meta_file = [x for x in stim_files if 'meta' in x][0]
        # stim_val_data = [x for x in stim_files if 'val.tfrecords' in x][0]
        stim_val_data = [x for x in stim_files if 'train.tfrecords' in x][0]
        stim_val_mean = [x for x in stim_files if 'train_means' in x][0]
        assert stim_meta_file is not None
        assert stim_val_data is not None
        assert stim_val_mean is not None
        stim_meta_data = np.load(stim_meta_file).item()
        rf_stim_meta_data = stim_meta_data['rf_data']
        stim_mean_data = np.load(
            stim_val_mean).items()[0][1].item()['image']['mean']

        # Store sparse-noise RF fits for reference
        sparse_rf_on = {
            'center_x': rf_stim_meta_data.get('on_center_x', None),
            'center_y': rf_stim_meta_data.get('on_center_y', None),
            'width_x': rf_stim_meta_data.get('on_width_x', None),
            'width_y': rf_stim_meta_data.get('on_width_y', None),
            'distance': rf_stim_meta_data.get('on_distance', None),
            'area': rf_stim_meta_data.get('on_area', None),
            'rotation': rf_stim_meta_data.get('on_rotation', None),
        }
        sparse_rf_off = {
            'center_x': rf_stim_meta_data.get('off_center_x', None),
            'center_y': rf_stim_meta_data.get('off_center_y', None),
            'width_x': rf_stim_meta_data.get('off_width_x', None),
            'width_y': rf_stim_meta_data.get('off_width_y', None),
            'distance': rf_stim_meta_data.get('off_distance', None),
            'area': rf_stim_meta_data.get('off_area', None),
            'rotation': rf_stim_meta_data.get('off_rotation', None),
        }
        sparse_rf = {'on': sparse_rf_on, 'off': sparse_rf_off}

        # Pull responses
        dataset_module = py_utils.import_module(
            model_dir=config.dataset_info,
            dataset=sel_model['experiment_name'])
        dataset_module = dataset_module.data_processing()
        with tf.device('/cpu:0'):
            val_images = tf.placeholder(
                tf.float32,
                shape=[1] + [x for x in dataset_module.im_size])

        # Set up logging and output directories
        log = logger.get(os.path.join(output_dir, 'sta_logs', target_model))
        data_dir = os.path.join(output_dir, 'data', target_model)
        py_utils.make_dir(data_dir)
        sys.path.append(
            os.path.join('models', 'structs', sel_model['experiment_name']))
        model_dict = __import__(target_model)
        if hasattr(model_dict, 'output_structure'):
            # Use the specified output layer
            output_structure = model_dict.output_structure
        else:
            output_structure = None
        model = model_utils.model_class(
            mean=stim_mean_data,  # Mean-normalize inputs
            training=True,  # FIXME
            output_size=dataset_module.output_size)
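
        # The build below also requests d(output)/d(input) via tf.gradients;
        # these input gradients give a first-order picture of which pixels
        # drive the fitted cell, complementing the stimulus-triggered maps
        # computed after the stimulation loop.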
        with tf.device('/gpu:0'):
            with tf.variable_scope('cnn') as scope:
                val_scores, model_summary = model.build(
                    data=val_images,
                    layer_structure=model_dict.layer_structure,
                    output_structure=output_structure,
                    log=log,
                    tower_name='cnn')
                grad_image = tf.gradients(model.output, val_images)[0]
        print(json.dumps(model_summary, indent=4))

        # Set up summaries and saver
        saver = tf.train.Saver(tf.global_variables())
        summary_op = tf.summary.merge_all()

        # Initialize the graph
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

        # Need to initialize both of these if supplying num_epochs to inputs
        sess.run(
            tf.group(
                tf.global_variables_initializer(),
                tf.local_variables_initializer()))
        saver.restore(sess, model_ckpt)

        # Select the first-layer filter variable for the target model
        if target_model == 'conv2d':
            fname = [
                x for x in tf.global_variables()
                if 'conv1_1_filters:0' in x.name]
        elif target_model == 'sep_conv2d':
            fname = [
                x for x in tf.global_variables()
                if 'sep_conv1_1_filters:0' in x.name]
        elif target_model == 'dog':
            fname = [
                x for x in tf.global_variables()
                if 'dog1_1_filters:0' in x.name]
        else:
            raise NotImplementedError
        val_tensors = {
            'images': val_images,
            'filts': fname,
            'responses': model[target_layer],
            'labels': model['output'],
            'grads': grad_image,
        }
        all_images, all_preds, all_grads, all_responses = [], [], [], []

        # Drive the model with locally sparse noise and record its responses
        num_steps = 10000
        stimuli = os.path.join(
            stimulus_dir,
            'locally_sparse_noise_8deg_template.pkl')
        stimuli = pickle.load(open(stimuli, 'rb'))[:num_steps]
        ih, iw = 304, 608
        ns, sh, sw = stimuli.shape
        sh, sw = ih, iw  # Resize stimuli to the full image size
        sh = int(sh)
        sw = int(sw)
        tb = int((ih - sh) // 2)  # 0 when sh == ih
        lr = int((iw - sw) // 2)  # 0 when sw == iw
        gns = 32  # Noise grid size
        cnst = 127.5  # Gray background value
        for step in range(num_steps):
            chosen_stim = np.random.permutation(ns)[0]
            if preload_stim:
                # Use a randomly chosen template stimulus
                it_stim = stimuli[chosen_stim].astype(np.float32)
                noise_im = misc.imresize(it_stim, [sh, sw], interp='nearest')
                noise_im = cv2.copyMakeBorder(
                    noise_im.squeeze(),
                    tb,
                    tb,
                    lr,
                    lr,
                    cv2.BORDER_CONSTANT,
                    value=cnst)
            else:
                # Sample fresh sparse noise: white/black dots on gray
                stim_noise = scipy.sparse.csr_matrix(
                    scipy.sparse.random(
                        ih // gns,
                        iw // gns,
                        density=0.05)).todense()
                stim_noise_mask = stim_noise == 0
                stim_noise[stim_noise > 0.5] = 255.
                stim_noise[stim_noise < 0.5] = 0.
                stim_noise[stim_noise_mask] = cnst
                noise_im = misc.imresize(
                    stim_noise.squeeze(), [sh, sw], interp='nearest')
            # Random flips for additional stimulus diversity
            if np.random.rand() < 0.5:
                noise_im = np.fliplr(noise_im)
            if np.random.rand() < 0.5:
                noise_im = np.flipud(noise_im)
            noise_im = noise_im / 255.
            noise_im = noise_im[None, :, :, None]
            assert noise_im.max() <= 1
            val_vals = sess.run(
                val_tensors.values(),
                feed_dict={val_images: noise_im})
            val_dict = {k: v for k, v in zip(val_tensors.keys(), val_vals)}
            all_images += [noise_im]
            # all_responses += [val_dict['responses']]
            all_preds += [val_dict['labels'].squeeze()]
            # all_grads += [val_dict['grads'].squeeze()]
            print 'Finished step %s' % step

        # Process and save data
        all_images = np.concatenate(all_images).squeeze()

        # Binarize predictions into "spikes" at peaks above the median
        ev, vals = peakdet(all_preds, np.median(all_preds))
        sp = np.zeros_like(all_preds)
        sp[ev[:, 0].astype(int)] = 1

        # Prediction-weighted and spike-triggered stimulus averages
        plt.imshow(
            np.matmul(
                all_images.reshape(all_images.shape[0], -1).transpose(),
                all_preds).reshape(ih, iw))
        plt.show()
        plt.imshow(
            np.matmul(
                all_images.reshape(all_images.shape[0], -1).transpose(),
                sp).reshape(ih, iw))
        plt.show()
        filters = val_dict['filts'][0].squeeze().transpose(2, 0, 1)
        all_grads = np.asarray(all_grads)
        all_preds = np.asarray(all_preds).reshape(-1, 1)
        # An unfinished spike-triggered average/covariance analysis was
        # sketched here in comments; the core lines are preserved below, and
        # the compute_sta sketch after this function shows a working version
        # of the mean computation.
        # RawMu = np.mean(res_i, 0).T
        # RawCov = np.dot(res_i.T, res_i) / (slen - 1.) - \
        #     (RawMu * np.vstack(RawMu) * slen) / (slen - 1.)
        # iisp = np.nonzero(sp > 0.)
        # spvec = sp[iisp]
        # STA = np.divide(np.dot(spvec.T, res_i[iisp[0], :]).T, nsp)
        # STC = np.dot(
        #     res_i[iisp[0], :].T,
        #     np.multiply(
        #         res_i[iisp[0], :],
        #         ml.repmat(spvec, rowlen, 1).T)) / (nsp - 1.) - \
        #     (STA * np.vstack(STA) * nsp) / (nsp - 1.)
        np.savez(
            os.path.join(data_dir, 'data'),
            images=all_images,
            pred=all_preds,
            filters=filters,
            grads=all_grads)  # NOTE: dropped undefined STA/fits references
        if target_model != 'dog':
            save_mosaic(
                maps=filters,
                output=os.path.join(data_dir, '%s_filters' % target_layer),
                rc=8,
                cc=4,
                title='%s filters' % target_layer)
    print 'Complete.'
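

# The commented-out analysis above gestures at a spike-triggered average
# (STA): threshold predictions into spikes, then average the stimuli that
# evoked them. A minimal sketch of that mean computation; this helper is
# ours, not part of the original pipeline.


def compute_sta(stims, spikes):
    """Return the spike-triggered average stimulus.

    stims: [n_steps, n_pixels] array of flattened stimuli.
    spikes: [n_steps] binary (0/1) spike indicator, e.g. sp from above.
    """
    nsp = spikes.sum()
    assert nsp > 0, 'No spikes to average over.'
    # Sum the stimuli shown at spike times, then normalize by spike count.
    return np.dot(spikes, stims) / nsp


# Example usage with the arrays built in the stimulation loop (assumed
# shapes):
#     sta = compute_sta(all_images.reshape(all_images.shape[0], -1), sp.ravel())
#     plt.imshow(sta.reshape(ih, iw))
#     plt.show()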