def extract_dataset(dataset, config, cv, out_dir):
    """Save dataset npys into a directory."""
    dataset_module = py_utils.import_module(
        pre_path=config.dataset_classes,
        module=dataset)
    dataset_module = dataset_module.data_processing()
    (train_data, _, _) = py_utils.get_data_pointers(
        dataset=dataset_module.output_name,
        base_dir="/media/data_cifs/cluttered_nist_experiments/tf_records",  # config.tf_records
        local_dir="/media/data_cifs/cluttered_nist_experiments/tf_records",  # config.local_tf_records
        cv=cv)
    train_images, train_labels, train_aux = data_loader.inputs(
        dataset=train_data,
        batch_size=1000,  # config.train_batch_size
        model_input_image_size=dataset_module.model_input_image_size,
        tf_dict=dataset_module.tf_dict,
        data_augmentations=[],  # config.train_augmentations
        num_epochs=1,  # config.epochs
        aux=None,  # train_aux_loss
        tf_reader_settings=dataset_module.tf_reader,
        shuffle=False)  # config.shuffle_train

    # Initialize the graph and start the tfrecord queue runners
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()))
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Pull batches until the single epoch is exhausted, saving each to disk
    count = 0
    try:
        while not coord.should_stop():
            images, labels = sess.run([train_images, train_labels])
            np.savez(
                os.path.join(out_dir, "{}".format(count)),
                images=images,
                labels=labels)
            count += 1
    except tf.errors.OutOfRangeError:
        print("Finished loop")
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
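# Minimal sketch, not part of the original module: reload the numbered
# per-batch .npz files written by extract_dataset above and stack them into
# single arrays. `out_dir` is the same directory passed to extract_dataset;
# the helper name is hypothetical.
def load_extracted_dataset(out_dir):
    """Concatenate the numbered .npz batches saved by extract_dataset."""
    files = sorted(
        [f for f in os.listdir(out_dir) if f.endswith('.npz')],
        key=lambda f: int(os.path.splitext(f)[0]))
    images, labels = [], []
    for f in files:
        d = np.load(os.path.join(out_dir, f))
        images.append(d['images'])
        labels.append(d['labels'])
    return np.concatenate(images, axis=0), np.concatenate(labels, axis=0)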
def plot_fits( experiment='760_cells_2017_11_04_16_29_09', query_db=False, num_models=3, template_exp='ALLEN_selected_cells_1', process_pnodes=False): """Plot fits across the RF. experiment: Name of Allen experiment you're plotting. query_db: Use data from DB versus data in Numpys. num_models: The number of architectures you're testing. template_exp: The name of the contextual_circuit model template used.""" if process_pnodes: from pnodes_declare_datasets_loop import query_hp_hist, sel_exp_query else: from declare_datasets_loop import query_hp_hist, sel_exp_query main_config = Allen_Brain_Observatory_Config() sys.path.append(main_config.cc_path) from db import credentials from ops import data_loader db_config = credentials.postgresql_connection() files = glob( os.path.join( main_config.multi_exps, experiment, '*.npz')) out_data, xs, ys = [], [], [] perfs, model_types, exps, arg_perf = [], [], [], [] count = 0 for f in files: data = np.load(f) d = { 'x': data['rf_data'].item()['on_center_x'], 'y': data['rf_data'].item()['on_center_y'], # x: files['dataset_method'].item()['x_min'], # y: files['dataset_method'].item()['y_min'], } exp_name = { 'experiment_name': data['dataset_method'].item()[ 'experiment_name']} if query_db: perf = query_hp_hist( exp_name['experiment_name'], db_config=db_config) if perf is None: print 'No fits for: %s' % exp_name['experiment_name'] else: raise NotImplementedError d['perf'] = perf d['max_val'] = np.max(perf) out_data += [d] xs += [np.round(d['x'])] ys += [np.round(d['y'])] perfs += [np.max(d['perf'])] count += 1 else: data_files = glob( os.path.join( main_config.ccbp_exp_evals, exp_name['experiment_name'], '*val_losses.npy')) # Scores has preds, labels has GT for gd in data_files: mt = gd.split( os.path.sep)[-1].split( template_exp + '_')[-1].split('_' + 'val')[0] it_data = np.load(gd).item() sinds = np.asarray(it_data.keys())[np.argsort(it_data.keys())] sit_data = [it_data[idx] for idx in sinds] d['perf'] = sit_data d['max_val'] = np.max(sit_data) d['max_idx'] = np.argmax(sit_data) d['mt'] = mt out_data += [d] xs += [np.round(d['x'])] ys += [np.round(d['y'])] perfs += [np.max(sit_data)] arg_perf += [np.argmax(sit_data)] exps += [gd.split(os.path.sep)[-2]] model_types += [mt] count += 1 # Package as a df xs = np.round(np.asarray(xs)).astype(int) ys = np.round(np.asarray(ys)).astype(int) perfs = np.asarray(perfs) arg_perf = np.asarray(arg_perf) exps = np.asarray(exps) model_types = np.asarray(model_types) # Filter to only keep top-scoring values at each x/y (dirty trick) fxs, fys, fperfs, fmodel_types, fexps, fargs = [], [], [], [], [], [] xys = np.vstack((xs, ys)).transpose() cxy = np.ascontiguousarray( # Unique rows xys).view( np.dtype((np.void, xys.dtype.itemsize * xys.shape[1]))) _, idx = np.unique(cxy, return_index=True) uxys = xys[idx] for xy in uxys: sel_idx = (xys == xy).sum(axis=-1) == 2 sperfs = perfs[sel_idx] sexps = exps[sel_idx] sargs = arg_perf[sel_idx] sel_mts = model_types[sel_idx] bp = np.argmax(sperfs) fxs += [xy[0]] fys += [xy[1]] fperfs += [sperfs[bp]] fargs += [sargs[bp]] fmodel_types += [sel_mts[bp]] fexps += [sexps[bp]] xs = np.asarray(fxs) ys = np.asarray(fys) perfs = np.asarray(fperfs) arg_perf = np.asarray(fargs) exps = np.asarray(fexps) model_types = np.asarray(fmodel_types) umt, model_types_inds = np.unique(model_types, return_inverse=True) # Get weights for the top-n fitting models of each type top_n = 1 target_layer = 'conv2d' it_perfs = perfs[model_types == target_layer] it_exps = exps[model_types == target_layer] # it_args = 
arg_perf[model_types == target_layer] sorted_perfs = np.argsort(it_perfs)[::-1][:top_n] for idx in sorted_perfs: perf = sel_exp_query( experiment_name=it_exps[idx], model=target_layer, db_config=db_config) # perf_steps = np.argsort([v['training_step'] for v in perf])[::-1] perf_steps = [v['validation_loss'] for v in perf] max_score = np.max(perf_steps) arg_perf_steps = np.argmax(perf_steps) sel_model = perf[arg_perf_steps] # perf_steps[it_args[idx]]] print 'Using %s' % sel_model model_file = sel_model['ckpt_file'].split('.')[0] model_ckpt = '%s.ckpt-%s' % ( model_file, model_file.split(os.path.sep)[-1].split('_')[-1]) model_meta = '%s.meta' % model_ckpt import ipdb;ipdb.set_trace() # Pull stimuli stim_dir = os.path.join( main_config.tf_record_output, sel_model['experiment_name']) stim_files = glob(os.path.join(stim_dir, '*')) stim_meta_file = [x for x in stim_files if 'meta' in x] stim_val_data = [x for x in stim_files if 'val.tfrecords' in x] stim_val_mean = [x for x in stim_files if 'val_means' in x] stim_meta_data = np.load(stim_meta_file).item() # Store sparse noise for reference sparse_rf_on = { 'center_x': stim_meta_data['on_center_x'], 'center_y': stim_meta_data['on_center_y'], 'width_x': stim_meta_data['on_width_x'], 'width_y': stim_meta_data['on_width_y'], 'distance': stim_meta_data['on_distance'], 'area': stim_meta_data['on_area'], 'rotation': stim_meta_data['on_rotation'], } sparse_rf_off = { 'center_x': stim_meta_data['off_center_x'], 'center_y': stim_meta_data['off_center_y'], 'width_x': stim_meta_data['off_width_x'], 'width_y': stim_meta_data['off_width_y'], 'distance': stim_meta_data['off_distance'], 'area': stim_meta_data['off_area'], 'rotation': stim_meta_data['off_rotation'], } sparse_rf = {'on': sparse_rf_on, 'off': sparse_rf_off} # Pull responses val_images, val_labels = data_loader.inputs( dataset=stim_val_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=config.data_augmentations, num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle ) # Mean normalize if target_layer == 'DoG': pass else: with tf.Session() as sess: saver = tf.train.import_meta_graph( model_meta, clear_devices=True) saver.restore(sess, model_ckpt) if target_layer == 'conv2d': fname = [ x for x in tf.global_variables() if 'conv1_1_filters:0' in x.name] elif target_layer == 'sep_conv2d': fname = [ x for x in tf.global_variables() if 'sep_conv1_1_filters:0' in x.name] filts = sess.run(fname) import ipdb;ipdb.set_trace() save_mosaic( maps=filts[0].squeeze().transpose(2, 0, 1), output='%s_filters' % target_layer, rc=8, cc=4, title='%s filters for cell where rho=%s' % ( target_layer, np.around(max_score, 2)))
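# Self-contained sketch of the "unique rows" trick used above in plot_fits:
# viewing an int (x, y) array as a void dtype lets np.unique operate on whole
# rows, so only the top-scoring model is kept at each receptive-field
# location. The toy arrays below are illustrative only.
def unique_row_demo():
    xys = np.asarray([[1, 2], [1, 2], [3, 4]])
    perfs = np.asarray([0.2, 0.9, 0.5])
    cxy = np.ascontiguousarray(xys).view(
        np.dtype((np.void, xys.dtype.itemsize * xys.shape[1])))
    _, idx = np.unique(cxy, return_index=True)
    for xy in xys[idx]:
        sel_idx = (xys == xy).sum(axis=-1) == 2  # rows matching this (x, y)
        print xy, perfs[sel_idx].max()  # best score at each location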
def train_classifier_on_model(
        train_pointer,
        model_type,
        model_weights,
        selected_layer,
        config):
    # Make output directories if they do not exist
    dt_stamp = '%s_%s_%s_%s' % (
        model_type,
        selected_layer,
        str(config.lr)[2:],
        re.split(r'\.', str(datetime.now()))[0].replace(
            ' ', '_').replace(':', '_').replace('-', '_'))
    config.checkpoint_directory = os.path.join(
        config.checkpoint_directory, dt_stamp)  # timestamp this run
    dir_list = [config.checkpoint_directory]
    [utilities.make_dir(d) for d in dir_list]
    print '-' * 60
    print 'Training %s over a %s. Saving to %s' % (
        config.classifier, model_type, dt_stamp)
    print '-' * 60
    dcn_flavor = import_cnn(model_type)

    # Prepare data on CPU
    with tf.device('/cpu:0'):
        train_images, train_labels, train_files = inputs(
            train_pointer,
            config.train_batch,
            config.train_image_size,
            config.model_image_size[:2],
            num_epochs=config.epochs,
            shuffle_batch=True)

    # Prepare pretrained model on GPU
    with tf.device('/gpu:0'):
        with tf.variable_scope('cnn'):
            if 'ckpt' in model_weights:
                cnn = dcn_flavor.model()
            else:
                cnn = dcn_flavor.model(weight_path=model_weights)
            cnn.build(train_images)
            sample_layer = cnn[selected_layer]
            class_accuracy = tf_loss.class_accuracy(cnn.prob, train_labels)

    saver = tf.train.Saver(tf.all_variables(), max_to_keep=10)

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.initialize_all_variables(),
            tf.initialize_local_variables()))

    # Set up exemplar threading
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Extract features from the selected layer
    np.save(
        os.path.join(config.checkpoint_directory, 'training_config_file'),
        config)
    step, scores, labs = 0, [], []
    if 'ckpt' in model_weights:
        saver.restore(sess, model_weights)
    try:
        print 'Getting scores'
        while not coord.should_stop():
            start_time = time.time()
            score, lab, acc = sess.run(
                [sample_layer, train_labels, class_accuracy])
            scores += [score]
            labs += [lab]
            duration = time.time() - start_time
            # End iteration
            print_status(step, 1, config, duration, acc, '')
            step += 1
    except tf.errors.OutOfRangeError:
        print 'Finished extracting scores.'
    finally:
        coord.request_stop()

    # Z-score the features, then fit and save a linear SVM
    X = np.concatenate(scores)
    y = np.concatenate(labs)
    mu = np.mean(X, axis=0)
    sd = np.std(X, axis=0)
    X = (X - mu) / sd
    svc = svm.LinearSVC(dual=False, C=config.c, verbose=True).fit(X, y)
    ckpt_path = os.path.join(
        config.checkpoint_directory, 'model_%s.pkl' % step)
    with open(ckpt_path, 'wb') as fid:
        cPickle.dump(svc, fid)
    norm_path = os.path.join(
        config.checkpoint_directory, 'model_%s_normalization' % step)
    np.savez(norm_path, mu=mu, sd=sd, scores=scores, labs=labs)
    print 'Saved to: %s' % config.checkpoint_directory
    print 'Saved checkpoint to: %s' % ckpt_path
    coord.join(threads)
    sess.close()

    # Return the final checkpoint for testing
    return ckpt_path, config.checkpoint_directory
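# Sketch, not part of the original module: reload the LinearSVC and feature
# normalization saved by train_classifier_on_model and score new features.
# `ckpt_path` and `norm_path` are the two paths written above; `features` is
# a hypothetical (N, D) array drawn from the same sample_layer.
def load_and_predict(ckpt_path, norm_path, features):
    with open(ckpt_path, 'rb') as fid:
        svc = cPickle.load(fid)
    norm = np.load(norm_path + '.npz')  # np.savez appended the extension
    z_features = (features - norm['mu']) / norm['sd']
    return svc.predict(z_features)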
def main(experiment_name, list_experiments=False, gpu_device='/gpu:0'): """Create a tensorflow worker to run experiments in your DB.""" if list_experiments: exps = db.list_experiments() print '_' * 30 print 'Initialized experiments:' print '_' * 30 for l in exps: print l.values()[0] print '_' * 30 print 'You can add to the DB with: '\ 'python prepare_experiments.py --experiment=%s' % \ exps[0].values()[0] return if experiment_name is None: print 'No experiment specified. Pulling one out of the DB.' experiment_name = db.get_experiment_name() # Prepare to run the model config = Config() condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp()) experiment_label = '%s' % (experiment_name) log = logger.get(os.path.join(config.log_dir, condition_label)) experiment_dict = experiments.experiments()[experiment_name]() config = add_to_config(d=experiment_dict, config=config) # Globals config, exp_params = process_DB_exps( experiment_name=experiment_name, log=log, config=config) # Update config w/ DB params dataset_module = py_utils.import_module(model_dir=config.dataset_info, dataset=config.dataset) dataset_module = dataset_module.data_processing() # hardcoded class name train_data, train_means = get_data_pointers( dataset=config.dataset, base_dir=config.tf_records, cv=dataset_module.folds.keys()[1], # TODO: SEARCH FOR INDEX. log=log) val_data, val_means = get_data_pointers(dataset=config.dataset, base_dir=config.tf_records, cv=dataset_module.folds.keys()[0], log=log) # Initialize output folders dir_list = { 'checkpoints': os.path.join(config.checkpoints, condition_label), 'summaries': os.path.join(config.summaries, condition_label), 'condition_evaluations': os.path.join(config.condition_evaluations, condition_label), 'experiment_evaluations': os.path.join( # DEPRECIATED config.experiment_evaluations, experiment_label), 'visualization': os.path.join(config.visualizations, condition_label), 'weights': os.path.join(config.condition_evaluations, condition_label, 'weights') } [py_utils.make_dir(v) for v in dir_list.values()] # Prepare data loaders on the cpu config.data_augmentations = py_utils.flatten_list( config.data_augmentations, log) with tf.device('/cpu:0'): train_images, train_labels = data_loader.inputs( dataset=train_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=config.data_augmentations, num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle) val_images, val_labels = data_loader.inputs( dataset=val_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=config.data_augmentations, num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle) log.info('Created tfrecord dataloader tensors.') # Load model specification struct_name = config.model_struct.split(os.path.sep)[-1] try: model_dict = py_utils.import_module( dataset=struct_name, model_dir=os.path.join('models', 'structs', experiment_name).replace(os.path.sep, '.')) except IOError: print 'Could not find the model structure: %s' % experiment_name # Inject model_dict with hyperparameters if requested model_dict.layer_structure = hp_opt_utils.inject_model_with_hps( layer_structure=model_dict.layer_structure, exp_params=exp_params) # Prepare model on GPU with tf.device(gpu_device): with tf.variable_scope('cnn') as scope: # Training model if 
len(dataset_module.output_size) > 1: log.warning('Found > 1 dimension for your output size.' 'Converting to a scalar.') dataset_module.output_size = np.prod( dataset_module.output_size) if hasattr(model_dict, 'output_structure'): # Use specified output layer output_structure = model_dict.output_structure else: output_structure = None model = model_utils.model_class( mean=train_means, training=True, output_size=dataset_module.output_size) train_scores, model_summary = model.build( data=train_images, layer_structure=model_dict.layer_structure, output_structure=output_structure, log=log, tower_name='cnn') log.info('Built training model.') log.debug(json.dumps(model_summary, indent=4), verbose=0) print_model_architecture(model_summary) # Prepare the loss function train_loss, _ = loss_utils.loss_interpreter( logits=train_scores, labels=train_labels, loss_type=config.loss_function, dataset_module=dataset_module) # Add weight decay if requested if len(model.regularizations) > 0: train_loss = loss_utils.wd_loss( regularizations=model.regularizations, loss=train_loss, wd_penalty=config.regularization_strength) train_op = loss_utils.optimizer_interpreter( loss=train_loss, lr=config.lr, optimizer=config.optimizer, constraints=config.optimizer_constraints, model=model) log.info('Built training loss function.') train_accuracy = eval_metrics.metric_interpreter( metric=dataset_module.score_metric, pred=train_scores, labels=train_labels) # training accuracy if int(train_images.get_shape()[-1]) <= 3: tf.summary.image('train images', train_images) tf.summary.scalar('training loss', train_loss) tf.summary.scalar('training accuracy', train_accuracy) log.info('Added training summaries.') # Validation model scope.reuse_variables() val_model = model_utils.model_class( mean=val_means, training=True, output_size=dataset_module.output_size) val_scores, _ = val_model.build( # Ignore summary data=val_images, layer_structure=model_dict.layer_structure, output_structure=output_structure, log=log, tower_name='cnn') log.info('Built validation model.') val_loss, _ = loss_utils.loss_interpreter( logits=val_scores, labels=val_labels, loss_type=config.loss_function, dataset_module=dataset_module) val_accuracy = eval_metrics.metric_interpreter( metric=dataset_module.score_metric, pred=val_scores, labels=val_labels) # training accuracy if int(train_images.get_shape()[-1]) <= 3: tf.summary.image('val images', val_images) tf.summary.scalar('validation loss', val_loss) tf.summary.scalar('validation accuracy', val_accuracy) log.info('Added validation summaries.') # Set up summaries and saver saver = tf.train.Saver(tf.global_variables()) summary_op = tf.summary.merge_all() # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run( tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())) summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph) # Set up exemplar threading coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # Create dictionaries of important training and validation information train_dict = { 'train_loss': train_loss, 'train_accuracy': train_accuracy, 'train_images': train_images, 'train_labels': train_labels, 'train_op': train_op, 'train_scores': train_scores } val_dict = { 'val_loss': val_loss, 'val_accuracy': val_accuracy, 'val_images': val_images, 'val_labels': val_labels, 'val_scores': val_scores, } # Start training loop 
np.save( os.path.join(dir_list['condition_evaluations'], 'training_config_file'), config) log.info('Starting training') output_dict = training.training_loop( config=config, db=db, coord=coord, sess=sess, summary_op=summary_op, summary_writer=summary_writer, saver=saver, threads=threads, summary_dir=dir_list['summaries'], checkpoint_dir=dir_list['checkpoints'], weight_dir=dir_list['weights'], train_dict=train_dict, val_dict=val_dict, train_model=model, val_model=val_model, exp_params=exp_params) log.info('Finished training.') model_name = config.model_struct.replace('/', '_') py_utils.save_npys(data=output_dict, model_name=model_name, output_string=dir_list['experiment_evaluations'])
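# Note: training.training_loop lives elsewhere in the repo and is not shown
# here. The sketch below only illustrates how a dict of tensors like
# train_dict/val_dict built above is typically consumed -- run every value in
# one sess.run and zip the results back onto the same keys (the evaluation
# loop further below uses exactly this pattern).
def run_tensor_dict(sess, tensor_dict):
    """Evaluate each tensor in tensor_dict and return numpy results."""
    values = sess.run(tensor_dict.values())
    return {k: v for k, v in zip(tensor_dict.keys(), values)}

# e.g. it_train_dict = run_tensor_dict(sess, train_dict)
#      loss = it_train_dict['train_loss']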
def main(experiment_name, list_experiments=False, load_and_evaluate_ckpt=None, config_file=None, ckpt_file=None, gpu_device='/gpu:0'): """Create a tensorflow worker to run experiments in your DB.""" if list_experiments: exps = db.list_experiments() print '_' * 30 print 'Initialized experiments:' print '_' * 30 for l in exps: print l.values()[0] print '_' * 30 if len(exps) == 0: print 'No experiments found.' else: print 'You can add to the DB with: '\ 'python prepare_experiments.py --experiment=%s' % \ exps[0].values()[0] return if experiment_name is None: print 'No experiment specified. Pulling one out of the DB.' experiment_name = db.get_experiment_name() # Prepare to run the model config = Config() condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp()) experiment_label = '%s' % (experiment_name) log = logger.get(os.path.join(config.log_dir, condition_label)) experiment_dict = experiments.experiments()[experiment_name]() config = add_to_config(d=experiment_dict, config=config) # Globals config.load_and_evaluate_ckpt = load_and_evaluate_ckpt config, exp_params = process_DB_exps( experiment_name=experiment_name, log=log, config=config) # Update config w/ DB params config = np.load(config_file).item() dataset_module = py_utils.import_module(model_dir=config.dataset_info, dataset=config.dataset) dataset_module = dataset_module.data_processing() # hardcoded class name train_data, train_means_image, train_means_label = get_data_pointers( dataset=config.dataset, base_dir=config.tf_records, cv=dataset_module.folds.keys()[1], # TODO: SEARCH FOR INDEX. log=log) val_data, val_means_image, val_means_label = get_data_pointers( dataset=config.dataset, base_dir=config.tf_records, cv=dataset_module.folds.keys()[0], log=log) # Initialize output folders dir_list = { 'checkpoints': os.path.join(config.checkpoints, condition_label), 'summaries': os.path.join(config.summaries, condition_label), 'condition_evaluations': os.path.join(config.condition_evaluations, condition_label), 'experiment_evaluations': os.path.join( # DEPRECIATED config.experiment_evaluations, experiment_label), 'visualization': os.path.join(config.visualizations, condition_label), 'weights': os.path.join(config.condition_evaluations, condition_label, 'weights') } [py_utils.make_dir(v) for v in dir_list.values()] # Prepare data loaders on the cpu if all(isinstance(i, list) for i in config.data_augmentations): if config.data_augmentations: config.data_augmentations = py_utils.flatten_list( config.data_augmentations, log) config.epochs = 1 config.shuffle = False with tf.device('/cpu:0'): train_images, train_labels = data_loader.inputs( dataset=train_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=config.data_augmentations, num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle_train, resize_output=config.resize_output) if hasattr(config, 'val_augmentations'): val_augmentations = config.val_augmentations else: val_augmentations = config.data_augmentations val_images, val_labels = data_loader.inputs( dataset=val_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=['resize_and_crop'], num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle_val, resize_output=config.resize_output) log.info('Created tfrecord dataloader tensors.') # Load model specification 
struct_name = config.model_struct.split(os.path.sep)[-1] try: model_dict = py_utils.import_module( dataset=struct_name, model_dir=os.path.join('models', 'structs', experiment_name).replace(os.path.sep, '.')) except IOError: print 'Could not find the model structure: %s in folder %s' % ( struct_name, experiment_name) # Inject model_dict with hyperparameters if requested model_dict.layer_structure = hp_opt_utils.inject_model_with_hps( layer_structure=model_dict.layer_structure, exp_params=exp_params) # Prepare model on GPU with tf.device(gpu_device): with tf.variable_scope('cnn') as scope: # Normalize labels if needed if 'normalize_labels' in exp_params.keys(): if exp_params['normalize_labels'] == 'zscore': train_labels -= train_means_label['mean'] train_labels /= train_means_label['std'] log.info('Z-scoring labels.') elif exp_params['normalize_labels'] == 'mean': train_labels -= train_means_label['mean'] log.info('Mean-centering labels.') # Training model if len(dataset_module.output_size) == 2: log.warning('Found > 1 dimension for your output size.' 'Converting to a scalar.') dataset_module.output_size = np.prod( dataset_module.output_size) if hasattr(model_dict, 'output_structure'): # Use specified output layer output_structure = model_dict.output_structure else: output_structure = None model = model_utils.model_class( mean=train_means_image, training=True, output_size=dataset_module.output_size) train_scores, model_summary = model.build( data=train_images, layer_structure=model_dict.layer_structure, output_structure=output_structure, log=log, tower_name='cnn') eval_graph = tf.Graph() with eval_graph.as_default(): with eval_graph.gradient_override_map({'selu': 'GradLRP'}): train_grad_images = tf.gradients( train_scores[0] * tf.cast(train_labels, tf.float32), train_images)[0] log.info('Built training model.') log.debug(json.dumps(model_summary, indent=4), verbose=0) print_model_architecture(model_summary) # Check the shapes of labels and scores if not isinstance(train_scores, list): if len(train_scores.get_shape()) != len( train_labels.get_shape()): train_shape = train_scores.get_shape().as_list() label_shape = train_labels.get_shape().as_list() if len(train_shape) == 2 and len( label_shape) == 1 and train_shape[-1] == 1: train_labels = tf.expand_dims(train_labels, axis=-1) elif len(train_shape) == 2 and len( label_shape) == 1 and train_shape[-1] == 1: train_scores = tf.expand_dims(train_scores, axis=-1) # Prepare the loss function train_loss, _ = loss_utils.loss_interpreter( logits=train_scores, # TODO labels=train_labels, loss_type=config.loss_function, weights=config.loss_weights, dataset_module=dataset_module) # Add loss tensorboard tracking if isinstance(train_loss, list): for lidx, tl in enumerate(train_loss): tf.summary.scalar('training_loss_%s' % lidx, tl) train_loss = tf.add_n(train_loss) else: tf.summary.scalar('training_loss', train_loss) # Add weight decay if requested if len(model.regularizations) > 0: train_loss = loss_utils.wd_loss( regularizations=model.regularizations, loss=train_loss, wd_penalty=config.regularization_strength) train_op = loss_utils.optimizer_interpreter( loss=train_loss, lr=config.lr, optimizer=config.optimizer, constraints=config.optimizer_constraints, model=model) log.info('Built training loss function.') # Add a score for the training set train_accuracy = eval_metrics.metric_interpreter( metric=dataset_module.score_metric, # TODO: Attach to exp cnfg pred=train_scores, # TODO labels=train_labels) # Add aux scores if requested train_aux = {} if 
hasattr(dataset_module, 'aux_scores'): for m in dataset_module.aux_scores: train_aux[m] = eval_metrics.metric_interpreter( metric=m, pred=train_scores, labels=train_labels)[0] # TODO: Fix for multiloss # Prepare remaining tensorboard summaries if len(train_images.get_shape()) == 4: tf_fun.image_summaries(train_images, tag='Training images') if len(train_labels.get_shape()) > 2: tf_fun.image_summaries(train_labels, tag='Training_targets') tf_fun.image_summaries(train_scores, tag='Training_predictions') if isinstance(train_accuracy, list): for tidx, ta in enumerate(train_accuracy): tf.summary.scalar('training_accuracy_%s' % tidx, ta) else: tf.summary.scalar('training_accuracy', train_accuracy) if config.pr_curve: if isinstance(train_scores, list): for pidx, train_score in enumerate(train_scores): train_label = train_labels[:, pidx] pr_summary.op( tag='training_pr_%s' % pidx, predictions=tf.cast( tf.argmax(train_score, axis=-1), tf.float32), labels=tf.cast(train_label, tf.bool), display_name='training_precision_recall_%s' % pidx) else: pr_summary.op(tag='training_pr', predictions=tf.cast( tf.argmax(train_scores, axis=-1), tf.float32), labels=tf.cast(train_labels, tf.bool), display_name='training_precision_recall') log.info('Added training summaries.') # Validation model scope.reuse_variables() val_model = model_utils.model_class( mean=train_means_image, # Normalize with train data training=False, # False, output_size=dataset_module.output_size) val_scores, _ = val_model.build( # Ignore summary data=val_images, layer_structure=model_dict.layer_structure, output_structure=output_structure, log=log, tower_name='cnn') eval_graph = tf.Graph() with eval_graph.as_default(): with eval_graph.gradient_override_map({'selu': 'GradLRP'}): val_grad_images = tf.gradients( val_scores[0] * tf.cast(val_labels, tf.float32), val_images)[0] log.info('Built validation model.') # Check the shapes of labels and scores if not isinstance(train_scores, list): if len(val_scores.get_shape()) != len(val_labels.get_shape()): val_shape = val_scores.get_shape().as_list() val_label_shape = val_labels.get_shape().as_list() if len(val_shape) == 2 and len( val_label_shape) == 1 and val_shape[-1] == 1: val_labels = tf.expand_dims(val_labels, axis=-1) if len(val_shape) == 2 and len( val_label_shape) == 1 and val_shape[-1] == 1: val_scores = tf.expand_dims(val_scores, axis=-1) val_loss, _ = loss_utils.loss_interpreter( logits=val_scores, labels=val_labels, loss_type=config.loss_function, weights=config.loss_weights, dataset_module=dataset_module) # Add loss tensorboard tracking if isinstance(val_loss, list): for lidx, tl in enumerate(val_loss): tf.summary.scalar('validation_loss_%s' % lidx, tl) val_loss = tf.add_n(val_loss) else: tf.summary.scalar('validation_loss', val_loss) # Add a score for the validation set val_accuracy = eval_metrics.metric_interpreter( metric=dataset_module.score_metric, # TODO pred=val_scores, labels=val_labels) # Add aux scores if requested val_aux = {} if hasattr(dataset_module, 'aux_scores'): for m in dataset_module.aux_scores: val_aux[m] = eval_metrics.metric_interpreter( metric=m, pred=val_scores, labels=val_labels)[0] # TODO: Fix for multiloss # Prepare tensorboard summaries if len(val_images.get_shape()) == 4: tf_fun.image_summaries(val_images, tag='Validation') if len(val_labels.get_shape()) > 2: tf_fun.image_summaries(val_labels, tag='Validation_targets') tf_fun.image_summaries(val_scores, tag='Validation_predictions') if isinstance(val_accuracy, list): for vidx, va in enumerate(val_accuracy): 
tf.summary.scalar('validation_accuracy_%s' % vidx, va) else: tf.summary.scalar('validation_accuracy', val_accuracy) if config.pr_curve: if isinstance(val_scores, list): for pidx, val_score in enumerate(val_scores): val_label = val_labels[:, pidx] pr_summary.op( tag='validation_pr_%s' % pidx, predictions=tf.cast(tf.argmax(val_score, axis=-1), tf.float32), labels=tf.cast(val_label, tf.bool), display_name='validation_precision_recall_%s' % pidx) else: pr_summary.op(tag='validation_pr', predictions=tf.cast( tf.argmax(val_scores, axis=-1), tf.float32), labels=tf.cast(val_labels, tf.bool), display_name='validation_precision_recall') log.info('Added validation summaries.') # Set up summaries and saver saver = tf.train.Saver(tf.global_variables()) # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run( tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())) # Set up exemplar threading coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # Create dictionaries of important training and validation information train_dict = { 'train_loss': train_loss, 'train_images': train_images, 'train_labels': train_labels, 'train_op': train_op, 'train_scores': train_scores, 'train_grad_images': train_grad_images } val_dict = { 'val_loss': val_loss, 'val_images': val_images, 'val_labels': val_labels, 'val_scores': val_scores, 'val_grad_images': val_grad_images } if isinstance(train_accuracy, list): for tidx, (ta, va) in enumerate(zip(train_accuracy, val_accuracy)): train_dict['train_accuracy_%s' % tidx] = ta val_dict['val_accuracy_%s' % tidx] = va else: train_dict['train_accuracy_0'] = train_accuracy val_dict['val_accuracy_0'] = val_accuracy if load_and_evaluate_ckpt is not None: # Remove the train operation and add a ckpt pointer del train_dict['train_op'] if hasattr(dataset_module, 'aux_score'): # Attach auxillary scores to tensor dicts for m in dataset_module.aux_scores: train_dict['train_aux_%s' % m] = train_aux[m] val_dict['val_aux_%s' % m] = val_aux[m] # Start training loop checkpoint_dir = dir_list['checkpoints'] step = 0 train_losses, train_accs, train_aux, timesteps = {}, {}, {}, {} val_scores, val_aux, val_labels, val_grads = {}, {}, {}, {} train_images, val_images = {}, {} train_scores, train_labels = {}, {} train_aux_check = np.any(['aux_score' in k for k in train_dict.keys()]) val_aux_check = np.any(['aux_score' in k for k in val_dict.keys()]) # Restore model saver.restore(sess, ckpt_file) # Start evaluation try: while not coord.should_stop(): start_time = time.time() train_vars = sess.run(train_dict.values()) it_train_dict = { k: v for k, v in zip(train_dict.keys(), train_vars) } duration = time.time() - start_time train_losses[step] = it_train_dict['train_loss'] train_accs[step] = it_train_dict['train_accuracy_0'] train_images[step] = it_train_dict['train_images'] train_labels[step] = it_train_dict['train_labels'] train_scores[step] = it_train_dict['train_scores'] timesteps[step] = duration if train_aux_check: # Loop through to find aux scores it_train_aux = { itk: itv for itk, itv in it_train_dict.iteritems() if 'aux_score' in itk } train_aux[step] = it_train_aux assert not np.isnan(it_train_dict['train_loss']).any( ), 'Model diverged with loss = NaN' if step % config.validation_iters == 0: it_val_scores, it_val_labels, it_val_aux, it_val_grads, it_val_ims = [], [], [], [], [] for num_vals in 
range(config.num_validation_evals): # Validation accuracy as the average of n batches val_vars = sess.run(val_dict.values()) it_val_dict = { k: v for k, v in zip(val_dict.keys(), val_vars) } it_val_labels += [it_val_dict['val_labels']] it_val_scores += [it_val_dict['val_scores']] it_val_grads += [it_val_dict['val_grad_images']] it_val_ims += [it_val_dict['val_images']] if val_aux_check: iva = { itk: itv for itk, itv in it_val_dict.iteritems() if 'aux_score' in itk } it_val_aux += [iva] val_scores[step] = it_val_scores val_labels[step] = it_val_labels val_aux[step] = it_val_aux val_images[step] = it_val_grads val_grads[step] = it_val_ims # End iteration step += 1 except tf.errors.OutOfRangeError: print 'Done with evaluation for %d epochs, %d steps.' % (config.epochs, step) print 'Saved to: %s' % checkpoint_dir finally: coord.request_stop() coord.join(threads) sess.close() import ipdb ipdb.set_trace() np.savez( 'val_imgs_grads', val_images=val_images, # it_val_dict['val_images'], val_grads=val_grads, # it_val_dict['val_grad_images'], val_labels=val_labels, # it_val_dict['val_labels'], val_scores=val_scores) # it_val_dict['val_scores'][0])
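# Sketch (an assumption, not in the original file): reading back the
# 'val_imgs_grads' archive written above. np.savez stores each per-step dict
# as a 0-d object array, so .item() recovers the dict keyed by training step.
def inspect_val_archive():
    data = np.load('val_imgs_grads.npz')
    val_scores = data['val_scores'].item()  # {step: [per-batch arrays, ...]}
    val_labels = data['val_labels'].item()
    steps = sorted(val_scores.keys())
    print 'Validation steps saved: %s' % len(steps)
    print 'Batches at step %s: %s' % (steps[0], len(val_scores[steps[0]]))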
def main( experiment_name, list_experiments=False, load_and_evaluate_ckpt=None, placeholder_data=None, grad_images=False, gpu_device='/gpu:0'): """Create a tensorflow worker to run experiments in your DB.""" if list_experiments: exps = db.list_experiments() print '_' * 30 print 'Initialized experiments:' print '_' * 30 for l in exps: print l.values()[0] print '_' * 30 if len(exps) == 0: print 'No experiments found.' else: print 'You can add to the DB with: '\ 'python prepare_experiments.py --experiment=%s' % \ exps[0].values()[0] return if experiment_name is None: print 'No experiment specified. Pulling one out of the DB.' experiment_name = db.get_experiment_name() # Prepare to run the model config = Config() condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp()) experiment_label = '%s' % (experiment_name) log = logger.get(os.path.join(config.log_dir, condition_label)) assert experiment_name is not None, 'Empty experiment name.' experiment_dict = experiments.experiments()[experiment_name]() config = add_to_config(d=experiment_dict, config=config) # Globals config.load_and_evaluate_ckpt = load_and_evaluate_ckpt if load_and_evaluate_ckpt is not None: # Remove the train operation and add a ckpt pointer from ops import evaluation config, exp_params = process_DB_exps( experiment_name=experiment_name, log=log, config=config) # Update config w/ DB params dataset_module = py_utils.import_module( model_dir=config.dataset_info, dataset=config.dataset) dataset_module = dataset_module.data_processing() # hardcoded class name train_key = [k for k in dataset_module.folds.keys() if 'train' in k] if not len(train_key): train_key = 'train' else: train_key = train_key[0] train_data, train_means_image, train_means_label = get_data_pointers( dataset=config.dataset, base_dir=config.tf_records, cv=train_key, log=log) val_key = [k for k in dataset_module.folds.keys() if 'val' in k] if not len(val_key): val_key = 'train' else: val_key = val_key[0] val_data, val_means_image, val_means_label = get_data_pointers( dataset=config.dataset, base_dir=config.tf_records, cv=val_key, log=log) # Initialize output folders dir_list = { 'checkpoints': os.path.join( config.checkpoints, condition_label), 'summaries': os.path.join( config.summaries, condition_label), 'condition_evaluations': os.path.join( config.condition_evaluations, condition_label), 'experiment_evaluations': os.path.join( # DEPRECIATED config.experiment_evaluations, experiment_label), 'visualization': os.path.join( config.visualizations, condition_label), 'weights': os.path.join( config.condition_evaluations, condition_label, 'weights') } [py_utils.make_dir(v) for v in dir_list.values()] # Prepare data loaders on the cpu if all(isinstance(i, list) for i in config.data_augmentations): if config.data_augmentations: config.data_augmentations = py_utils.flatten_list( config.data_augmentations, log) if load_and_evaluate_ckpt is not None: config.epochs = 1 config.train_shuffle = False config.val_shuffle = False with tf.device('/cpu:0'): if placeholder_data: placeholder_shape = placeholder_data['train_image_shape'] placeholder_dtype = placeholder_data['train_image_dtype'] original_train_images = tf.placeholder( dtype=placeholder_dtype, shape=placeholder_shape, name='train_images') placeholder_shape = placeholder_data['train_label_shape'] placeholder_dtype = placeholder_data['train_label_dtype'] original_train_labels = tf.placeholder( dtype=placeholder_dtype, shape=placeholder_shape, name='train_labels') placeholder_shape = 
placeholder_data['val_image_shape'] placeholder_dtype = placeholder_data['val_image_dtype'] original_val_images = tf.placeholder( dtype=placeholder_dtype, shape=placeholder_shape, name='val_images') placeholder_shape = placeholder_data['val_label_shape'] placeholder_dtype = placeholder_data['val_label_dtype'] original_val_labels = tf.placeholder( dtype=placeholder_dtype, shape=placeholder_shape, name='val_labels') # Apply augmentations ( train_images, train_labels ) = data_loader.placeholder_image_augmentations( images=original_train_images, model_input_image_size=dataset_module.model_input_image_size, labels=original_train_labels, data_augmentations=config.data_augmentations, batch_size=config.batch_size) ( val_images, val_labels ) = data_loader.placeholder_image_augmentations( images=original_val_images, model_input_image_size=dataset_module.model_input_image_size, labels=original_val_labels, data_augmentations=config.data_augmentations, batch_size=config.batch_size) # Store in the placeholder dict placeholder_data['train_images'] = original_train_images placeholder_data['train_labels'] = original_train_labels placeholder_data['val_images'] = original_val_images placeholder_data['val_labels'] = original_val_labels else: train_images, train_labels = data_loader.inputs( dataset=train_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=config.data_augmentations, num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle_train, resize_output=config.resize_output) if hasattr(config, 'val_augmentations'): val_augmentations = config.val_augmentations else: val_augmentations = config.data_augmentations val_images, val_labels = data_loader.inputs( dataset=val_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=val_augmentations, num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle_val, resize_output=config.resize_output) log.info('Created tfrecord dataloader tensors.') # Load model specification struct_name = config.model_struct.split(os.path.sep)[-1] try: model_dict = py_utils.import_module( dataset=struct_name, model_dir=os.path.join( 'models', 'structs', experiment_name).replace(os.path.sep, '.') ) except IOError: print 'Could not find the model structure: %s in folder %s' % ( struct_name, experiment_name) # Inject model_dict with hyperparameters if requested model_dict.layer_structure = hp_opt_utils.inject_model_with_hps( layer_structure=model_dict.layer_structure, exp_params=exp_params) # Prepare variables for the models if len(dataset_module.output_size) == 2: log.warning( 'Found > 1 dimension for your output size.' 'Converting to a scalar.') dataset_module.output_size = np.prod( dataset_module.output_size) if hasattr(model_dict, 'output_structure'): # Use specified output layer output_structure = model_dict.output_structure else: output_structure = None # Correct number of output neurons if needed if config.dataloader_override and\ 'weights' in output_structure[-1].keys(): output_neurons = output_structure[-1]['weights'][0] size_check = output_neurons != dataset_module.output_size fc_check = output_structure[-1]['layers'][0] == 'fc' if size_check and fc_check: output_structure[-1]['weights'][0] = dataset_module.output_size log.warning('Adjusted output neurons from %s to %s.' 
% ( output_neurons, dataset_module.output_size)) # Prepare model on GPU if not hasattr(dataset_module, 'input_normalization'): dataset_module.input_normalization = None with tf.device(gpu_device): with tf.variable_scope('cnn') as scope: # Training model model = model_utils.model_class( mean=train_means_image, training=True, output_size=dataset_module.output_size, input_normalization=dataset_module.input_normalization) train_scores, model_summary, _ = model.build( data=train_images, layer_structure=model_dict.layer_structure, output_structure=output_structure, log=log, tower_name='cnn') if grad_images: oh_dims = int(train_scores.get_shape()[-1]) target_scores = tf.one_hot(train_labels, oh_dims) * train_scores train_gradients = tf.gradients(target_scores, train_images)[0] log.info('Built training model.') log.debug( json.dumps(model_summary, indent=4), verbose=0) print_model_architecture(model_summary) # Normalize labels on GPU if needed if 'normalize_labels' in exp_params.keys(): if exp_params['normalize_labels'] == 'zscore': train_labels -= train_means_label['mean'] train_labels /= train_means_label['std'] val_labels -= train_means_label['mean'] val_labels /= train_means_label['std'] log.info('Z-scoring labels.') elif exp_params['normalize_labels'] == 'mean': train_labels -= train_means_label['mean'] val_labels -= val_means_label['mean'] log.info('Mean-centering labels.') # Check the shapes of labels and scores if not isinstance(train_scores, list): if len( train_scores.get_shape()) != len( train_labels.get_shape()): train_shape = train_scores.get_shape().as_list() label_shape = train_labels.get_shape().as_list() val_shape = val_scores.get_shape().as_list() val_label_shape = val_labels.get_shape().as_list() if len( train_shape) == 2 and len( label_shape) == 1 and train_shape[-1] == 1: train_labels = tf.expand_dims(train_labels, axis=-1) val_labels = tf.expand_dims(val_labels, axis=-1) elif len( train_shape) == 2 and len( label_shape) == 1 and train_shape[-1] == 1: train_scores = tf.expand_dims(train_scores, axis=-1) val_scores = tf.expand_dims(val_scores, axis=-1) # Prepare the loss function train_loss, _ = loss_utils.loss_interpreter( logits=train_scores, # TODO labels=train_labels, loss_type=config.loss_function, weights=config.loss_weights, dataset_module=dataset_module) # Add loss tensorboard tracking if isinstance(train_loss, list): for lidx, tl in enumerate(train_loss): tf.summary.scalar('training_loss_%s' % lidx, tl) train_loss = tf.add_n(train_loss) else: tf.summary.scalar('training_loss', train_loss) # Add weight decay if requested if len(model.regularizations) > 0: train_loss = loss_utils.wd_loss( regularizations=model.regularizations, loss=train_loss, wd_penalty=config.regularization_strength) assert config.lr is not None, 'No learning rate.' 
# TODO: Make a QC function if config.lr > 1: old_lr = config.lr config.lr = loss_utils.create_lr_schedule( train_batch=config.batch_size, num_training=config.lr) config.optimizer = 'momentum' log.info('Forcing momentum classifier.') else: old_lr = None train_op = loss_utils.optimizer_interpreter( loss=train_loss, lr=config.lr, optimizer=config.optimizer, constraints=config.optimizer_constraints, model=model) log.info('Built training loss function.') # Add a score for the training set train_accuracy = eval_metrics.metric_interpreter( metric=dataset_module.score_metric, # TODO: Attach to exp cnfg pred=train_scores, # TODO labels=train_labels) # Add aux scores if requested train_aux = {} if hasattr(dataset_module, 'aux_scores'): for m in dataset_module.aux_scores: train_aux[m] = eval_metrics.metric_interpreter( metric=m, pred=train_scores, labels=train_labels) # [0] # TODO: Fix for multiloss # Prepare remaining tensorboard summaries if config.tensorboard_images: if len(train_images.get_shape()) == 4: tf_fun.image_summaries(train_images, tag='Training images') if (np.asarray( train_labels.get_shape().as_list()) > 1).sum() > 2: tf_fun.image_summaries( train_labels, tag='Training_targets') tf_fun.image_summaries( train_scores, tag='Training_predictions') if isinstance(train_accuracy, list): for tidx, ta in enumerate(train_accuracy): tf.summary.scalar('training_accuracy_%s' % tidx, ta) else: tf.summary.scalar('training_accuracy', train_accuracy) if config.pr_curve: if isinstance(train_scores, list): for pidx, train_score in enumerate(train_scores): train_label = train_labels[:, pidx] pr_summary.op( tag='training_pr_%s' % pidx, predictions=tf.cast( tf.argmax( train_score, axis=-1), tf.float32), labels=tf.cast(train_label, tf.bool), display_name='training_precision_recall_%s' % pidx) else: pr_summary.op( tag='training_pr', predictions=tf.cast( tf.argmax( train_scores, axis=-1), tf.float32), labels=tf.cast(train_labels, tf.bool), display_name='training_precision_recall') log.info('Added training summaries.') with tf.variable_scope('cnn', tf.AUTO_REUSE) as scope: # Validation model scope.reuse_variables() val_model = model_utils.model_class( mean=train_means_image, # Normalize with train data training=False, output_size=dataset_module.output_size, input_normalization=dataset_module.input_normalization) val_scores, _, _ = val_model.build( # Ignore summary data=val_images, layer_structure=model_dict.layer_structure, output_structure=output_structure, log=log, tower_name='cnn') if grad_images: oh_dims = int(val_scores.get_shape()[-1]) target_scores = tf.one_hot(val_labels, oh_dims) * val_scores val_gradients = tf.gradients(target_scores, val_images)[0] log.info('Built validation model.') # Check the shapes of labels and scores val_loss, _ = loss_utils.loss_interpreter( logits=val_scores, labels=val_labels, loss_type=config.loss_function, weights=config.loss_weights, dataset_module=dataset_module) # Add loss tensorboard tracking if isinstance(val_loss, list): for lidx, tl in enumerate(val_loss): tf.summary.scalar('validation_loss_%s' % lidx, tl) val_loss = tf.add_n(val_loss) else: tf.summary.scalar('validation_loss', val_loss) # Add a score for the validation set val_accuracy = eval_metrics.metric_interpreter( metric=dataset_module.score_metric, # TODO pred=val_scores, labels=val_labels) # Add aux scores if requested val_aux = {} if hasattr(dataset_module, 'aux_scores'): for m in dataset_module.aux_scores: val_aux[m] = eval_metrics.metric_interpreter( metric=m, pred=val_scores, labels=val_labels) # [0] # 
TODO: Fix for multiloss # Prepare tensorboard summaries if config.tensorboard_images: if len(val_images.get_shape()) == 4: tf_fun.image_summaries( val_images, tag='Validation') if (np.asarray( val_labels.get_shape().as_list()) > 1).sum() > 2: tf_fun.image_summaries( val_labels, tag='Validation_targets') tf_fun.image_summaries( val_scores, tag='Validation_predictions') if isinstance(val_accuracy, list): for vidx, va in enumerate(val_accuracy): tf.summary.scalar('validation_accuracy_%s' % vidx, va) else: tf.summary.scalar('validation_accuracy', val_accuracy) if config.pr_curve: if isinstance(val_scores, list): for pidx, val_score in enumerate(val_scores): val_label = val_labels[:, pidx] pr_summary.op( tag='validation_pr_%s' % pidx, predictions=tf.cast( tf.argmax( val_score, axis=-1), tf.float32), labels=tf.cast(val_label, tf.bool), display_name='validation_precision_recall_%s' % pidx) else: pr_summary.op( tag='validation_pr', predictions=tf.cast( tf.argmax( val_scores, axis=-1), tf.float32), labels=tf.cast(val_labels, tf.bool), display_name='validation_precision_recall') log.info('Added validation summaries.') # Set up summaries and saver if not hasattr(config, 'max_to_keep'): config.max_to_keep = None saver = tf.train.Saver( var_list=tf.global_variables(), max_to_keep=config.max_to_keep) summary_op = tf.summary.merge_all() # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run( tf.group( tf.global_variables_initializer(), tf.local_variables_initializer()) ) summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph) # Set up exemplar threading if placeholder_data: coord, threads = None, None else: coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # Create dictionaries of important training and validation information train_dict = { 'train_loss': train_loss, 'train_images': train_images, 'train_labels': train_labels, 'train_op': train_op, 'train_scores': train_scores } val_dict = { 'val_loss': val_loss, 'val_images': val_images, 'val_labels': val_labels, 'val_scores': val_scores, } if grad_images: train_dict['train_gradients'] = train_gradients val_dict['val_gradients'] = val_gradients if isinstance(train_accuracy, list): for tidx, (ta, va) in enumerate(zip(train_accuracy, val_accuracy)): train_dict['train_accuracy_%s' % tidx] = ta val_dict['val_accuracy_%s' % tidx] = va else: train_dict['train_accuracy_0'] = train_accuracy val_dict['val_accuracy_0'] = val_accuracy if load_and_evaluate_ckpt is not None: # Remove the train operation and add a ckpt pointer del train_dict['train_op'] if hasattr(dataset_module, 'aux_score'): # Attach auxillary scores to tensor dicts for m in dataset_module.aux_scores: train_dict['train_aux_%s' % m] = train_aux[m] val_dict['val_aux_%s' % m] = val_aux[m] # Start training loop if old_lr is not None: config.lr = old_lr np.save( os.path.join( dir_list['condition_evaluations'], 'training_config_file'), config) log.info('Starting training') if load_and_evaluate_ckpt is not None: return evaluation.evaluation_loop( config=config, db=db, coord=coord, sess=sess, summary_op=summary_op, summary_writer=summary_writer, saver=saver, threads=threads, summary_dir=dir_list['summaries'], checkpoint_dir=dir_list['checkpoints'], weight_dir=dir_list['weights'], train_dict=train_dict, val_dict=val_dict, train_model=model, val_model=val_model, exp_params=exp_params, placeholder_data=placeholder_data) else: output_dict 
= training.training_loop( config=config, db=db, coord=coord, sess=sess, summary_op=summary_op, summary_writer=summary_writer, saver=saver, threads=threads, summary_dir=dir_list['summaries'], checkpoint_dir=dir_list['checkpoints'], weight_dir=dir_list['weights'], train_dict=train_dict, val_dict=val_dict, train_model=model, val_model=val_model, exp_params=exp_params) log.info('Finished training.') model_name = config.model_struct.replace('/', '_') if output_dict is not None: py_utils.save_npys( data=output_dict, model_name=model_name, output_string=dir_list['experiment_evaluations'])
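# The grad_images option in the model-building code above produces
# saliency-style maps: the logits are masked with a one-hot label tensor and
# then differentiated with respect to the input images. A standalone sketch
# of that computation; the stand-in model, shapes, and names below are
# illustrative only and not part of the repo.
def grad_image_sketch():
    images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
    labels = tf.placeholder(tf.int64, [None], name='labels')
    logits = tf.layers.dense(tf.layers.flatten(images), 10)  # stand-in model
    target_scores = tf.one_hot(labels, int(logits.get_shape()[-1])) * logits
    grad_images = tf.gradients(target_scores, images)[0]  # d(target)/d(pixel)
    return grad_images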
def train_vgg16(device):
    os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
    config = vggConfig()
    train_data = config.training_images
    train_meta = np.load(config.training_meta)
    print 'Using train tfrecords: %s | %s image/heatmap combos' % (
        [train_data], len(train_meta['labels']))
    validation_data = config.validation_images
    val_meta = np.load(config.validation_meta)
    print 'Using validation tfrecords: %s | %s images' % (
        validation_data, len(val_meta['labels']))

    # Make output directories if they do not exist
    dt_stamp = 'grayscale_' + \
        str(config.initial_learning_rate)[2:] + '_' + \
        str(len(train_meta['labels'])) + '_' + \
        re.split(r'\.', str(datetime.now()))[0].replace(
            ' ', '_').replace(':', '_').replace('-', '_')
    config.train_checkpoint = os.path.join(
        config.train_checkpoint, dt_stamp)  # timestamp this run
    out_dir = os.path.join(config.results, dt_stamp)
    dir_list = [
        config.train_checkpoint,
        config.train_summaries,
        config.results,
        out_dir]
    [make_dir(d) for d in dir_list]
    print '-' * 60
    print('Training model: ' + dt_stamp)
    print '-' * 60

    # Prepare data on CPU
    train_images, train_labels = inputs(
        train_data,
        config.train_batch,
        config.image_size,
        config.model_image_size[:2],
        train=config.data_augmentations,
        num_epochs=config.epochs,
        return_heatmaps=False,
        is_grayscale=True)
    val_images, val_labels = inputs(
        validation_data,
        config.validation_batch,
        config.image_size,
        config.model_image_size[:2],
        num_epochs=None,
        return_heatmaps=False,
        is_grayscale=True)
    step = get_or_create_global_step()
    step_op = tf.assign(step, step + 1)

    # Prepare model on GPU
    with tf.variable_scope('cnn') as scope:
        vgg = vgg16.model_struct()
        train_mode = tf.get_variable(name='training', initializer=True)
        vgg.build(
            train_images,
            is_training=True,
            is_grayscale=True,
            batchnorm=True)

        # Prepare the loss function
        loss = softmax_loss(logits=vgg.fc8, labels=train_labels)

        # Add weight decay of fc6/7/8 if requested
        if config.wd_penalty is not None:
            loss = wd_loss(
                loss=loss,
                trainables=tf.trainable_variables(),
                config=config)
        lr = tf.train.exponential_decay(
            learning_rate=config.initial_learning_rate,
            global_step=step_op,
            decay_steps=config.decay_steps,
            decay_rate=config.learning_rate_decay_factor,
            staircase=True)
        if config.optimizer == "adam":
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                train_op = tf.train.AdamOptimizer(lr).minimize(loss)
        elif config.optimizer == "sgd":
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                train_op = tf.train.GradientDescentOptimizer(lr).minimize(loss)
        elif config.optimizer == "nestrov":
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                train_op = tf.train.MomentumOptimizer(
                    lr, config.momentum, use_nesterov=True).minimize(loss)
        else:
            raise Exception(
                'Unknown optimizer! Options are adam, sgd, or nestrov.')
        train_accuracy = class_accuracy(
            vgg.prob, train_labels)  # training accuracy

        # Add summaries for debugging
        tf.summary.image('train images', train_images)
        tf.summary.image('validation images', val_images)
        tf.summary.scalar("loss", loss)
        tf.summary.scalar("training accuracy", train_accuracy)

        # Setup validation op
        scope.reuse_variables()
        # Validation graph reuses the training weights; batchnorm runs in
        # inference mode via is_training=False
        val_vgg = vgg16.model_struct()
        val_vgg.build(
            val_images,
            is_training=False,
            is_grayscale=True,
            batchnorm=True)
        # Calculate validation accuracy
        val_accuracy = class_accuracy(val_vgg.prob, val_labels)
        tf.summary.scalar("validation accuracy", val_accuracy)

    # Set up summaries and saver
    saver = tf.train.Saver(
        tf.global_variables(), max_to_keep=config.keep_checkpoints)
    restorer = tf.train.Saver()
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer()))
    restorer.restore(
        sess,
        '/media/data_cifs/andreas/vgg_train/checkpoints/grayscale_-05_1283163_2017_09_09_19_20_09/model_228000.ckpt-228000')
    summary_dir = os.path.join(config.train_summaries, dt_stamp)
    summary_writer = tf.summary.FileWriter(summary_dir, sess.graph)

    # Set up exemplar threading
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Start training loop
    np.save(os.path.join(out_dir, 'training_config_file'), config)
    training_loop(
        config,
        coord,
        sess,
        train_op,
        step_op,
        summary_op,
        summary_writer,
        loss,
        saver,
        threads,
        out_dir,
        summary_dir,
        validation_data,
        val_accuracy,
        train_accuracy,
        lr)
def test_vgg16(simulate_subjects=0, svc=None):
    """Run the pretrained vgg16 on the validation tfrecords.

    simulate_subjects: number of simulated subjects to evaluate (0 skips it).
    svc: a fitted classifier (e.g., the LinearSVC saved by
        train_classifier_on_model); required when simulate_subjects > 0.
    """
    dbc = config.config()
    validation_pointer = os.path.join(
        dbc.packaged_data_path,
        '%s_%s.%s' % (
            'validation', dbc.packaged_data_file, dbc.output_format))

    # Prepare data on CPU
    with tf.device('/cpu:0'):
        val_images, val_labels, val_files = inputs(
            tfrecord_file=validation_pointer,
            batch_size=dbc.validation_batch,
            im_size=dbc.validation_image_size,
            model_input_shape=dbc.model_image_size[:2],
            num_epochs=1,
            data_augmentations=dbc.validation_augmentations,
            shuffle_batch=True)

    # Prepare pretrained model on GPU
    with tf.device('/gpu:0'):
        with tf.variable_scope('cnn'):
            cnn = vgg16.Vgg16()
            validation_mode = tf.Variable(False, name='training')
            cnn.build(
                val_images,
                output_shape=1000,
                train_mode=validation_mode)
            sample_layer = cnn['fc7']
            accs = class_accuracy(cnn.prob, val_labels)

    saver = tf.train.Saver(tf.all_variables(), max_to_keep=10)

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    sess.run(
        tf.group(
            tf.initialize_all_variables(),
            tf.initialize_local_variables()))

    # Set up exemplar threading
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    saver.restore(sess, dbc.model_types['vgg16'][0])

    # Start testing loop
    results = {'accs': [], 'preds': [], 'labs': [], 'files': []}
    np_path = os.path.join(dbc.checkpoint_directory, 'validation_results')
    step = 0
    scores, labels = [], []
    try:
        print 'Testing model'
        while not coord.should_stop():
            start_time = time.time()
            score, lab, f, probs, acc = sess.run(
                [sample_layer, val_labels, val_files, cnn['prob'], accs])
            # Accumulate per-batch features, predictions, and accuracy
            scores += [score]
            labels += [lab]
            results['accs'] += [acc]
            results['preds'] += [np.argmax(probs, axis=-1)]
            results['labs'] += [lab]
            results['files'] += [f]
            print acc
            step += 1
    except tf.errors.OutOfRangeError:
        print 'Done testing.'
    finally:
        np.savez(np_path, **results)
        print 'Saved to: %s' % np_path
        coord.request_stop()
    coord.join(threads)
    sess.close()
    print '%.4f%% correct' % (100 * np.mean(results['accs']))

    if simulate_subjects:
        assert svc is not None, 'Pass a fitted classifier for simulations.'
        sim_subs = []
        print 'Simulating subjects'
        scores = np.concatenate(scores)
        labels = np.concatenate(results['labs'])
        for sub in tqdm(range(simulate_subjects)):
            it_results = {'accs': [], 'preds': [], 'labs': [], 'files': []}
            # Silence a random ~5% of fc7 units per simulated subject
            neuron_drop = np.random.rand(scores.shape[1]) > .95
            it_scores = np.copy(scores)
            it_scores[:, neuron_drop] = 0
            pred = svc.predict(it_scores)
            acc = np.mean(pred == labels)
            it_results['accs'] += [acc]
            it_results['preds'] += [pred]
            it_results['labs'] += [labels]
            it_results['files'] += [np.concatenate(results['files'])]
            sim_subs += [it_results]
        np.save(np_path + '_sim_subs', sim_subs)
def model_builder( params, config, model_spec, gpu_device, cpu_device, placeholders=False, tensorboard_images=False): """Standard model building routines.""" config = py_utils.add_to_config( d=params, config=config) exp_label = '%s_%s' % (params['exp_name'], py_utils.get_dt_stamp()) directories = py_utils.prepare_directories(config, exp_label) dataset_module = py_utils.import_module( model_dir=config.dataset_info, dataset=config.dataset) dataset_module = dataset_module.data_processing() # hardcoded class name train_key = [k for k in dataset_module.folds.keys() if 'train' in k] if not len(train_key): train_key = 'train' else: train_key = train_key[0] ( train_data, train_means_image, train_means_label) = py_utils.get_data_pointers( dataset=config.dataset, base_dir=config.tf_records, cv=train_key) val_key = [k for k in dataset_module.folds.keys() if 'val' in k] if not len(val_key): val_key = 'train' else: val_key = val_key[0] if hasattr(config, 'val_dataset'): val_dataset = config.val_dataset else: val_dataset = config.dataset val_data, val_means_image, val_means_label = py_utils.get_data_pointers( dataset=val_dataset, base_dir=config.tf_records, cv=val_key) # Create data tensors with tf.device(cpu_device): if placeholders: ( train_images, train_labels, val_images, val_labels) = get_placeholders(dataset_module, config) placeholders = dataset_module.get_data() else: train_images, train_labels = data_loader.inputs( dataset=train_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=config.data_augmentations, num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle_train) val_images, val_labels = data_loader.inputs( dataset=val_data, batch_size=config.batch_size, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=config.val_augmentations, num_epochs=config.epochs, tf_reader_settings=dataset_module.tf_reader, shuffle=config.shuffle_val) # Build training and val models with tf.device(gpu_device): train_logits, train_hgru_act = model_spec( data_tensor=train_images, reuse=None, training=True) val_logits, val_hgru_act = model_spec( data_tensor=val_images, reuse=tf.AUTO_REUSE, training=False) # Derive loss loss_type = None if hasattr(config, 'loss_type'): loss_type = config.loss_type train_loss = losses.derive_loss( labels=train_labels, logits=train_logits, loss_type=loss_type) val_loss = losses.derive_loss( labels=val_labels, logits=val_logits, loss_type=loss_type) if hasattr(config, 'metric_type'): metric_type = config.metric_type else: metric_type = 'accuracy' if metric_type == 'pearson': train_accuracy = metrics.pearson_score( labels=train_labels, pred=train_logits, REDUCTION=tf.reduce_mean) val_accuracy = metrics.pearson_score( labels=val_labels, pred=val_logits, REDUCTION=tf.reduce_mean) else: train_accuracy = metrics.class_accuracy( labels=train_labels, logits=train_logits) val_accuracy = metrics.class_accuracy( labels=val_labels, logits=val_logits) tf.summary.scalar('train_accuracy', train_accuracy) tf.summary.scalar('val_accuracy', val_accuracy) if tensorboard_images: tf.summary.image('train_images', train_images) tf.summary.image('val_images', val_images) # Build optimizer train_op = optimizers.get_optimizer( train_loss, config['lr'], config['optimizer']) # Initialize tf variables saver = tf.train.Saver( var_list=tf.global_variables()) summary_op = tf.summary.merge_all() sess = 
tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run( tf.group( tf.global_variables_initializer(), tf.local_variables_initializer())) summary_writer = tf.summary.FileWriter( directories['summaries'], sess.graph) if not placeholders: coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) else: coord, threads = None, None # Create dictionaries of important training and validation information train_dict = { 'train_loss': train_loss, 'train_images': train_images, 'train_labels': train_labels, 'train_op': train_op, 'train_accuracy': train_accuracy } if isinstance(train_hgru_act, dict): for k, v in train_hgru_act.iteritems(): train_dict[k] = v else: train_dict['activity'] = train_hgru_act val_dict = { 'val_loss': val_loss, 'val_images': val_images, 'val_labels': val_labels, 'val_accuracy': val_accuracy, } if isinstance(val_hgru_act, dict): for k, v in val_hgru_act.iteritems(): val_dict[k] = v else: val_dict['activity'] = val_hgru_act # Count parameters num_params = np.sum( [np.prod(x.get_shape().as_list()) for x in tf.trainable_variables()]) print 'Model has approximately %s trainable params.' % num_params # Create datastructure for saving data ds = data_structure.data( batch_size=config.batch_size, validation_iters=config.validation_iters, num_validation_evals=config.num_validation_evals, shuffle_val=config.shuffle_val, lr=config.lr, loss_function=config.loss_function, optimizer=config.optimizer, model_name=config.model_name, dataset=config.dataset, num_params=num_params, output_directory=config.results) # Start training loop training.training_loop( config=config, coord=coord, sess=sess, summary_op=summary_op, summary_writer=summary_writer, saver=saver, threads=threads, directories=directories, train_dict=train_dict, val_dict=val_dict, exp_label=exp_label, data_structure=ds, placeholders=placeholders)
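# model_builder can score models with metrics.pearson_score, which is not included in
# this excerpt. A minimal sketch of a per-example Pearson correlation in TF1, reduced
# across the batch, assuming `pred` and `labels` reshape to [batch, units] of the same
# size (signature inferred from the call above; not the original implementation):
import tensorflow as tf


def pearson_score(labels, pred, REDUCTION=tf.reduce_mean):
    """Per-example Pearson correlation between predictions and labels (sketch)."""
    x = tf.cast(tf.reshape(pred, [tf.shape(pred)[0], -1]), tf.float32)
    y = tf.cast(tf.reshape(labels, [tf.shape(labels)[0], -1]), tf.float32)
    xm = x - tf.reduce_mean(x, axis=1, keep_dims=True)
    ym = y - tf.reduce_mean(y, axis=1, keep_dims=True)
    cov = tf.reduce_mean(xm * ym, axis=1)
    denom = tf.sqrt(
        tf.reduce_mean(tf.square(xm), axis=1) *
        tf.reduce_mean(tf.square(ym), axis=1))
    r = cov / (denom + 1e-8)  # [batch] correlations
    return REDUCTION(r)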
def build_model(exp_params, config, log, dt_string, gpu_device, cpu_device, use_db=True, add_config=None, placeholders=False, checkpoint=None, test=False, map_out=None, num_batches=None, tensorboard_images=False): """Standard model building routines.""" config = py_utils.add_to_config(d=exp_params, config=config) if not hasattr(config, 'force_path'): config.force_path = False exp_label = '%s_%s_%s' % (exp_params['model'], exp_params['experiment'], py_utils.get_dt_stamp()) directories = py_utils.prepare_directories(config, exp_label) dataset_module = py_utils.import_module(pre_path=config.dataset_classes, module=config.train_dataset) train_dataset_module = dataset_module.data_processing() if not config.force_path: (train_data, _, _) = py_utils.get_data_pointers( dataset=train_dataset_module.output_name, base_dir=config.tf_records, local_dir=config.local_tf_records, cv='train') else: train_data = train_dataset_module.train_path dataset_module = py_utils.import_module(pre_path=config.dataset_classes, module=config.val_dataset) val_dataset_module = dataset_module.data_processing() if not config.force_path: val_data, _, _ = py_utils.get_data_pointers( dataset=val_dataset_module.output_name, base_dir=config.tf_records, local_dir=config.local_tf_records, cv='val') else: val_data = train_dataset_module.val_path # val_means_image, val_means_label = None, None # Create data tensors if hasattr(train_dataset_module, 'aux_loss'): train_aux_loss = train_dataset_module.aux_loss else: train_aux_loss = None with tf.device(cpu_device): if placeholders and not test: # Train with placeholders (pl_train_images, pl_train_labels, pl_val_images, pl_val_labels, train_images, train_labels, val_images, val_labels) = get_placeholders(train_dataset=train_dataset_module, val_dataset=val_dataset_module, config=config) train_module_data = train_dataset_module.get_data() val_module_data = val_dataset_module.get_data() placeholders = { 'train': { 'images': train_module_data[0]['train'], 'labels': train_module_data[1]['train'] }, 'val': { 'images': val_module_data[0]['val'], 'labels': val_module_data[1]['val'] }, } train_aux, val_aux = None, None elif placeholders and test: test_dataset_module = train_dataset_module # Test with placeholders (pl_test_images, pl_test_labels, test_images, test_labels) = get_placeholders_test( test_dataset=test_dataset_module, config=config) test_module_data = test_dataset_module.get_data() placeholders = { 'test': { 'images': test_module_data[0]['test'], 'labels': test_module_data[1]['test'] }, } train_aux, val_aux = None, None else: train_images, train_labels, train_aux = data_loader.inputs( dataset=train_data, batch_size=config.train_batch_size, model_input_image_size=train_dataset_module. model_input_image_size, tf_dict=train_dataset_module.tf_dict, data_augmentations=config.train_augmentations, num_epochs=config.epochs, aux=train_aux_loss, tf_reader_settings=train_dataset_module.tf_reader, shuffle=config.shuffle_train) if hasattr(val_dataset_module, 'val_model_input_image_size'): val_dataset_module.model_input_image_size = val_dataset_module.val_model_input_image_size val_images, val_labels, val_aux = data_loader.inputs( dataset=val_data, batch_size=config.val_batch_size, model_input_image_size=val_dataset_module. 
model_input_image_size, tf_dict=val_dataset_module.tf_dict, data_augmentations=config.val_augmentations, num_epochs=None, tf_reader_settings=val_dataset_module.tf_reader, shuffle=config.shuffle_val) # Build training and val models model_spec = py_utils.import_module(module=config.model, pre_path=config.model_classes) if hasattr(train_dataset_module, 'force_output_size'): train_dataset_module.output_size = train_dataset_module.force_output_size if hasattr(val_dataset_module, 'force_output_size'): val_dataset_module.output_size = val_dataset_module.force_output_size if hasattr(config, 'loss_function'): train_loss_function = config.loss_function val_loss_function = config.loss_function else: train_loss_function = config.train_loss_function val_loss_function = config.val_loss_function # Route test vs train/val h_check = [ x for x in tf.trainable_variables() if 'homunculus' in x.name or 'humonculus' in x.name ] if not hasattr(config, 'default_restore'): config.default_restore = False if test: assert len(gpu_device) == 1, 'Testing only works with 1 gpu.' gpu_device = gpu_device[0] with tf.device(gpu_device): if not placeholders: test_images = val_images test_labels = val_labels test_dataset_module = val_dataset_module test_logits, test_vars = model_spec.build_model( data_tensor=test_images, reuse=None, training=False, output_shape=test_dataset_module.output_size) if test_logits.dtype is not tf.float32: test_logits = tf.cast(test_logits, tf.float32) # Derive loss if not hasattr(config, 'test_loss_function'): test_loss_function = val_loss_function else: test_loss_function = config.test_loss_function test_loss = losses.derive_loss(labels=test_labels, logits=test_logits, loss_type=test_loss_function) # Derive score test_score = losses.derive_score(labels=test_labels, logits=test_logits, loss_type=test_loss_function, score_type=config.score_function) # Initialize model (sess, saver, summary_op, summary_writer, coord, threads, restore_saver) = initialize_tf(config=config, placeholders=placeholders, ckpt=checkpoint, default_restore=config.default_restore, directories=directories) if placeholders: proc_images = test_images proc_labels = test_labels test_images = pl_test_images test_labels = pl_test_labels _, H, W, _ = test_vars['model_output_y'].shape jacobian = tf.gradients(test_logits, test_vars['model_output_x'])[ 0] # g.batch_jacobian(test_vars['model_output_x'], test_images) test_dict = { 'test_loss': test_loss, 'test_score': test_score, 'test_images': test_images, 'test_labels': test_labels, 'test_logits': test_logits, 'test_jacobian': jacobian } if placeholders: test_dict['test_proc_images'] = proc_images test_dict['test_proc_labels'] = proc_labels if len(h_check): test_dict['homunculus'] = h_check[0] if isinstance(test_vars, dict): for k, v in test_vars.iteritems(): test_dict[k] = v else: test_dict['activity'] = test_vars else: train_losses, val_losses, tower_grads, norm_updates = [], [], [], [] train_scores, val_scores = [], [] train_image_list, train_label_list = [], [] val_image_list, val_label_list = [], [] train_reuse = None if not hasattr(config, 'lr_schedule'): config.lr_schedule = None if hasattr(config, 'loss_function'): train_loss_function = config.loss_function val_loss_function = config.loss_function else: train_loss_function = config.train_loss_function val_loss_function = config.val_loss_function # Prepare loop if not placeholders: train_batch_queue = tf_fun.get_batch_queues(images=train_images, labels=train_labels, gpu_device=gpu_device) val_batch_queue = 
tf_fun.get_batch_queues(images=val_images, labels=val_labels, gpu_device=gpu_device) config.lr = optimizers.get_lr_schedule(lr=config.lr, lr_schedule=config.lr_schedule) opt = optimizers.get_optimizers(optimizer=config.optimizer, lr=config.lr, dtype=train_images.dtype) with tf.device(cpu_device): global_step = tf.train.get_or_create_global_step() for i, gpu in enumerate(gpu_device): # rs = tf.AUTO_REUSE if i > 0 else None with tf.device(gpu): with tf.name_scope('tower_%d' % i) as scope: # Prepare tower data if placeholders: # Multi-gpu: will have to split # train_images per gpu by hand train_image_batch = train_images val_image_batch = val_images train_label_batch = train_labels val_label_batch = val_labels else: (train_image_batch, train_label_batch) = train_batch_queue.dequeue() (val_image_batch, val_label_batch) = val_batch_queue.dequeue() train_image_list += [train_image_batch] train_label_list += [train_label_batch] val_image_list += [val_image_batch] val_label_list += [val_label_batch] # Build models train_logits, train_vars = model_spec.build_model( data_tensor=train_image_batch, reuse=train_reuse, training=True, output_shape=train_dataset_module.output_size) num_training_vars = len(tf.trainable_variables()) val_logits, val_vars = model_spec.build_model( data_tensor=val_image_batch, reuse=True, training=False, output_shape=val_dataset_module.output_size) num_validation_vars = len(tf.trainable_variables()) assert num_training_vars == num_validation_vars, \ 'Found a different # of train and val variables.' train_reuse = True # Derive losses if train_logits.dtype is not tf.float32: train_logits = tf.cast(train_logits, tf.float32) if val_logits.dtype is not tf.float32: val_logits = tf.cast(val_logits, tf.float32) train_loss = losses.derive_loss( labels=train_label_batch, logits=train_logits, images=train_image_batch, loss_type=train_loss_function) val_loss = losses.derive_loss( labels=val_label_batch, logits=val_logits, images=val_image_batch, loss_type=val_loss_function) # Derive score train_score = losses.derive_score( labels=train_labels, logits=train_logits, loss_type=train_loss_function, score_type=config.score_function) val_score = losses.derive_score( labels=val_labels, logits=val_logits, loss_type=val_loss_function, score_type=config.score_function) # Add aux losses if requested if hasattr(model_spec, 'weight_decay'): wd = (model_spec.weight_decay() * tf.add_n([ tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name and 'horizontal' not in v.name and 'mu' not in v.name and 'beta' not in v.name and 'intercept' not in v.name ])) tf.summary.scalar('weight_decay', wd) train_loss += wd if hasattr(model_spec, 'bsds_weight_decay'): wd = (model_spec.bsds_weight_decay()['l2'] * tf.add_n([ tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'horizontal' not in v.name and 'norm' not in v.name ])) tf.summary.scalar('weight_decay_readout', wd) train_loss += wd wd = (model_spec.bsds_weight_decay()['l1'] * tf.add_n([ tf.reduce_sum(tf.abs(v)) for v in tf.trainable_variables() if 'horizontal' in v.name ])) tf.summary.scalar('weight_decay_horizontal', wd) train_loss += wd if hasattr(model_spec, 'orthogonal'): weights = [ v for v in tf.trainable_variables() if 'horizontal' in v.name ] assert len(weights) is not None, \ 'No horizontal weights for laplace.' 
wd = model_spec.orthogonal() * tf.add_n( [tf_fun.orthogonal(w) for w in weights]) tf.summary.scalar('weight_decay', wd) train_loss += wd if hasattr(model_spec, 'laplace'): weights = [ v for v in tf.trainable_variables() if 'horizontal' in v.name ] assert len(weights) is not None, \ 'No horizontal weights for laplace.' wd = model_spec.laplace() * tf.add_n( [tf_fun.laplace(w) for w in weights]) tf.summary.scalar('weight_decay', wd) train_loss += wd # Derive auxilary losses if hasattr(config, 'aux_loss'): aux_loss_type, scale = config.aux_loss.items()[0] for k, v in train_vars.iteritems(): # if k in train_dataset_module.aux_loss.keys(): # ( # aux_loss_type, # scale # ) = train_dataset_module.aux_loss[k] train_loss += (losses.derive_loss( labels=train_labels, logits=v, loss_type=aux_loss_type) * scale) # Gather everything train_losses += [train_loss] val_losses += [val_loss] train_scores += [train_score] val_scores += [val_score] # Compute and store gradients with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): grads = opt.compute_gradients(train_loss) optimizers.check_grads(grads) tower_grads += [grads] # Gather normalization variables norm_updates += [ tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=scope) ] # Recompute and optimize gradients grads = optimizers.average_gradients(tower_grads) if hasattr(config, 'clip_gradients') and config.clip_gradients: grads = optimizers.apply_grad_clip(grads, config.clip_gradients) op_vars = [] if hasattr(config, 'exclusion_lr') and hasattr(config, 'exclusion_scope'): grads_0 = [ x for x in grads if config.exclusion_scope not in x[1].name ] grads_1 = [x for x in grads if config.exclusion_scope in x[1].name] op_vars_0 = optimizers.apply_gradients(opt=opt, grads=grads_0, global_step=global_step) opt_1 = optimizers.get_optimizers(optimizer=config.optimizer, lr=config.exclusion_lr, dtype=train_images.dtype) op_vars_1 = optimizers.apply_gradients(opt=opt_1, grads=grads_1, global_step=global_step) op_vars += [op_vars_0] op_vars += [op_vars_1] else: op_vars += [ optimizers.apply_gradients(opt=opt, grads=grads, global_step=global_step) ] if not hasattr(config, 'variable_moving_average'): config.variable_moving_average = False if config.variable_moving_average: variable_averages = tf.train.ExponentialMovingAverage( config.variable_moving_average, global_step) op_vars += [variable_averages.apply(tf.trainable_variables())] if len(norm_updates): op_vars += [tf.group(*norm_updates)] train_op = tf.group(*op_vars) # Summarize losses and scores train_loss = tf.reduce_mean(train_losses) val_loss = tf.reduce_mean(val_losses) train_score = tf.reduce_mean(train_scores) val_score = tf.reduce_mean(val_scores) if len(train_image_list) > 1: train_image_list = tf.stack(train_image_list, axis=0) train_label_list = tf.stack(train_label_list, axis=0) else: train_image_list = train_image_list[0] train_label_list = train_label_list[0] if len(val_image_list) > 1: val_image_list = tf.stack(val_image_list, axis=0) val_label_list = tf.stack(val_label_list, axis=0) else: val_image_list = val_image_list[0] val_label_list = val_label_list[0] tf.summary.scalar('train_loss', train_loss) tf.summary.scalar('val_loss', val_loss) if tensorboard_images: tf.summary.image('train_images', train_images) tf.summary.image('val_images', val_images) # Initialize model (sess, saver, summary_op, summary_writer, coord, threads, restore_saver) = initialize_tf(config=config, placeholders=placeholders, ckpt=checkpoint, default_restore=config.default_restore, directories=directories) # Create 
dictionaries of important training and validation information if placeholders: proc_train_images = train_images proc_train_labels = train_labels proc_val_images = val_images proc_val_labels = val_labels train_images = pl_train_images train_labels = pl_train_labels val_images = pl_val_images val_labels = pl_val_labels train_dict = { 'train_loss': train_loss, 'train_score': train_score, 'train_images': train_image_list, 'train_labels': train_label_list, 'train_logits': train_logits, 'train_op': train_op } if placeholders: train_dict['proc_train_images'] = proc_train_images train_dict['proc_train_labels'] = proc_train_labels if train_aux is not None: train_dict['train_aux'] = train_aux if tf.contrib.framework.is_tensor(config.lr): train_dict['lr'] = config.lr else: train_dict['lr'] = tf.constant(config.lr) if isinstance(train_vars, dict): for k, v in train_vars.iteritems(): train_dict[k] = v else: train_dict['activity'] = train_vars if hasattr(config, 'save_gradients') and config.save_gradients: grad = tf.gradients(train_logits, train_images)[0] if grad is not None: train_dict['gradients'] = grad else: log.warning('Could not calculate val gradients.') val_dict = { 'val_loss': val_loss, 'val_score': val_score, 'val_images': val_image_list, 'val_logits': val_logits, 'val_labels': val_label_list, } if placeholders: val_dict['proc_val_images'] = proc_val_images val_dict['proc_val_labels'] = proc_val_labels if val_aux is not None: val_dict['aux'] = val_aux if isinstance(val_vars, dict): for k, v in val_vars.iteritems(): val_dict[k] = v else: val_dict['activity'] = val_vars if hasattr(config, 'save_gradients') and config.save_gradients: grad = tf.gradients(val_logits, val_images)[0] if grad is not None: val_dict['gradients'] = grad else: log.warning('Could not calculate val gradients.') if len(h_check): val_dict['homunculus'] = h_check[0] # Add optional info to the config if add_config is not None: extra_list = add_config.split(',') for eidx, extra in enumerate(extra_list): setattr(config, 'extra_%s' % eidx, extra) # Count parameters num_params = tf_fun.count_parameters(var_list=tf.trainable_variables()) print 'Model has approximately %s trainable params.' % num_params if test: return training.test_loop(log=log, config=config, sess=sess, summary_op=summary_op, summary_writer=summary_writer, saver=saver, restore_saver=restore_saver, directories=directories, test_dict=test_dict, exp_label=exp_label, num_params=num_params, checkpoint=checkpoint, num_batches=num_batches, save_weights=config.save_weights, save_checkpoints=config.save_checkpoints, save_activities=config.save_activities, save_gradients=config.save_gradients, map_out=map_out, placeholders=placeholders) else: # Start training loop training.training_loop(log=log, config=config, coord=coord, sess=sess, summary_op=summary_op, summary_writer=summary_writer, saver=saver, restore_saver=restore_saver, threads=threads, directories=directories, train_dict=train_dict, val_dict=val_dict, exp_label=exp_label, num_params=num_params, checkpoint=checkpoint, use_db=use_db, save_weights=config.save_weights, save_checkpoints=config.save_checkpoints, save_activities=config.save_activities, save_gradients=config.save_gradients, placeholders=placeholders)
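# build_model averages per-tower gradients with optimizers.average_gradients before
# applying them, which is not shown in this excerpt. The standard multi-GPU averaging
# it presumably follows (as in the TensorFlow CIFAR-10 multi-tower example) is sketched
# below; it assumes every tower produced a gradient for every shared variable:
import tensorflow as tf


def average_gradients(tower_grads):
    """Average (gradient, variable) lists computed on separate GPU towers.

    tower_grads: list over towers, each a list of (grad, var) pairs where the
    variables are shared across towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars is ((grad_gpu0, var), (grad_gpu1, var), ...)
        grads = tf.stack([g for g, _ in grad_and_vars], axis=0)
        grad = tf.reduce_mean(grads, axis=0)
        # All towers point at the same variable, so keep the first one.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads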
def train_classifier_on_model(train_pointer, model_type, model_weights, selected_layer, config): # Make output directories if they do not exist dt_stamp = '%s_%s_%s_%s' % (model_type, selected_layer, str( config.lr)[2:], re.split('\.', str(datetime.now()))[0].replace( ' ', '_').replace(':', '_').replace('-', '_')) config.checkpoint_directory = os.path.join(config.checkpoint_directory, dt_stamp) # timestamp this run dir_list = [config.checkpoint_directory] [utilities.make_dir(d) for d in dir_list] print '-' * 60 print 'Training %s over a %s. Saving to %s' % (config.classifier, model_type, dt_stamp) print '-' * 60 dcn_flavor = import_cnn(model_type) # Prepare data on CPU with tf.device('/cpu:0'): train_images, train_labels, train_files = inputs( train_pointer, config.train_batch, config.train_image_size, config.model_image_size[:2], num_epochs=config.epochs, shuffle_batch=True) # Prepare pretrained model on GPU with tf.device('/gpu:0'): with tf.variable_scope('cnn'): if 'ckpt' in model_weights: cnn = dcn_flavor.model() else: cnn = dcn_flavor.model(weight_path=model_weights) cnn.build(train_images) sample_layer = cnn[selected_layer] weights, yhat, classifier, class_loss = tf_loss.choose_classifier( sample_layer=sample_layer, y=train_labels, config=config) saver = tf.train.Saver(tf.all_variables(), max_to_keep=10) # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run( tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())) # Set up exemplar threading coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # Start training loop np.save(os.path.join(config.checkpoint_directory, 'training_config_file'), config) step, losses = 0, [] if 'ckpt' in model_weights: saver.restore(sess, model_weights) try: print 'Training model' while not coord.should_stop(): start_time = time.time() _, loss_value, labels = sess.run( [classifier, class_loss, train_labels]) losses.append(loss_value) duration = time.time() - start_time assert not np.isnan(loss_value), 'Model diverged with loss = NaN' # End iteration print_status(step, loss_value, config, duration, 0, '') step += 1 except tf.errors.OutOfRangeError: print 'Done training for %d epochs, %d steps.' % (config.epochs, step) print 'Saved to: %s' % config.checkpoint_directory finally: ckpt_path = os.path.join(config.checkpoint_directory, 'model_' + str(step) + '.ckpt') saver.save(sess, ckpt_path, global_step=step) print 'Saved checkpoint to: %s' % ckpt_path coord.request_stop() coord.join(threads) sess.close() # Return the final checkpoint for testing return ckpt_path, config.checkpoint_directory
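# tf_loss.choose_classifier is not reproduced here. A minimal softmax readout trained
# on top of a frozen CNN feature layer, returning the same (weights, yhat, classifier,
# class_loss) tuple the call above unpacks, could look like this sketch. It assumes
# `sample_layer` is a [batch, features] activation and `y` holds integer labels; the
# variable names and learning rate are illustrative:
import tensorflow as tf


def linear_readout(sample_layer, y, num_classes, lr=1e-3):
    """Softmax regression on a frozen feature layer (illustrative sketch)."""
    features = int(sample_layer.get_shape()[-1])
    weights = tf.get_variable(
        'readout_w', [features, num_classes],
        initializer=tf.zeros_initializer())
    bias = tf.get_variable(
        'readout_b', [num_classes], initializer=tf.zeros_initializer())
    logits = tf.matmul(sample_layer, weights) + bias
    yhat = tf.argmax(logits, axis=1)
    class_loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits))
    # Only the readout is updated; the CNN below it stays fixed.
    classifier = tf.train.GradientDescentOptimizer(lr).minimize(
        class_loss, var_list=[weights, bias])
    return weights, yhat, classifier, class_loss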
def plot_fits( experiment='760_cells_2017_11_04_16_29_09', query_db=False, num_models=3, template_exp='ALLEN_selected_cells_1', process_pnodes=False, allen_dir='/home/drew/Documents/Allen_Brain_Observatory', output_dir='tests/ALLEN_files', stimulus_type='tfrecord', top_n=1, grad='lrp', target_layer='conv1_1', # conv1_1, sep_conv1_1, dog1_1 target_model='conv2d'): # conv2d, sep_conv2d, dog """Plot fits across the RF. experiment: Name of Allen experiment you're plotting. query_db: Use data from DB versus data in Numpys. num_models: The number of architectures you're testing. template_exp: The name of the contextual_circuit model template used.""" sys.path.append(allen_dir) from allen_config import Allen_Brain_Observatory_Config if process_pnodes: from pnodes_declare_datasets_loop import query_hp_hist, sel_exp_query else: from declare_datasets_loop import query_hp_hist, sel_exp_query config = Config() main_config = Allen_Brain_Observatory_Config() db_config = credentials.postgresql_connection() files = glob( os.path.join( allen_dir, main_config.multi_exps, experiment, '*.npz')) assert len(files), 'Couldn\'t find files.' out_data, xs, ys = [], [], [] perfs, model_types, exps, arg_perf = [], [], [], [] count = 0 for f in files: data = np.load(f) d = { 'x': data['rf_data'].item()['on_center_x'], 'y': data['rf_data'].item()['on_center_y'], # x: files['dataset_method'].item()['x_min'], # y: files['dataset_method'].item()['y_min'], } exp_name = { 'experiment_name': data['dataset_method'].item()[ 'experiment_name']} if query_db: perf = query_hp_hist( exp_name['experiment_name'], db_config=db_config) if perf is None: print 'No fits for: %s' % exp_name['experiment_name'] else: raise NotImplementedError d['perf'] = perf d['max_val'] = np.max(perf) out_data += [d] xs += [np.round(d['x'])] ys += [np.round(d['y'])] perfs += [np.max(d['perf'])] count += 1 else: data_files = glob( os.path.join( main_config.ccbp_exp_evals, exp_name['experiment_name'], '*val_losses.npy')) # Scores has preds, labels has GT for gd in data_files: mt = gd.split( os.path.sep)[-1].split( template_exp + '_')[-1].split('_' + 'val')[0] it_data = np.load(gd).item() sinds = np.asarray(it_data.keys())[np.argsort(it_data.keys())] sit_data = [it_data[idx] for idx in sinds] d['perf'] = sit_data d['max_val'] = np.max(sit_data) d['max_idx'] = np.argmax(sit_data) d['mt'] = mt out_data += [d] xs += [np.round(d['x'])] ys += [np.round(d['y'])] perfs += [np.max(sit_data)] arg_perf += [np.argmax(sit_data)] exps += [gd.split(os.path.sep)[-2]] model_types += [mt] count += 1 # Package as a df xs = np.round(np.asarray(xs)).astype(int) ys = np.round(np.asarray(ys)).astype(int) perfs = np.asarray(perfs) arg_perf = np.asarray(arg_perf) exps = np.asarray(exps) model_types = np.asarray(model_types) # Filter to only keep top-scoring values at each x/y (dirty trick) fxs, fys, fperfs, fmodel_types, fexps, fargs = [], [], [], [], [], [] xys = np.vstack((xs, ys)).transpose() cxy = np.ascontiguousarray( # Unique rows xys).view( np.dtype((np.void, xys.dtype.itemsize * xys.shape[1]))) _, idx = np.unique(cxy, return_index=True) uxys = xys[idx] scores = [] for xy in uxys: sel_idx = (xys == xy).sum(axis=-1) == 2 sperfs = perfs[sel_idx] sexps = exps[sel_idx] sargs = arg_perf[sel_idx] sel_mts = model_types[sel_idx] # Only get top conv/sep spots sperfs = sperfs[sel_mts != 'dog'] sperfs = sperfs[sel_mts != 'DoG'] scores += [sperfs.mean() / sperfs.std()] best_fits = np.argmax(np.asarray(scores)) xs = np.asarray([uxys[best_fits][0]]) ys = np.asarray([uxys[best_fits][1]]) 
sel_idx = (xys == uxys[best_fits]).sum(axis=-1) == 2 perfs = np.asarray(perfs[sel_idx]) exps = np.asarray(exps[sel_idx]) model_types = np.asarray(model_types[sel_idx]) umt, model_types_inds = np.unique(model_types, return_inverse=True) # Get weights for the top-n fitting models of each type it_perfs = perfs[model_types == target_model] it_exps = exps[model_types == target_model] # it_args = arg_perf[model_types == target_model] sorted_perfs = np.argsort(it_perfs)[::-1][:top_n] for idx in sorted_perfs: perf = sel_exp_query( experiment_name=it_exps[idx], model=target_model, db_config=db_config) # perf_steps = np.argsort([v['training_step'] for v in perf])[::-1] perf_steps = [v['validation_loss'] for v in perf] max_score = np.max(perf_steps) arg_perf_steps = np.argmax(perf_steps) sel_model = perf[arg_perf_steps] # perf_steps[it_args[idx]]] print 'Using %s' % sel_model model_file = sel_model['ckpt_file'].split('.')[0] model_ckpt = '%s.ckpt-%s' % ( model_file, model_file.split(os.path.sep)[-1].split('_')[-1]) model_meta = '%s.meta' % model_ckpt # Pull stimuli stim_dir = os.path.join( main_config.tf_record_output, sel_model['experiment_name']) stim_files = glob(stim_dir + '*') stim_meta_file = [x for x in stim_files if 'meta' in x][0] # stim_val_data = [x for x in stim_files if 'val.tfrecords' in x][0] stim_val_data = [x for x in stim_files if 'train.tfrecords' in x][0] stim_val_mean = [x for x in stim_files if 'train_means' in x][0] assert stim_meta_file is not None assert stim_val_data is not None assert stim_val_mean is not None stim_meta_data = np.load(stim_meta_file).item() rf_stim_meta_data = stim_meta_data['rf_data'] stim_mean_data = np.load( stim_val_mean).items()[0][1].item()['image']['mean'] # Store sparse noise for reference sparse_rf_on = { 'center_x': rf_stim_meta_data.get('on_center_x', None), 'center_y': rf_stim_meta_data.get('on_center_y', None), 'width_x': rf_stim_meta_data.get('on_width_x', None), 'width_y': rf_stim_meta_data.get('on_width_y', None), 'distance': rf_stim_meta_data.get('on_distance', None), 'area': rf_stim_meta_data.get('on_area', None), 'rotation': rf_stim_meta_data.get('on_rotation', None), } sparse_rf_off = { 'center_x': rf_stim_meta_data.get('off_center_x', None), 'center_y': rf_stim_meta_data.get('off_center_y', None), 'width_x': rf_stim_meta_data.get('off_width_x', None), 'width_y': rf_stim_meta_data.get('off_width_y', None), 'distance': rf_stim_meta_data.get('off_distance', None), 'area': rf_stim_meta_data.get('off_area', None), 'rotation': rf_stim_meta_data.get('off_rotation', None), } sparse_rf = {'on': sparse_rf_on, 'off': sparse_rf_off} # Pull responses dataset_module = py_utils.import_module( model_dir=config.dataset_info, dataset=sel_model['experiment_name']) dataset_module = dataset_module.data_processing() with tf.device('/cpu:0'): if stimulus_type == 'sparse_noise': pass elif stimulus_type == 'drifting_grating': pass elif stimulus_type == 'tfrecord': val_images, val_labels = data_loader.inputs( dataset=stim_val_data, batch_size=1, model_input_image_size=dataset_module.model_input_image_size, tf_dict=dataset_module.tf_dict, data_augmentations=[None], # dataset_module.preprocess, num_epochs=1, tf_reader_settings=dataset_module.tf_reader, shuffle=False ) # Mean normalize log = logger.get(os.path.join(output_dir, 'sta_logs', target_model)) data_dir = os.path.join(output_dir, 'data', target_model) py_utils.make_dir(data_dir) sys.path.append(os.path.join('models', 'structs', sel_model['experiment_name'])) model_dict = __import__(target_model) if 
hasattr(model_dict, 'output_structure'): # Use specified output layer output_structure = model_dict.output_structure else: output_structure = None model = model_utils.model_class( mean=stim_mean_data, training=True, # FIXME output_size=dataset_module.output_size) with tf.device('/gpu:0'): with tf.variable_scope('cnn') as scope: val_scores, model_summary = model.build( data=val_images, layer_structure=model_dict.layer_structure, output_structure=output_structure, log=log, tower_name='cnn') if grad == 'vanilla': grad_image = tf.gradients(model.output, val_images)[0] elif grad == 'lrp': eval_graph = tf.Graph() with eval_graph.as_default(): with eval_graph.gradient_override_map( {'Relu': 'GradLRP'}): grad_image = tf.gradients(model.output, val_images)[0] elif grad == 'cam': eval_graph = tf.Graph() with eval_graph.as_default(): with eval_graph.gradient_override_map( {'Relu': 'GuidedRelu'}): grad_image = tf.gradients(model.output, val_images)[0] else: raise NotImplementedError print(json.dumps(model_summary, indent=4)) # Set up summaries and saver saver = tf.train.Saver(tf.global_variables()) summary_op = tf.summary.merge_all() # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run( tf.group( tf.global_variables_initializer(), tf.local_variables_initializer()) ) saver.restore(sess, model_ckpt) # Set up exemplar threading coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) if target_model == 'conv2d': fname = [ x for x in tf.global_variables() if 'conv1_1_filters:0' in x.name] elif target_model == 'sep_conv2d': fname = [ x for x in tf.global_variables() if 'sep_conv1_1_filters:0' in x.name] elif target_model == 'dog' or target_model == 'DoG': fname = [ x for x in tf.global_variables() if 'dog1_1_filters:0' in x.name] else: raise NotImplementedError val_tensors = { 'images': val_images, 'labels': val_labels, 'filts': fname, 'responses': model.output, # model[target_layer], 'grads': grad_image } all_images, all_preds, all_grads, all_responses = [], [], [], [] step = 0 try: while not coord.should_stop(): val_vals = sess.run(val_tensors.values()) val_dict = {k: v for k, v in zip(val_tensors.keys(), val_vals)} all_images += [val_dict['images']] all_responses += [val_dict['responses']] all_preds += [val_dict['labels'].squeeze()] all_grads += [val_dict['grads'].squeeze()] print 'Finished step %s' % step step += 1 except: print 'Finished tfrecords' finally: coord.request_stop() coord.join(threads) sess.close() # Process and save data # if target_model != 'dog': # filters = val_dict['filts'][0].squeeze().transpose(2, 0, 1) all_images = np.concatenate(all_images).squeeze() all_grads = np.asarray(all_grads) all_preds = np.asarray(all_preds).reshape(-1, 1) all_responses = np.asarray(all_responses).squeeze() np.savez( os.path.join(data_dir, 'data'), images=all_images, pred=all_preds, # filters=filters, grads=all_grads) # if target_model != 'dog': # save_mosaic( # maps=filters, # [0].squeeze().transpose(2, 0, 1), # output=os.path.join(data_dir, '%s_filters' % target_layer), # rc=8, # cc=4, # title='%s filters' % ( # target_layer)) print 'Complete.'
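# plot_fits swaps the ReLU gradient with gradient_override_map to get LRP / guided
# backprop style saliency maps. Note that the override only affects ReLU ops created
# while the override context is active, so it has to wrap graph construction rather
# than just the tf.gradients call. A minimal guided-ReLU example of the pattern
# (model_fn is a placeholder for whatever network is being built; this is a sketch
# of the general technique, not the original code):
import tensorflow as tf


@tf.RegisterGradient('GuidedRelu')
def _guided_relu_grad(op, grad):
    # Pass back only positive gradients where the ReLU input was positive.
    return grad * tf.cast(grad > 0., grad.dtype) * tf.cast(
        op.inputs[0] > 0., grad.dtype)


def guided_saliency(model_fn, images):
    """Build model_fn under a ReLU gradient override, return d(output)/d(images)."""
    g = tf.get_default_graph()
    with g.gradient_override_map({'Relu': 'GuidedRelu'}):
        output = model_fn(images)  # forward ops must be created inside this block
    return tf.gradients(output, images)[0]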
def test_classifier(validation_pointer, model_ckpt, model_dir, model_type, model_weights, selected_layer, config, simulate_subjects=120): # Make output directories if they do not exist config.checkpoint_directory = model_dir print '-' * 60 print 'Testing the model over a %s. Saving to %s' % (model_type, model_dir) print '-' * 60 dcn_flavor = import_cnn(model_type) # Prepare data on CPU with tf.device('/cpu:0'): val_images, val_labels, val_files = inputs( tfrecord_file=validation_pointer, batch_size=config.validation_batch, im_size=config.validation_image_size, model_input_shape=config.model_image_size[:2], num_epochs=1, data_augmentations=config.validation_augmentations, shuffle_batch=True) # Prepare pretrained model on GPU with tf.device('/gpu:0'): with tf.variable_scope('cnn'): if 'ckpt' in model_weights: cnn = dcn_flavor.model() else: cnn = dcn_flavor.model(weight_path=model_weights) cnn.build(val_images) sample_layer = cnn[selected_layer] saver = tf.train.Saver(tf.all_variables(), max_to_keep=10) # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run( tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())) # Set up exemplar threading coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # Start training loop results = {'accs': [], 'preds': [], 'labs': [], 'files': []} with open(model_ckpt, 'rb') as fid: svc = cPickle.load(fid) zscorer = np.load(model_ckpt.split('.')[0] + '_normalization.npz') np_path = os.path.join(config.checkpoint_directory, 'validation_results') step = 0 scores, labels = [], [] if '.ckpt' in model_weights: saver.restore(sess, model_weights) print 'Restored model from %s' % model_weights try: print 'Testing model' while not coord.should_stop(): start_time = time.time() score, lab, f = sess.run([sample_layer, val_labels, val_files]) norm_score = (score - zscorer['mu']) / zscorer['sd'] scores += [norm_score] pred = svc.predict(norm_score) acc = np.mean(pred == lab) results['accs'] += [acc] results['preds'] += [pred] results['labs'] += [lab] results['files'] += [f] duration = time.time() - start_time print_status(step, 0, config, duration, acc, np_path) step += 1 except tf.errors.OutOfRangeError: print 'Done testing.' finally: np.savez(np_path, **results) print 'Saved to: %s' % np_path coord.request_stop() coord.join(threads) sess.close() print '%.4f%% correct' % np.mean(results['accs']) if simulate_subjects: sim_subs = [] print 'Simulating subjects' scores = np.concatenate(scores) labels = np.concatenate(results['labs']) for sub in tqdm(range(simulate_subjects)): it_results = {'accs': [], 'preds': [], 'labs': [], 'files': []} neuron_drop = np.random.rand(scores.shape[1]) > .95 it_scores = np.copy(scores) it_scores[:, neuron_drop] = 0 pred = svc.predict(it_scores) acc = np.mean(pred == labels) it_results['accs'] += [acc] it_results['preds'] += [pred] it_results['labs'] += [labels] it_results['files'] += [np.concatenate(results['files'])] sim_subs += [it_results] np.save(np_path + '_sim_subs', sim_subs)
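# The simulate_subjects branch above builds synthetic "subjects" by zeroing a random
# ~5% of feature columns and re-scoring with the fitted SVC. The core of that
# resampling, isolated as its own helper (scores, labels, and svc as defined above
# are assumed; the function name is illustrative):
import numpy as np


def simulate_subject(svc, scores, labels, drop_prob=0.05, rng=np.random):
    """Zero a random subset of feature columns and re-measure SVC accuracy."""
    neuron_drop = rng.rand(scores.shape[1]) < drop_prob
    perturbed = np.copy(scores)
    perturbed[:, neuron_drop] = 0
    preds = svc.predict(perturbed)
    return np.mean(preds == labels), preds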
def train_and_eval(config): """Train and evaluate the model.""" # Prepare model training dt_stamp = re.split( '\.', str(datetime.now()))[0].\ replace(' ', '_').replace(':', '_').replace('-', '_') dt_dataset = config.model_type + '_' + dt_stamp + '/' config.train_checkpoint = os.path.join(config.model_output, dt_dataset) # timestamp this run config.summary_dir = os.path.join(config.train_summaries, config.model_output, dt_dataset) dir_list = [config.train_checkpoint, config.summary_dir] [make_dir(d) for d in dir_list] # Prepare model inputs train_data = config.train_data validation_data = config.val_data # Prepare data on CPU with tf.device('/cpu:0'): train_images, train_labels = inputs( tfrecord_file=train_data, batch_size=config.train_batch, im_size=config.resize, model_input_shape=config.resize, train=None, img_mean_value='train_mean_big.npz', num_epochs=config.num_epochs) val_images, val_labels = inputs(tfrecord_file=validation_data, batch_size=config.train_batch, im_size=config.resize, model_input_shape=config.resize, train=None, img_mean_value='train_mean_big.npz', num_epochs=config.num_epochs) tf.summary.image('train images', tf.cast(train_images, tf.uint8)) tf.summary.image('validation images', tf.cast(val_images, tf.uint8)) tf.summary.image( 'train labels', tf.cast( tf.reshape(train_labels, [config.train_batch, 112, 112, 1]), tf.float32)) tf.summary.image( 'validation labels', tf.cast(tf.reshape(val_labels, [config.train_batch, 112, 112, 1]), tf.float32)) num_train_imgs = 0 for record in tf.python_io.tf_record_iterator(train_data): num_train_imgs += 1 num_val_imgs = 0 for record in tf.python_io.tf_record_iterator(validation_data): num_val_imgs += 1 print 'Number of training images', num_train_imgs print 'Number of validation images', num_val_imgs # Prepare model on GPU with tf.device('/gpu:0'): with tf.variable_scope('cnn') as scope: model = model_struct(config.vgg16_npy_path) train_mode = tf.get_variable(name='training', initializer=True) model.build(train_images, train_mode=train_mode, batchnorm=config.batch_norm, trainable_layers=config.trainable_layers, config=config, shuffled=False) # Prepare the cost function cost, train_error_matrix = euclidean_loss(model.prediction, train_labels, config.train_batch) tf.summary.scalar("train cost", cost) train_op = tf.train.AdamOptimizer(config.lr).minimize(cost) tf.summary.image("prediction", model.prediction) # Setup validation op if validation_data is not False: scope.reuse_variables() # Validation graph is the same as training except no batchnorm val_model = model_struct( vgg16_npy_path= '/media/data_cifs/ajones/deepgaze/salicon_prep_g11/vgg19.npy' ) val_model.build(val_images, train_mode=train_mode, batchnorm=config.batch_norm, trainable_layers=config.trainable_layers, config=config, shuffled=True) # Calculate validation accuracy val_cost, val_error_matrix = euclidean_loss( val_model.prediction, val_labels, config.train_batch) tf.summary.scalar("validation cost", val_cost) tf.summary.image("val prediction", val_model.prediction) # Set up summaries and saver saver = tf.train.Saver(tf.global_variables(), max_to_keep=5) summary_op = tf.summary.merge_all() # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run([ tf.group(tf.global_variables_initializer()), tf.local_variables_initializer() ]) sess.run([tf.local_variables_initializer()]) summary_writer = tf.summary.FileWriter(config.summary_dir, sess.graph) # Set up exemplar 
threading coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # Start training loop np.save(config.train_checkpoint, config) step, epoch_no, val_max, losses = 0, 0, 0, [] epoch_loss_values = [] best_val_loss = float('inf') try: while not coord.should_stop(): start_time = time.time() halt = False if step % 20 == 0: if config.show_output: _, loss_value, val_loss, summary_str, imgs, labs, predictions, cb_logits, logits, te_mat, preds_presoft = sess.run( [ train_op, cost, val_cost, summary_op, tf.reshape(train_images[0:3], [3, 224, 224, 3]), tf.reshape(train_labels[0:3], [3, 112, 112]), tf.reshape(model.prediction[0:3], [3, 112, 112]), tf.reshape(model.center_bias_logits[0:3], [3, 112, 112]), tf.reshape(model.logits[0:3], [3, 112, 112]), tf.reshape(train_error_matrix[0:3], [3, 112, 112]), tf.reshape(model.prediction_presoft[0:3], [3, 112, 112]) ]) # plt.imshow(np.mean(sess.run(model.feature_encoder)[0], 2)) # plt.show() # import ipdb; ipdb.set_trace() else: _, loss_value, val_loss, summary_str, imgs, labs, predictions, logits = sess.run( [ train_op, cost, val_cost, summary_op, tf.reshape(train_images[0:3], [3, 224, 224, 3]), tf.reshape(train_labels[0:3], [3, 112, 112]), tf.reshape(model.prediction[0:3], [3, 112, 112]), tf.reshape(model.logits[0:3], [3, 112, 112]) ]) if step % 60 == 0: np.save('running_imgs', imgs) np.save('running_labs', labs) np.save('running_preds', predictions) np.save('running_logits', logits) summary_writer.add_summary(summary_str, step) duration = time.time() - start_time # Training status format_str = ( '%s: step %d, loss = %.2f, val loss = %.2f (%.1f examples/sec; ' '%.3f sec/batch) | logdir = %s\n') print(format_str % (datetime.now(), step, loss_value, val_loss, config.train_batch / duration, float(duration), config.summary_dir)) if val_loss < best_val_loss: saver.save(sess, os.path.join(config.train_checkpoint, 'model_' + str(step) + '.ckpt'), global_step=step) if config.show_output: # and step > 300 and step % 20 == 0: num_columns = 6 num_imgs_plot = 3 for i in range(num_imgs_plot): plt.subplot(num_imgs_plot, num_columns, i * num_columns + 1) im = plt.imshow(imgs[i]) plt.colorbar(im) plt.subplot(num_imgs_plot, num_columns, i * num_columns + 2) lab = plt.imshow(labs[i]) plt.colorbar(lab) plt.subplot(num_imgs_plot, num_columns, i * num_columns + 3) logits_out = plt.imshow(logits[i]) plt.colorbar(logits_out) plt.subplot(num_imgs_plot, num_columns, i * num_columns + 4) cbl = plt.imshow(cb_logits[i]) plt.colorbar(cbl) plt.subplot(num_imgs_plot, num_columns, i * num_columns + 5) preds = plt.imshow(preds_presoft[i]) plt.colorbar(preds) plt.subplot(num_imgs_plot, num_columns, i * num_columns + 6) preds = plt.imshow(predictions[i]) plt.colorbar(preds) plt.show() else: _, loss_value = sess.run([train_op, cost]) # assert not np.isnan(loss_value), 'Model diverged with loss = NaN' if np.isnan(loss_value): print 'yikes. nan loss -- check your cost function.' import pdb pdb.set_trace() # End iteration step += 1 except tf.errors.OutOfRangeError: print('Done training for %d epochs, %d steps.' % (epoch_no, step)) finally: coord.request_stop() np.save(os.path.join(config.tfrecord_dir, 'training_loss'), losses) coord.join(threads) sess.close()
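# euclidean_loss is not reproduced in this excerpt. A plausible per-pixel squared-error
# loss that also returns the per-example error maps (matching how train_error_matrix is
# reshaped to [3, 112, 112] above) is sketched here, assuming predictions and labels
# flatten to the same size per example; this is a sketch, not the original function:
import tensorflow as tf


def euclidean_loss(prediction, labels, batch_size):
    """Mean summed squared error plus per-example error maps (sketch)."""
    pred = tf.reshape(tf.cast(prediction, tf.float32), [batch_size, -1])
    labs = tf.reshape(tf.cast(labels, tf.float32), [batch_size, -1])
    error_matrix = tf.square(pred - labs)                 # [batch, pixels]
    cost = tf.reduce_mean(tf.reduce_sum(error_matrix, axis=1))
    return cost, error_matrix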
def test_classifier(validation_pointer, model_ckpt, model_dir, model_type, model_weights, selected_layer, config, simulate_subjects=121): # Make output directories if they do not exist config.checkpoint_directory = model_dir print '-' * 60 print 'Testing the model over a %s. Saving to %s' % (model_type, model_dir) print '-' * 60 dcn_flavor = import_cnn(model_type) # Prepare data on CPU with tf.device('/cpu:0'): val_images, val_labels, val_files = inputs( validation_pointer, config.validation_batch, config.validation_image_size, config.model_image_size[:2], 1, shuffle_batch=False) # Prepare pretrained model on GPU with tf.device('/gpu:0'): with tf.variable_scope('cnn'): if 'ckpt' in model_weights: cnn = dcn_flavor.model() else: cnn = dcn_flavor.model(weight_path=model_weights) cnn.build(val_images) sample_layer = cnn[selected_layer] weights, yhat, classifier, class_loss = tf_loss.choose_classifier( sample_layer=sample_layer, y=val_labels, config=config) saver = tf.train.Saver(tf.all_variables(), max_to_keep=10) # Initialize the graph sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) # Need to initialize both of these if supplying num_epochs to inputs sess.run( tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())) # Set up exemplar threading coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # Start training loop results = {'accs': [], 'preds': [], 'labs': [], 'files': []} # if int(tf.__version__.split('.')[1]) > 10: model_ckpt += '-%s' % re.search('\d+.ckpt', model_ckpt).group().split('.ckpt')[0] saver.restore(sess, model_ckpt) np_path = os.path.join(config.checkpoint_directory, 'validation_results') step = 0 scores, labels = [], [] try: print 'Testing model' while not coord.should_stop(): start_time = time.time() pred, lab, f = sess.run([yhat, val_labels, val_files]) pred = (pred > 0).astype(int).reshape(1, -1) acc = np.mean(pred == lab) scores += [pred] results['accs'] += [acc] results['preds'] += [pred] results['labs'] += [lab] results['files'] += [f] duration = time.time() - start_time print_status(step, 0, config, duration, acc, np_path) step += 1 except tf.errors.OutOfRangeError: print 'Done testing.' finally: np.savez(np_path, **results) print 'Saved to: %s' % np_path coord.request_stop() coord.join(threads) sess.close() print '%.4f%% correct' % np.mean(results['accs']) if simulate_subjects: sim_subs = [] print 'Simulating subjects' scores = np.concatenate(scores) labels = np.concatenate(results['labs']) for sub in tqdm(range(simulate_subjects)): it_results = {'accs': [], 'preds': [], 'labs': [], 'files': []} neuron_drop = np.random.rand(scores.shape[1]) > .95 it_scores = np.copy(scores) it_scores[:, neuron_drop] = 0 acc = np.mean(pred == labels) it_results['accs'] += [acc] it_results['preds'] += [pred] it_results['labs'] += [labels] it_results['files'] += [np.concatenate(results['files'])] sim_subs += [it_results] np.save(np_path + '_sim_subs', sim_subs)