def __init__(self, output_dir=None, **config):
    standard_config = {
        'learning_rate': 0.0,
    }
    standard_config.update(config)

    self.modalities = config['modalities']

    # If specified, load measurements of the experts.
    if 'measurement_exp' in config or 'dirichlet_params' in config:
        if 'measurement_exp' in config:
            measurements = np.load(
                ExperimentData(config['measurement_exp'])
                .get_artifact('counts.npz'))
        else:
            measurements = config['dirichlet_params']
        self.dirichlet_params = {
            modality: measurements[modality].astype('float32')
            for modality in self.modalities
        }
        self.class_counts = measurements['class_counts'].astype('float32')
    else:
        print('WARNING: Could not yet import measurements, you need to fit '
              'this model first.')

    BaseModel.__init__(self, name='DirichletFusion', output_dir=output_dir,
                       custom_training=True, **standard_config)
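# A minimal NumPy/SciPy sketch (not part of the model above) of how the stored
# Dirichlet parameters can be turned into a fused posterior for a single
# pixel: p(c | s_rgb, s_depth) is proportional to p(c) * prod_m Dir(s_m | alpha_{m,c}).
# It assumes the shapes used above and in time_dirichlet_fcn: params[modality]
# holds one concentration vector per ground-truth class in its columns, and
# `fuse_single_pixel` is a hypothetical helper, not a repository function.
import numpy as np
from scipy.stats import dirichlet as scipy_dirichlet


def fuse_single_pixel(probs, params, class_counts):
    """`probs` maps modality -> softmax probability vector for one pixel."""
    prior = class_counts / class_counts.sum()
    num_classes = len(prior)
    log_posterior = np.log(prior)
    for modality, prob in probs.items():
        for c in range(num_classes):
            # likelihood of observing this softmax output under class c
            log_posterior[c] += scipy_dirichlet.logpdf(
                prob, params[modality][:, c])
    return np.argmax(log_posterior)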
def time_dirichlet_fcn(net_config, fusion_experiment, repetitions):
    # cityscapes size
    rgb = tf.ones([1, 768, 384, 3])
    depth = tf.ones([1, 768, 384, 1])

    rgb_score = fcn(rgb, 'rgb', net_config['num_units'],
                    net_config['num_classes'], trainable=False,
                    batchnorm=False)['score']
    depth_score = fcn(depth, 'depth', net_config['num_units'],
                      net_config['num_classes'], trainable=False,
                      batchnorm=False)['score']
    rgb_prob = tf.nn.softmax(rgb_score, 3)
    depth_prob = tf.nn.softmax(depth_score, 3)

    # Load the Dirichlet parameters fitted in the given fusion experiment.
    record = ExperimentData(fusion_experiment).get_record()
    dirichlet_params = record['info']['dirichlet_params']
    dirichlet_config = record['config']['net_config']

    # Create all the Dirichlet distributions conditional on the ground-truth
    # class.
    dirichlets = {modality: {} for modality in ['rgb', 'depth']}
    sigma = dirichlet_config['sigma']
    for c in range(net_config['num_classes']):
        for m in ('rgb', 'depth'):
            dirichlets[m][c] = tf.contrib.distributions.Dirichlet(
                sigma * dirichlet_params[m][:, c].astype('float32'),
                validate_args=False, allow_nan_stats=False)

    # Set the prior of the classes from the measured class counts.
    data_prior = (dirichlet_params['class_counts'] /
                  (1e-20 + dirichlet_params['class_counts'].sum())
                  ).astype('float32')

    fused_score = dirichlet_fusion([rgb_prob, depth_prob],
                                   list(dirichlets.values()), data_prior)
    fused_class = tf.argmax(fused_score, 3)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(fused_class)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
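# The loop above measures end-to-end session time, including the first run,
# which also pays for kernel compilation and memory allocation. A hedged
# variant (`time_op` is a hypothetical helper, not in the repository) that
# discards warm-up runs before measuring:
import time

import numpy as np


def time_op(sess, op, repetitions, warmup=1):
    for _ in range(warmup):
        sess.run(op)  # warm-up: compile kernels, allocate buffers
    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(op)
        times.append(time.time() - start)
    return np.mean(times), np.std(times)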
def rgb_to_depth(net_config, data_config, starting_weights, num_iterations,
                 _run):
    """Training for progressive FCN with transfer from existing RGB column to
    Depth."""
    # Set up the directories for diagnostics.
    output_dir = create_directories(_run._id, ex)

    # Get the existing RGB weights.
    training_experiment = ExperimentData(starting_weights['experiment_id'])
    # Take the first weights file there is in this experiment.
    filename = next(
        artifact['name']
        for artifact in training_experiment.get_record()['artifacts']
        if 'weights' in artifact['name'])
    rgb_weights = np.load(training_experiment.get_artifact(filename))

    # For the first layer, take the mean over all 3 channels. Therefore, we
    # have to define a new weights dict.
    new_weights = {key: rgb_weights[key] for key in rgb_weights}
    new_weights['rgb_conv1_1/kernel'] = rgb_weights['rgb_conv1_1/kernel'].mean(
        2, keepdims=True)
    # We need a file handle for this new weights dict, therefore we save the
    # weights into a temporary file.
    np.savez('/tmp/translated_rgb_weights.npz', **new_weights)

    # Create the network.
    with ProgressiveFCN(output_dir=output_dir, **net_config) as net:
        # Import the weights created above.
        net.import_weights('/tmp/translated_rgb_weights.npz')

        train_network(
            net, output_dir, data_config, num_iterations,
            starting_weights=False, experiment=ex,
            additional_eval_data=get_all_sequence_validation_sets(data_config))

        print('INFO: Evaluating against the training sequences')
        evaluate(net, data_config)

        print('INFO: Evaluating against all sequences')
        measurements = evaluate_on_all_synthia_seqs(net, data_config)
        _run.info['measurements'] = measurements
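# Quick check of the kernel translation above: averaging a conv kernel of
# shape [height, width, in_channels, out_channels] over the input-channel
# axis turns a 3-channel RGB kernel into a 1-channel depth kernel of the
# same spatial size. Small self-contained demonstration:
import numpy as np

kernel = np.random.randn(3, 3, 3, 64)         # [h, w, rgb channels, filters]
depth_kernel = kernel.mean(2, keepdims=True)  # average over channel axis
assert depth_kernel.shape == (3, 3, 1, 64)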
def time_bayes_lookup_fcn(net_config, fusion_experiment, repetitions):
    # cityscapes size
    rgb = tf.ones([1, 768, 384, 3])
    depth = tf.ones([1, 768, 384, 1])

    rgb_score = fcn(rgb, 'rgb', net_config['num_units'],
                    net_config['num_classes'], trainable=False,
                    batchnorm=False)['score']
    depth_score = fcn(depth, 'depth', net_config['num_units'],
                      net_config['num_classes'], trainable=False,
                      batchnorm=False)['score']

    # Load the confusion matrices of both experts.
    record = ExperimentData(fusion_experiment).get_record()
    confusion_matrices = record['info']['confusion_matrices']
    # Transform into a list.
    confusion_matrices = [confusion_matrices['rgb'],
                          confusion_matrices['depth']]
    decision_matrix = tf.constant(bayes_decision_matrix(confusion_matrices))

    rgb_class = tf.argmax(tf.nn.softmax(rgb_score), 3)
    depth_class = tf.argmax(tf.nn.softmax(depth_score), 3)

    # fused_class = tf.gather_nd(decision_matrix,
    #                            tf.stack([rgb_class, depth_class], axis=-1))
    # gather_nd is too slow as it does not run on the GPU, so we express the
    # lookup as a product of one-hot encodings instead:
    rgb_class = tf.to_int64(tf.one_hot(rgb_class, net_config['num_classes']))
    depth_class = tf.to_int64(
        tf.one_hot(depth_class, net_config['num_classes']))
    fused_class = tf.reduce_sum(
        tf.multiply(decision_matrix,
                    tf.multiply(tf.expand_dims(rgb_class, -1),
                                tf.expand_dims(depth_class, -2))),
        [-2, -1])

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(fused_class)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
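# The one-hot trick above implements a 2-D table lookup without gather_nd: the
# outer product of the two one-hot class encodings is zero everywhere except
# at the (rgb, depth) index pair, so multiplying with the decision matrix and
# summing recovers exactly the looked-up entry. A minimal NumPy illustration
# (`table` stands in for the decision matrix; shapes are illustrative):
import numpy as np

table = np.arange(9).reshape(3, 3)   # decision matrix, [classes, classes]
i, j = 1, 2                          # rgb class, depth class
one_hot_i = np.eye(3, dtype=int)[i]  # [0, 1, 0]
one_hot_j = np.eye(3, dtype=int)[j]  # [0, 0, 1]
# The outer product selects exactly the (i, j) entry of the table.
assert (table * np.outer(one_hot_i, one_hot_j)).sum() == table[i, j]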
def time_bayes_fcn(net_config, fusion_experiment, repetitions):
    # cityscapes size
    rgb = tf.ones([1, 768, 384, 3])
    depth = tf.ones([1, 768, 384, 1])

    rgb_score = fcn(rgb, 'rgb', net_config['num_units'],
                    net_config['num_classes'], trainable=False,
                    batchnorm=False)['score']
    depth_score = fcn(depth, 'depth', net_config['num_units'],
                      net_config['num_classes'], trainable=False,
                      batchnorm=False)['score']

    # Load the confusion matrices of both experts.
    record = ExperimentData(fusion_experiment).get_record()
    confusion_matrices = record['info']['confusion_matrices']
    # Transform into a list.
    confusion_matrices = [confusion_matrices['rgb'],
                          confusion_matrices['depth']]

    rgb_class = tf.argmax(tf.nn.softmax(rgb_score), 3)
    depth_class = tf.argmax(tf.nn.softmax(depth_score), 3)
    fused_class = tf.argmax(
        bayes_fusion([rgb_class, depth_class], confusion_matrices)[0], 3)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(fused_class)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
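# What bayes_fusion plausibly computes (a hedged sketch, not the repository
# implementation): with each expert reduced to its argmax label o_m, the fused
# posterior follows from the confusion matrices as
#   p(c | o_rgb, o_depth) proportional to p(c) * prod_m p(o_m | c),
# where p(o_m | c) is read off the row-normalized confusion matrix of modality
# m. Minimal NumPy version for one pixel; the row = true class, column =
# predicted class layout is an assumption:
import numpy as np


def bayes_fuse_pixel(observed, confusion_matrices, prior):
    """`observed` holds one predicted class label per modality."""
    log_posterior = np.log(prior)
    for o, cm in zip(observed, confusion_matrices):
        likelihood = cm / cm.sum(1, keepdims=True)  # p(predicted | true)
        log_posterior += np.log(likelihood[:, o] + 1e-20)
    return np.argmax(log_posterior)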
def rerun(experiment_id, _run):
    # Load the old experiment.
    old_run = ExperimentData(experiment_id).get_record()
    print('Re-Run of experiment "%s"' % old_run['experiment']['name'])

    # Load the experiment function.
    module = module_mapper[old_run['experiment']['mainfile']]
    command = getattr(module, old_run['command'])

    config = old_run['config']
    # Add the run to the arguments if the command expects it, and drop the
    # seed if it does not.
    if '_run' in command.__code__.co_varnames:
        config['_run'] = _run
    if 'seed' not in command.__code__.co_varnames:
        config.pop('seed', None)

    # Now execute the old command.
    command(**config)
    sys.stdout.flush()
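# The signature check in rerun relies on `__code__.co_varnames`, which lists a
# function's parameter names followed by its local variable names. Tiny
# self-contained demonstration (`example_command` is illustrative):
def example_command(net_config, _run):
    pass

assert '_run' in example_command.__code__.co_varnames
assert 'seed' not in example_command.__code__.co_varnames
# Note that co_varnames also includes locals, so
# inspect.signature(command).parameters would be the stricter check.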
def __init__(self, output_dir=None, confusion_matrices=False, **config):
    standard_config = {
        'learning_rate': 0.0,
        'class_prior': 'data',
    }
    standard_config.update(config)

    # Load the confusion matrices, either from the given dict or from the
    # records of the given evaluation experiments.
    self.modalities = []
    self.confusion_matrices = {}
    if confusion_matrices:
        for key, matrix in confusion_matrices.items():
            self.modalities.append(key)
            self.confusion_matrices[key] = matrix.astype('float32').T
    else:
        for key, exp_id in config['eval_experiments'].items():
            self.modalities.append(key)
            self.confusion_matrices[key] = np.array(
                ExperimentData(exp_id).get_record()['info']
                ['confusion_matrix']['values']).astype('float32').T

    FusionModel.__init__(self, 'BayesFusion', output_dir=output_dir,
                         **standard_config)
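# Hypothetical usage sketch of the constructor above, assuming two experts
# with precomputed confusion matrices (rows: true class, columns: predicted
# class, before the transpose applied in __init__). The config keys beyond
# confusion_matrices are assumptions, not documented parameters:
import numpy as np

matrices = {
    'rgb': np.array([[80, 20], [10, 90]]),
    'depth': np.array([[70, 30], [25, 75]]),
}
net = BayesFusion(confusion_matrices=matrices, output_dir='/tmp/bayes',
                  num_classes=2)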
def collect_data(fitting_experiment):
    exp = ExperimentData(fitting_experiment)
    record = exp.get_record()
    evaluation_data = record['config']['evaluation_data']
    net_config = record['config']['net_config']
    starting_weights = record['config']['starting_weights']
    confusion_matrices = record['info']['confusion_matrices']
    # Load the confusion matrices as numpy arrays.
    confusion_matrices = {
        key: np.array(val['values'])
        for key, val in confusion_matrices.items()
    }

    data, _, _ = split_test_data(evaluation_data)

    # Now collect insight on the bayes mix.
    predictions = []
    likelihoods = []
    conditionals = []
    probs = []
    with BayesFusion(confusion_matrices=confusion_matrices,
                     **net_config) as net:
        import_weights_into_network(net, starting_weights)

        for batch in data.get_test_data():
            insight = net.get_insight(batch)
            probs.append(insight[0])
            likelihoods.append(insight[1])
            conditionals.append(insight[2])
            predictions.append(insight[3])

    outpath = '/cluster/work/riner/users/blumh/measurements/{}'.format(
        fitting_experiment)
    if not path.exists(outpath):
        mkdir(outpath)
    np.savez_compressed(path.join(outpath, 'predictions.npz'), *predictions)
    np.savez_compressed(path.join(outpath, 'likelihoods.npz'), *likelihoods)
    np.savez_compressed(path.join(outpath, 'conditionals.npz'), *conditionals)
    np.savez_compressed(path.join(outpath, 'probs.npz'), *probs)
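# Because the arrays are passed positionally, np.savez_compressed stores them
# under the keys 'arr_0', 'arr_1', ... . They can be read back in batch order
# like this (the path is illustrative):
import numpy as np

archive = np.load('/path/to/predictions.npz')
# Sort numerically: 'arr_10' would sort before 'arr_2' lexicographically.
batches = [archive[key]
           for key in sorted(archive.files, key=lambda k: int(k[4:]))]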
def time_variance_fcn(net_config, fusion_experiment, repetitions):
    # cityscapes size
    rgb = tf.ones([1, 768, 384, 3])
    depth = tf.ones([1, 768, 384, 1])

    # Load the method parameters.
    record = ExperimentData(fusion_experiment).get_record()
    variance_config = record['config']['net_config']

    def test_pipeline(inputs, modality):

        def sample_pipeline(inputs, modality, reuse=False):
            """One dropout sample."""
            layers = fcn(inputs, modality, net_config['num_units'],
                         net_config['num_classes'], trainable=False,
                         is_training=False,
                         dropout_rate=variance_config['dropout_rate'],
                         dropout_layers=['pool3'], batchnorm=False)
            prob = tf.nn.softmax(layers['score'])
            return prob

        # For classification, we sample distributions with
        # Dropout-Monte-Carlo and fuse the outputs according to their
        # variance.
        samples = tf.stack([
            sample_pipeline(inputs, modality, reuse=(i != 0))
            for i in range(variance_config['num_samples'])
        ], axis=4)

        variance = tf.reduce_mean(tf.nn.moments(samples, [4])[1], axis=3,
                                  keep_dims=True)
        # We get the label by passing the input without dropout.
        prob = tf.nn.softmax(
            fcn(inputs, modality, net_config['num_units'],
                net_config['num_classes'], trainable=False,
                is_training=False, batchnorm=False)['score'])
        return prob, variance

    rgb_prob, rgb_var = test_pipeline(rgb, 'rgb')
    depth_prob, depth_var = test_pipeline(depth, 'depth')

    fused_score = variance_fusion([rgb_prob, depth_prob],
                                  [rgb_var, depth_var])
    label = tf.argmax(fused_score, 3)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(label)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
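# What variance_fusion plausibly computes (a hedged sketch, not the repository
# implementation): an inverse-variance weighted average of the experts'
# class probabilities, so that the more certain expert dominates the fused
# prediction. Minimal NumPy version for one pixel; `variance_fuse_pixel` is a
# hypothetical helper:
import numpy as np


def variance_fuse_pixel(probs, variances, eps=1e-20):
    """`probs`: list of class-probability vectors, `variances`: scalars."""
    weights = np.array([1.0 / (v + eps) for v in variances])
    weights /= weights.sum()  # normalize the inverse-variance weights
    fused = sum(w * p for w, p in zip(weights, probs))
    return np.argmax(fused)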