def time_dirichlet_fcn(net_config, fusion_experiment, repetitions):
    """Benchmark the Dirichlet-fusion FCN pipeline on a cityscapes-sized input.

    Args:
        net_config: dict with at least 'num_units' and 'num_classes'.
        fusion_experiment: experiment id whose record provides the fitted
            Dirichlet parameters ('info'/'dirichlet_params') and the fusion
            net config ('config'/'net_config').
        repetitions: number of timed forward passes.

    Prints the mean and standard deviation of the per-run wall-clock time.
    """
    # cityscapes size
    rgb = tf.ones([1, 768, 384, 3])
    depth = tf.ones([1, 768, 384, 1])
    rgb_score = fcn(rgb, 'rgb', net_config['num_units'],
                    net_config['num_classes'], trainable=False,
                    batchnorm=False)['score']
    depth_score = fcn(depth, 'depth', net_config['num_units'],
                      net_config['num_classes'], trainable=False,
                      batchnorm=False)['score']
    rgb_prob = tf.nn.softmax(rgb_score, 3)
    depth_prob = tf.nn.softmax(depth_score, 3)

    # load dirichlet parameter
    record = ExperimentData(fusion_experiment).get_record()
    dirichlet_params = record['info']['dirichlet_params']
    dirichlet_config = record['config']['net_config']

    # Create all the Dirichlet distributions conditional on ground-truth class
    dirichlets = {modality: {} for modality in ['rgb', 'depth']}
    sigma = dirichlet_config['sigma']
    for c in range(net_config['num_classes']):
        for m in ('rgb', 'depth'):
            dirichlets[m][c] = tf.contrib.distributions.Dirichlet(
                sigma * dirichlet_params[m][:, c].astype('float32'),
                validate_args=False, allow_nan_stats=False)

    # Set the Prior of the classes (epsilon avoids division by zero when the
    # class counts sum to 0).
    data_prior = (
        dirichlet_params['class_counts'] /
        (1e-20 + dirichlet_params['class_counts'].sum())).astype('float32')

    fused_score = dirichlet_fusion([rgb_prob, depth_prob],
                                   list(dirichlets.values()), data_prior)
    fused_class = tf.argmax(fused_score, 3)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    # Warm-up run: the first session.run pays one-time graph optimisation and
    # memory-allocation costs that would otherwise skew the reported timings.
    sess.run(fused_class)

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(fused_class)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
def time_bayes_lookup_fcn(net_config, fusion_experiment, repetitions):
    """Benchmark Bayes fusion via a precomputed decision lookup table.

    Args:
        net_config: dict with at least 'num_units' and 'num_classes'.
        fusion_experiment: experiment id whose record provides the per-modality
            confusion matrices ('info'/'confusion_matrices').
        repetitions: number of timed forward passes.

    Prints the mean and standard deviation of the per-run wall-clock time.
    """
    # cityscapes size
    rgb = tf.ones([1, 768, 384, 3])
    depth = tf.ones([1, 768, 384, 1])
    rgb_score = fcn(rgb, 'rgb', net_config['num_units'],
                    net_config['num_classes'], trainable=False,
                    batchnorm=False)['score']
    depth_score = fcn(depth, 'depth', net_config['num_units'],
                      net_config['num_classes'], trainable=False,
                      batchnorm=False)['score']

    # load confusion matrices
    record = ExperimentData(fusion_experiment).get_record()
    confusion_matrices = record['info']['confusion_matrices']
    # transform into list
    confusion_matrices = [
        confusion_matrices['rgb'], confusion_matrices['depth']
    ]
    decision_matrix = tf.constant(bayes_decision_matrix(confusion_matrices))

    rgb_class = tf.argmax(tf.nn.softmax(rgb_score), 3)
    depth_class = tf.argmax(tf.nn.softmax(depth_score), 3)

    # The straightforward lookup would be
    #   tf.gather_nd(decision_matrix, tf.stack([rgb_class, depth_class], -1))
    # but gather_nd does not run on GPU and is too slow. Instead select the
    # table entry by an outer product of the two one-hot class encodings.
    rgb_class = tf.to_int64(tf.one_hot(rgb_class, net_config['num_classes']))
    depth_class = tf.to_int64(
        tf.one_hot(depth_class, net_config['num_classes']))
    fused_class = tf.reduce_sum(
        tf.multiply(
            decision_matrix,
            tf.multiply(tf.expand_dims(rgb_class, -1),
                        tf.expand_dims(depth_class, -2))), [-2, -1])

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    # Warm-up run: exclude one-time graph optimisation/allocation cost from
    # the timed repetitions.
    sess.run(fused_class)

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(fused_class)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
def time_depth_fcn(net_config, repetitions):
    """Benchmark a single depth-only FCN forward pass on a cityscapes-sized input.

    Args:
        net_config: dict with at least 'num_units' and 'num_classes'.
        repetitions: number of timed forward passes.

    Prints the mean and standard deviation of the per-run wall-clock time.
    """
    # cityscapes size
    depth = tf.ones([1, 768, 384, 1])
    depth_score = fcn(depth, 'depth', net_config['num_units'],
                      net_config['num_classes'], trainable=False,
                      batchnorm=False)['score']
    output_class = tf.argmax(tf.nn.softmax(depth_score, 3), 3)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    # Warm-up run: exclude one-time graph optimisation/allocation cost from
    # the timed repetitions.
    sess.run(output_class)

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(output_class)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
def time_bayes_fcn(net_config, fusion_experiment, repetitions):
    """Benchmark Bayes fusion of RGB and depth FCN experts.

    Args:
        net_config: dict with at least 'num_units' and 'num_classes'.
        fusion_experiment: experiment id whose record provides the per-modality
            confusion matrices ('info'/'confusion_matrices').
        repetitions: number of timed forward passes.

    Prints the mean and standard deviation of the per-run wall-clock time.
    """
    # cityscapes size
    rgb = tf.ones([1, 768, 384, 3])
    depth = tf.ones([1, 768, 384, 1])
    rgb_score = fcn(rgb, 'rgb', net_config['num_units'],
                    net_config['num_classes'], trainable=False,
                    batchnorm=False)['score']
    depth_score = fcn(depth, 'depth', net_config['num_units'],
                      net_config['num_classes'], trainable=False,
                      batchnorm=False)['score']

    # load confusion matrices
    record = ExperimentData(fusion_experiment).get_record()
    confusion_matrices = record['info']['confusion_matrices']
    # transform into list
    confusion_matrices = [
        confusion_matrices['rgb'], confusion_matrices['depth']
    ]

    rgb_class = tf.argmax(tf.nn.softmax(rgb_score), 3)
    depth_class = tf.argmax(tf.nn.softmax(depth_score), 3)
    fused_class = tf.argmax(
        bayes_fusion([rgb_class, depth_class], confusion_matrices)[0], 3)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    # Warm-up run: exclude one-time graph optimisation/allocation cost from
    # the timed repetitions.
    sess.run(fused_class)

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(fused_class)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
def sample_pipeline(inputs, modality, reuse=False):
    """Draw one Monte-Carlo dropout sample of class probabilities.

    NOTE(review): `reuse` is accepted but never forwarded to fcn —
    presumably variable sharing is handled inside fcn; confirm.
    """
    net = fcn(inputs, modality, net_config['num_units'],
              net_config['num_classes'], trainable=False, is_training=False,
              dropout_rate=variance_config['dropout_rate'],
              dropout_layers=['pool3'], batchnorm=False)
    return tf.nn.softmax(net['score'])
def get_prob(inputs, modality):
    """Return softmax class probabilities of the expert for `modality`.

    Dropout is disabled, so the output is deterministic.
    """
    expert = fcn(inputs, self.config['prefixes'][modality],
                 self.config['num_units'], self.config['num_classes'],
                 trainable=False, is_training=False, dropout_rate=0,
                 dropout_layers=[], batchnorm=False)
    return tf.nn.softmax(expert['score'])
def test_pipeline(inputs, prefix, **config):
    """Unified pipeline to produce semantic segmentation from the input
    with different network models. Currently FCN or Adapnet.
    """
    model = config['expert_model']
    if model == 'adapnet':
        # Network output of the Adapnet expert.
        outputs = adapnet(inputs, prefix, config['num_units'],
                          config['num_classes'])
    elif model == 'fcn':
        outputs = fcn(inputs, prefix, config['num_units'],
                      config['num_classes'], trainable=False,
                      batchnorm=False)
    else:
        # NOTE(review): raising a warning class as an error is unusual, but
        # the type is preserved here in case callers catch UserWarning.
        raise UserWarning('ERROR: Expert Model %s not found' % model)
    outputs['prob'] = tf.nn.softmax(outputs['score'])
    outputs['classification'] = tf.argmax(outputs['prob'], 3)
    return outputs
def time_average_fcn(net_config, repetitions):
    """Benchmark probability-averaging fusion of RGB and depth FCN experts.

    Args:
        net_config: dict with at least 'num_units' and 'num_classes'.
        repetitions: number of timed forward passes.

    Prints the mean and standard deviation of the per-run wall-clock time.
    """
    # cityscapes size
    rgb = tf.ones([1, 768, 384, 3])
    depth = tf.ones([1, 768, 384, 1])
    rgb_score = fcn(rgb, 'rgb', net_config['num_units'],
                    net_config['num_classes'], trainable=False,
                    batchnorm=False)['score']
    depth_score = fcn(depth, 'depth', net_config['num_units'],
                      net_config['num_classes'], trainable=False,
                      batchnorm=False)['score']
    rgb_prob = tf.nn.softmax(rgb_score, 3)
    depth_prob = tf.nn.softmax(depth_score, 3)

    # Fuse by averaging the per-class probabilities of the two experts.
    fused_class = tf.argmax(
        tf.reduce_mean(tf.stack([rgb_prob, depth_prob], axis=0), axis=0), 3)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    # Warm-up run: exclude one-time graph optimisation/allocation cost from
    # the timed repetitions.
    sess.run(fused_class)

    times = []
    for _ in range(repetitions):
        start = time.time()
        sess.run(fused_class)
        end = time.time()
        times.append(end - start)
    print('Mean Time {:.5f}s, Std {:.5f}s'.format(np.mean(times),
                                                  np.std(times)))
    stdout.flush()
def sample_pipeline(inputs, modality, reuse=False):
    """Draw one Monte-Carlo dropout sample of class probabilities.

    Only the FCN expert model is supported.
    NOTE(review): `reuse` is accepted but never forwarded to fcn —
    presumably variable sharing is handled inside fcn; confirm.
    """
    assert self.config['expert_model'] == 'fcn'
    net = fcn(inputs, self.config['prefixes'][modality],
              self.config['num_units'], self.config['num_classes'],
              trainable=False, is_training=False,
              dropout_rate=self.config['dropout_rate'],
              dropout_layers=['pool3'], batchnorm=False)
    return tf.nn.softmax(net['score'])
def test_pipeline(inputs, modality):
    """Produce class probabilities and an MC-dropout uncertainty estimate.

    Returns a tuple (prob, variance): `prob` is the softmax output of a
    dropout-free forward pass; `variance` is the mean (over classes) of the
    per-class variance across dropout samples, with the class axis kept as a
    size-1 dimension.
    """

    def sample_pipeline(inputs, modality, reuse=False):
        """One dropout sample."""
        # NOTE(review): `reuse` is accepted but not forwarded to fcn —
        # presumably variable sharing is handled inside fcn; confirm.
        layers = fcn(inputs, modality, net_config['num_units'],
                     net_config['num_classes'], trainable=False,
                     is_training=False,
                     dropout_rate=variance_config['dropout_rate'],
                     dropout_layers=['pool3'], batchnorm=False)
        prob = tf.nn.softmax(layers['score'])
        return prob

    # For classification, we sample distributions with Dropout-Monte-Carlo and
    # fuse output according to variance
    samples = tf.stack([
        sample_pipeline(inputs, modality, reuse=(i != 0))
        for i in range(variance_config['num_samples'])
    ], axis=4)
    # tf.nn.moments(...)[1] is the per-class variance over the sample axis;
    # averaging over axis 3 collapses classes into one uncertainty map.
    variance = tf.reduce_mean(tf.nn.moments(samples, [4])[1], axis=3,
                              keep_dims=True)
    prob = tf.nn.softmax(
        fcn(inputs, modality, net_config['num_units'],
            net_config['num_classes'], trainable=False, is_training=False,
            batchnorm=False)['score'])
    # We get the label by passing the input without dropout
    return prob, variance