def resnet_model(images,
                 is_training,
                 num_classes,
                 resnet_size=50,
                 return_intermediate_values=False,
                 film_generator_fn=None,
                 film_generator_input=None):
  """Returns resnet model, optionally returning intermediate endpoint tensors.

  Args:
    images: A Tensor representing a batch [N,H,W,C] of input images.
    is_training: A boolean. Set to True to add operations required only when
      training the classifier.
    num_classes: Dimensionality of output logits emitted by final dense layer.
    resnet_size: Size of resnet. One of [18, 34, 50, 101, 152, 200].
    return_intermediate_values: If True, returns a dictionary of output and
      intermediate activation values.
    film_generator_fn: Callable that returns a List (for each block layer) of
      Lists (per ResNet block) of FiLM conditioning vectors.
    film_generator_input: Embedding tensor to be passed to film_generator_fn.
  """
  # For bigger models, we want to use "bottleneck" layers.
  if resnet_size < 50:
    bottleneck = False
  else:
    bottleneck = True
  model = resnet_lib.Model(
      resnet_size=resnet_size,
      bottleneck=bottleneck,
      num_classes=num_classes,
      num_filters=64,
      kernel_size=7,
      conv_stride=2,
      first_pool_size=3,
      first_pool_stride=2,
      block_sizes=_get_block_sizes(resnet_size),
      block_strides=[1, 2, 2, 2],
      resnet_version=resnet_lib.DEFAULT_VERSION,
      data_format='channels_last',
      dtype=resnet_lib.DEFAULT_DTYPE)
  final_dense = model(images, is_training, film_generator_fn,
                      film_generator_input)
  if return_intermediate_values:
    return resnet_endpoints(model)
  else:
    return final_dense
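

# _get_block_sizes is called above but not shown in this section. A minimal
# sketch, assuming the standard per-layer block counts used by the reference
# TensorFlow ResNet implementation, might look like this (the exact error
# handling is an assumption).
def _get_block_sizes(resnet_size):
  """Maps a resnet_size to the number of residual blocks per block layer."""
  choices = {
      18: [2, 2, 2, 2],
      34: [3, 4, 6, 3],
      50: [3, 4, 6, 3],
      101: [3, 4, 23, 3],
      152: [3, 8, 36, 3],
      200: [3, 24, 36, 3],
  }
  if resnet_size not in choices:
    raise ValueError(
        'Could not find block sizes for resnet_size %d; expected one of %s.'
        % (resnet_size, sorted(choices.keys())))
  return choices[resnet_size]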


def get_resnet50_spatial(images, is_training):
  """ResNet50, but cut off last block and return before global pooling."""
  num_classes = 1001  # Dummy value, unused.
  model = resnet_lib.Model(
      resnet_size=50,
      bottleneck=True,
      num_classes=num_classes,
      num_filters=64,
      kernel_size=7,
      conv_stride=2,
      first_pool_size=3,
      first_pool_stride=2,
      block_sizes=[3, 4, 6],
      block_strides=[1, 2, 2],
      resnet_version=resnet_lib.DEFAULT_VERSION,
      data_format='channels_last',
      dtype=resnet_lib.DEFAULT_DTYPE)
  # Build the graph.
  _ = model(images, is_training)
  # Return pre-pooled dense spatial features.
  return resnet.resnet_endpoints(model)['block_layer3']
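

# Hedged usage sketch for get_resnet50_spatial (not part of the original code;
# the function and tensor names below are illustrative). With the layout above
# (conv stride 2, pool stride 2, block strides [1, 2, 2] and bottleneck blocks)
# a 224x224 input should come out as roughly a [N, 14, 14, 1024] feature map.
def _example_spatial_features():
  images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
  spatial_features = get_resnet50_spatial(images, is_training=False)
  return spatial_features  # Expected shape: [None, 14, 14, 1024].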


def resnet_model(images,
                 is_training,
                 num_classes,
                 resnet_size=50,
                 return_intermediate_values=False,
                 film_generator_fn=None,
                 film_generator_input=None,
                 pretrain_checkpoint=None):
  """Returns resnet model, optionally returning intermediate endpoint tensors.

  Args:
    images: A Tensor representing a batch [N,H,W,C] of input images.
    is_training: A boolean. Set to True to add operations required only when
      training the classifier.
    num_classes: Dimensionality of output logits emitted by final dense layer.
    resnet_size: Size of resnet. One of [18, 34, 50, 101, 152, 200].
    return_intermediate_values: If True, returns a dictionary of output and
      intermediate activation values.
    film_generator_fn: Callable that returns a List (for each block layer) of
      Lists (per ResNet block) of FiLM conditioning vectors.
    film_generator_input: Embedding tensor to be passed to film_generator_fn.
    pretrain_checkpoint: String to initialize model weights from. Does *NOT*
      initialize final logits layer. ResNet checkpoints can be found here:
      https://github.com/tensorflow/models/tree/master/official/r1/resnet.
  """
  # For bigger models, we want to use "bottleneck" layers.
  if resnet_size < 50:
    bottleneck = False
  else:
    bottleneck = True
  model = resnet_lib.Model(
      resnet_size=resnet_size,
      bottleneck=bottleneck,
      num_classes=num_classes,
      num_filters=64,
      kernel_size=7,
      conv_stride=2,
      first_pool_size=3,
      first_pool_stride=2,
      block_sizes=_get_block_sizes(resnet_size),
      block_strides=[1, 2, 2, 2],
      resnet_version=resnet_lib.DEFAULT_VERSION,
      data_format='channels_last',
      dtype=resnet_lib.DEFAULT_DTYPE)
  final_dense = model(images, is_training, film_generator_fn,
                      film_generator_input)
  if pretrain_checkpoint:
    # Initialize variables in ResNet, excluding the final dense layer and any
    # optimization-specific variables (e.g. Momentum, Adam Beta).
    assignment_map = {}
    resnet_scope = _get_resnet_scope()
    for var in contrib_framework.get_variables(
        scope=resnet_scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
      if 'dense' not in var.op.name:
        # Remove the parent scope prefix.
        name_in_ckpt = var.op.name.replace(resnet_scope, 'resnet_model/')
        assignment_map[name_in_ckpt] = var
    tf.train.init_from_checkpoint(pretrain_checkpoint, assignment_map)
  if return_intermediate_values:
    return resnet_endpoints(model)
  else:
    return final_dense
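

# Hedged usage sketch for the pretrained variant of resnet_model above (not
# part of the original code; the wrapper name and checkpoint path are
# placeholders). It restores ImageNet-pretrained weights while leaving the
# final logits layer freshly initialized, and asks for the endpoint dict so
# intermediate activations can be read alongside the final logits.
def _example_pretrained_endpoints(images):
  endpoints = resnet_model(
      images,
      is_training=True,
      num_classes=10,
      resnet_size=50,
      return_intermediate_values=True,
      pretrain_checkpoint='/path/to/resnet50_checkpoint/model.ckpt')
  # Per the docstring, endpoints is a dict of intermediate activation tensors
  # plus the final output.
  return endpoints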


def resnet_model(images,
                 is_training,
                 num_classes,
                 resnet_size=50,
                 weight_decay=None,
                 kernel_size=7,
                 num_filters=64,
                 return_intermediate_values=False,
                 film_generator_fn=None,
                 film_generator_input=None,
                 pretrain_checkpoint=None):
  """Returns resnet model, optionally returning intermediate endpoint tensors.

  Args:
    images: A Tensor representing a batch [N,H,W,C] of input images.
    is_training: A boolean. Set to True to add operations required only when
      training the classifier.
    num_classes: Dimensionality of output logits emitted by final dense layer.
    resnet_size: Size of resnet. One of [18, 34, 50, 101, 152, 200].
    weight_decay: L2 weight regularizer.
    kernel_size: Size of the convolution kernels used in the resnet model.
    num_filters: Number of filters used.
    return_intermediate_values: If True, returns a dictionary of output and
      intermediate activation values.
    film_generator_fn: Callable that returns a List (for each block layer) of
      Lists (per ResNet block) of FiLM conditioning vectors.
    film_generator_input: Embedding tensor to be passed to film_generator_fn.
    pretrain_checkpoint: String to initialize model weights from. Does *NOT*
      initialize final logits layer. ResNet checkpoints can be found here:
      https://github.com/tensorflow/models/tree/master/official/r1/resnet.
  """
  # For bigger models, we want to use "bottleneck" layers.
  if resnet_size < 50:
    bottleneck = False
  else:
    bottleneck = True
  model = resnet_lib.Model(
      resnet_size=resnet_size,
      bottleneck=bottleneck,
      num_classes=num_classes,
      num_filters=num_filters,
      kernel_size=kernel_size,
      conv_stride=2,
      first_pool_size=3,
      first_pool_stride=2,
      block_sizes=_get_block_sizes(resnet_size),
      block_strides=[1, 2, 2, 2],
      resnet_version=resnet_lib.DEFAULT_VERSION,
      data_format='channels_last',
      weight_decay=weight_decay,
      dtype=resnet_lib.DEFAULT_DTYPE)
  final_dense = model(images, is_training, film_generator_fn,
                      film_generator_input)
  if pretrain_checkpoint:
    # Initialize variables in ResNet, excluding the final dense layer and any
    # optimization-specific variables (e.g. Momentum, Adam Beta).
    # When initializing on TPUs, use AbstractT2RModel.init_from_checkpoint_fn.
    resnet_init_from_checkpoint_fn(pretrain_checkpoint)
  if return_intermediate_values:
    return resnet_endpoints(model)
  else:
    return final_dense
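

# resnet_init_from_checkpoint_fn is called above but not shown in this
# section. A minimal sketch, assuming it simply factors out the inline
# assignment-map logic from the earlier version of resnet_model (the
# _get_resnet_scope helper is taken from that version as well).
def resnet_init_from_checkpoint_fn(checkpoint):
  """Initializes ResNet variables from a checkpoint, skipping the logits layer."""
  assignment_map = {}
  resnet_scope = _get_resnet_scope()
  for var in contrib_framework.get_variables(
      scope=resnet_scope, collection=tf.GraphKeys.TRAINABLE_VARIABLES):
    if 'dense' not in var.op.name:
      # Remove the parent scope prefix so variable names match the checkpoint.
      name_in_ckpt = var.op.name.replace(resnet_scope, 'resnet_model/')
      assignment_map[name_in_ckpt] = var
  tf.train.init_from_checkpoint(checkpoint, assignment_map)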