Example #1
0
def main(epochs,
         enable_function,
         buffer_size,
         batch_size,
         mode,
         growth_rate,
         output_classes,
         depth_of_model=None,
         num_of_blocks=None,
         num_layers_in_each_block=None,
         data_format='channels_last',
         bottleneck=True,
         compression=0.5,
         weight_decay=1e-4,
         dropout_rate=0.,
         pool_initial=False,
         include_top=True,
         train_mode='custom_loop',
         data_dir=None):
    """Build a DenseNet model, create the datasets and run training.

    Args:
        epochs: Number of epochs to train for.
        enable_function: Whether to wrap the train/test step in `tf.function`.
        buffer_size: Shuffle buffer size for the input pipeline.
        batch_size: Batch size for training and evaluation.
        mode: DenseNet configuration mode passed through to the model.
        growth_rate: DenseNet growth rate (filters added per layer).
        output_classes: Number of output classes.
        depth_of_model: Total depth of the model (optional, mode-dependent).
        num_of_blocks: Number of dense blocks (optional, mode-dependent).
        num_layers_in_each_block: Layers per dense block (optional).
        data_format: 'channels_last' or 'channels_first'.
        bottleneck: Whether to use bottleneck layers.
        compression: Compression factor for transition layers.
        weight_decay: L2 regularization coefficient.
        dropout_rate: Dropout rate (0. disables dropout).
        pool_initial: Whether to pool after the initial convolution.
        include_top: Whether to include the classification head.
        train_mode: Either 'custom_loop' or 'keras_fit'.
        data_dir: Optional directory for the dataset.

    Returns:
        Whatever the selected training routine returns (train/test metrics).

    Raises:
        ValueError: If `train_mode` is not 'custom_loop' or 'keras_fit'.
    """
    model = densenet.DenseNet(mode, growth_rate, output_classes,
                              depth_of_model, num_of_blocks,
                              num_layers_in_each_block, data_format,
                              bottleneck, compression, weight_decay,
                              dropout_rate, pool_initial, include_top)
    train_obj = Train(epochs, enable_function, model)
    train_dataset, test_dataset, _ = utils.create_dataset(
        buffer_size, batch_size, data_format, data_dir)

    print('Training...')
    if train_mode == 'custom_loop':
        return train_obj.custom_loop(train_dataset, test_dataset)
    elif train_mode == 'keras_fit':
        return train_obj.keras_fit(train_dataset, test_dataset)
    else:
        # Previously an unrecognized mode silently returned None; fail loudly
        # instead, matching the distributed variants of this entry point.
        raise ValueError(
            'Please enter either "keras_fit" or "custom_loop" as the argument.'
        )
Example #2
0
def main(epochs,
         enable_function,
         buffer_size,
         batch_size,
         mode,
         growth_rate,
         output_classes,
         depth_of_model=None,
         num_of_blocks=None,
         num_layers_in_each_block=None,
         data_format='channels_last',
         bottleneck=True,
         compression=0.5,
         weight_decay=1e-4,
         dropout_rate=0.,
         pool_initial=False,
         include_top=True,
         train_mode='custom_loop',
         data_dir=None):
  """Build a DenseNet model, create the datasets and run training.

  Args:
    epochs: Number of epochs to train for.
    enable_function: Whether to wrap the train/test step in `tf.function`.
    buffer_size: Shuffle buffer size for the input pipeline.
    batch_size: Batch size for training and evaluation.
    mode: DenseNet configuration mode passed through to the model.
    growth_rate: DenseNet growth rate (filters added per layer).
    output_classes: Number of output classes.
    depth_of_model: Total depth of the model (optional, mode-dependent).
    num_of_blocks: Number of dense blocks (optional, mode-dependent).
    num_layers_in_each_block: Layers per dense block (optional).
    data_format: 'channels_last' or 'channels_first'.
    bottleneck: Whether to use bottleneck layers.
    compression: Compression factor for transition layers.
    weight_decay: L2 regularization coefficient.
    dropout_rate: Dropout rate (0. disables dropout).
    pool_initial: Whether to pool after the initial convolution.
    include_top: Whether to include the classification head.
    train_mode: Either 'custom_loop' or 'keras_fit'.
    data_dir: Optional directory for the dataset.

  Returns:
    Whatever the selected training routine returns (train/test metrics).

  Raises:
    ValueError: If `train_mode` is not 'custom_loop' or 'keras_fit'.
  """
  model = densenet.DenseNet(mode, growth_rate, output_classes, depth_of_model,
                            num_of_blocks, num_layers_in_each_block,
                            data_format, bottleneck, compression, weight_decay,
                            dropout_rate, pool_initial, include_top)
  train_obj = Train(epochs, enable_function, model)
  train_dataset, test_dataset, _ = utils.create_dataset(
      buffer_size, batch_size, data_format, data_dir)

  print('Training...')
  if train_mode == 'custom_loop':
    return train_obj.custom_loop(train_dataset, test_dataset)
  elif train_mode == 'keras_fit':
    return train_obj.keras_fit(train_dataset, test_dataset)
  else:
    # Previously an unrecognized mode silently returned None; fail loudly
    # instead, matching the distributed variants of this entry point.
    raise ValueError(
        'Please enter either "keras_fit" or "custom_loop" as the argument.')
def main(epochs,
         enable_function,
         buffer_size,
         batch_size,
         mode,
         growth_rate,
         output_classes,
         depth_of_model=None,
         num_of_blocks=None,
         num_layers_in_each_block=None,
         data_format='channels_last',
         bottleneck=True,
         compression=0.5,
         weight_decay=1e-4,
         dropout_rate=0.,
         pool_initial=False,
         include_top=True,
         train_mode='custom_loop',
         data_dir=None,
         num_gpu=1):
    """Train a DenseNet model across `num_gpu` GPUs with MirroredStrategy.

    Builds the model, trainer, and distributed dataset iterators inside the
    strategy scope, then dispatches on `train_mode`. Only 'custom_loop' is
    supported; 'keras_fit' and unknown modes raise ValueError.
    """
    gpu_names = ['/device:GPU:{}'.format(idx) for idx in range(num_gpu)]
    strategy = tf.distribute.MirroredStrategy(gpu_names)

    with strategy.scope():
        model = densenet.DenseNet(mode, growth_rate, output_classes,
                                  depth_of_model, num_of_blocks,
                                  num_layers_in_each_block, data_format,
                                  bottleneck, compression, weight_decay,
                                  dropout_rate, pool_initial, include_top)
        trainer = Train(epochs, enable_function, model)

        train_ds, test_ds, metadata = utils.create_dataset(
            buffer_size, batch_size, data_format, data_dir)

        # Steps per epoch, derived from the dataset metadata splits.
        steps_train = metadata.splits['train'].num_examples // batch_size
        steps_test = metadata.splits['test'].num_examples // batch_size

        train_it = strategy.make_dataset_iterator(train_ds)
        test_it = strategy.make_dataset_iterator(test_ds)

        print('Training...')
        # Guard clauses: reject unsupported modes, then run the custom loop.
        if train_mode == 'keras_fit':
            raise ValueError(
                '`tf.distribute.Strategy` does not support subclassed models yet.'
            )
        if train_mode != 'custom_loop':
            raise ValueError(
                'Please enter either "keras_fit" or "custom_loop" as the argument.'
            )
        return trainer.custom_loop(train_it, test_it, steps_train, steps_test,
                                   strategy)
Example #4
0
def main(epochs,
         enable_function,
         buffer_size,
         batch_size,
         mode,
         growth_rate,
         output_classes,
         depth_of_model=None,
         num_of_blocks=None,
         num_layers_in_each_block=None,
         data_format='channels_last',
         bottleneck=True,
         compression=0.5,
         weight_decay=1e-4,
         dropout_rate=0.,
         pool_initial=False,
         include_top=True,
         train_mode='custom_loop',
         data_dir=None,
         num_gpu=1):
  """Train a DenseNet model across `num_gpu` GPUs with MirroredStrategy.

  Builds the model, trainer (which also receives the batch size and the
  strategy), and distributed dataset iterators inside the strategy scope,
  then dispatches on `train_mode`. Only 'custom_loop' is supported;
  'keras_fit' and unknown modes raise ValueError.
  """
  gpu_names = ['/device:GPU:{}'.format(idx) for idx in range(num_gpu)]
  strategy = tf.distribute.MirroredStrategy(gpu_names)

  with strategy.scope():
    model = densenet.DenseNet(
        mode, growth_rate, output_classes, depth_of_model, num_of_blocks,
        num_layers_in_each_block, data_format, bottleneck, compression,
        weight_decay, dropout_rate, pool_initial, include_top)
    trainer = Train(epochs, enable_function, model, batch_size, strategy)

    train_ds, test_ds, metadata = utils.create_dataset(
        buffer_size, batch_size, data_format, data_dir)

    # Steps per epoch, derived from the dataset metadata splits.
    steps_train = metadata.splits['train'].num_examples // batch_size
    steps_test = metadata.splits['test'].num_examples // batch_size

    train_it = strategy.make_dataset_iterator(train_ds)
    test_it = strategy.make_dataset_iterator(test_ds)

    print('Training...')
    # Guard clauses: reject unsupported modes, then run the custom loop.
    if train_mode == 'keras_fit':
      raise ValueError(
          '`tf.distribute.Strategy` does not support subclassed models yet.')
    if train_mode != 'custom_loop':
      raise ValueError(
          'Please enter either "keras_fit" or "custom_loop" as the argument.')
    return trainer.custom_loop(train_it, test_it, steps_train, steps_test,
                               strategy)