Example #1
def get_model_memory_usage(batch_size, model):
    """ 
    
    based on batch size, find estimated size of model in GPU memory 
    
    Arguments:
        batch_size {int} -- size of batches used for training
        model {keras.Model} -- Model of NN from Keras
    
    Returns:
        int -- estimated size in Gigabytes
    """
    import numpy as np
    import tensorflow.keras.backend as K

    # gather the number of parameters
    trainable_count = np.sum([K.count_params(w) for w in model.trainable_weights])
    non_trainable_count = np.sum([K.count_params(w) for w in model.non_trainable_weights])

    # gather the sizes of the layer outputs
    shapes_mem_count = 0
    for l in model.layers:
        single_layer_mem = 1
        for s in l.output_shape:
            if s is None or isinstance(s, tuple):
                continue
            single_layer_mem *= s
        shapes_mem_count += single_layer_mem

    # assume 4 bytes per element (float32) when computing the size
    total_memory = 4.0*batch_size*(shapes_mem_count + trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0 ** 3), 3)
    return gbytes
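
A minimal usage sketch for the helper above (the model here is illustrative, not from the original source):

import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Dense(128, activation='relu', input_shape=(64,)),
    tf.keras.layers.Dense(10),
])
print(get_model_memory_usage(batch_size=32, model=model))  # estimate in GB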
Example #2
def get_model_memory_usage(batch_size, model):
    """Estimate the memory usage of the model.

    NB: this estimates the number of bytes required by the
    variables; the actual usage will be higher because of the
    backprop variables, storage of activations, etc."""
    import numpy as np
    import tensorflow.keras.backend as K

    shapes_mem_count = 0

    for l in model.layers:
        # some layers report a list of output shapes (multi-output layers)
        layer_output_shape = l.output_shape if isinstance(
            l.output_shape, list) else [l.output_shape]

        for shape in [x for x in layer_output_shape if x is not None]:
            single_layer_mem = 1
            for dim in [x for x in shape if x is not None]:
                single_layer_mem *= dim
            # accumulate once per output shape, after the full product
            shapes_mem_count += single_layer_mem

    # FIXME: these used to be set()s of model.trainable_weights, but the sets had to be
    #        removed because tf.Variable is unhashable since TensorFlow 2.0; check the issues at some point
    trainable_count = int(
        np.sum([K.count_params(p) for p in model.trainable_weights]))
    non_trainable_count = int(
        np.sum([K.count_params(p) for p in model.non_trainable_weights]))

    # assume 4 bytes per float32 element
    return 4 * batch_size * (shapes_mem_count + trainable_count +
                             non_trainable_count)
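
Unlike Example #1, this variant returns raw bytes rather than gigabytes; a quick conversion sketch, assuming a built Keras model named model is in scope:

usage_bytes = get_model_memory_usage(batch_size=16, model=model)
print('~%.1f MiB estimated' % (usage_bytes / 1024.0 ** 2))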
Example #3
    def call(self, x, training):
        self.global_step += 1
        if training and random.random() < 0.5:
            x = self.augmentation(x)
        features = self.backbone(x)
        # out = self.head(features)
        out = self.vision_transformer(features)
        out = tf.nn.softmax(out, axis=-1)

        # report the transformer's parameter counts once, on the first call
        if self.flag:
            trainable_count = np.sum([
                K.count_params(w)
                for w in self.vision_transformer.trainable_weights
            ])
            non_trainable_count = np.sum([
                K.count_params(w)
                for w in self.vision_transformer.non_trainable_weights
            ])

            print('Total params: {:,}'.format(trainable_count +
                                              non_trainable_count))
            print('Trainable params: {:,}'.format(trainable_count))
            print('Non-trainable params: {:,}'.format(non_trainable_count))
            self.flag = False

        return out
Example #4
def get_model_memory_usage(batch_size, model):
    import numpy as np
    from tensorflow.keras import backend as K

    shapes_mem_count = 0
    internal_model_mem_count = 0
    for l in model.layers:
        layer_type = l.__class__.__name__
        if layer_type == 'Model':
            # recurse into nested models; their result is already in gigabytes
            internal_model_mem_count += get_model_memory_usage(batch_size, l)
        single_layer_mem = 1
        out_shape = l.output_shape
        if isinstance(out_shape, list):
            out_shape = out_shape[0]
        for s in out_shape:
            if s is None:
                continue
            single_layer_mem *= s
        shapes_mem_count += single_layer_mem

    trainable_count = np.sum(
        [K.count_params(p) for p in model.trainable_weights])
    non_trainable_count = np.sum(
        [K.count_params(p) for p in model.non_trainable_weights])

    number_size = 4.0
    if K.floatx() == 'float16':
        number_size = 2.0
    elif K.floatx() == 'float64':
        number_size = 8.0

    total_memory = number_size * (batch_size * shapes_mem_count +
                                  trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0**3), 3) + internal_model_mem_count
    return gbytes
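
Because this variant keys the per-element size off K.floatx(), the estimate follows the global float policy; a small sketch of that interaction (assuming tf.keras; restore the policy afterwards):

from tensorflow.keras import backend as K

K.set_floatx('float16')  # the helper will now assume 2 bytes per element
# ... build the model under this policy, then call get_model_memory_usage(...)
K.set_floatx('float32')  # restore the default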
Example #5
def get_model_memory_usage(batch_size, model):
    import numpy as np
    from tensorflow.keras import backend as K

    shapes_mem_count = 0
    for l in model.layers:
        print(l.name)          # debug output: layer name and output shape
        print(l.output_shape)
        single_layer_mem = 1
        for s in l.output_shape:
            if s is None:
                continue
            single_layer_mem *= s
        shapes_mem_count += single_layer_mem

    trainable_count = np.sum(
        [K.count_params(p) for p in set(model.trainable_weights)])
    non_trainable_count = np.sum(
        [K.count_params(p) for p in set(model.non_trainable_weights)])

    number_size = 4.0
    if K.floatx() == 'float16':
        number_size = 2.0
    elif K.floatx() == 'float64':
        number_size = 8.0

    total_memory = number_size * (batch_size * shapes_mem_count +
                                  trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0**3), 3)
    return gbytes
Example #6
def profile(model, log=False):
    # make lists
    layer_name = []
    layer_flops = []
    # TODO: relus
    inshape = []
    weights = []
    # run through the model's layers
    for layer in model.layers:
        name = layer.get_config()["name"]
        if "act" in name:
            print("Skipping activation functions for now!")
        elif "dense" in name or "fc" in name:
            layer_flops.append(count_linear(layer))
            layer_name.append(name)
            inshape.append(layer.input_shape)
            weights.append(int(np.sum([K.count_params(p) for p in layer.trainable_weights])))
        elif ("conv" in name and "pad" not in name and "bn" not in name
              and "relu" not in name and "concat" not in name):
            layer_flops.append(count_conv2d(layer, log))
            layer_name.append(name)
            inshape.append(layer.input_shape)
            weights.append(int(np.sum([K.count_params(p) for p in layer.trainable_weights])))
        elif "res" in name and "branch" in name:
            layer_flops.append(count_conv2d(layer, log))
            layer_name.append(name)
            inshape.append(layer.input_shape)
            weights.append(int(np.sum([K.count_params(p) for p in layer.trainable_weights])))
            
    return layer_name, layer_flops, inshape, weights
Example #7
def training(train_gen,
             valid_gen,
             model,
             bs,
             lr,
             epoch,
             outPath,
             arch_name,
             weights=None):

    opt = optimizers.Adam(learning_rate=lr)
    model.compile(optimizer=opt,
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])

    trainable_count = np.sum(
        [K.count_params(w) for w in model.trainable_weights])
    non_trainable_count = np.sum(
        [K.count_params(w) for w in model.non_trainable_weights])
    print('Number of trainable weights in model: ', trainable_count)
    print('Number of non trainable weights in model: ', non_trainable_count)
    #print(model.summary())

    # Settings for saving checkpoints and for deciding when to stop training.
    trained_model_path = os.path.join(
        outPath, arch_name, arch_name + '-{epoch:04d}-{val_loss:.2f}.hdf5')
    print(trained_model_path)
    checkpoint = ModelCheckpoint(trained_model_path,
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='min')

    earlyStop = EarlyStopping(monitor="val_loss",
                              mode="min",
                              patience=5,
                              restore_best_weights=True)
    callbacks_list = [checkpoint, earlyStop]

    start_time = time.perf_counter()

    try:
        # fit_generator is deprecated in TF2; model.fit accepts generators directly
        history = model.fit(train_gen,
                            validation_data=valid_gen,
                            epochs=epoch,
                            shuffle=True,
                            class_weight=weights,
                            callbacks=callbacks_list)

        elapsed = time.perf_counter() - start_time
        print('Elapsed %.3f seconds for training ' % elapsed)
        print(
            'Trained using the following parameters: architecture {0}, batch size {1}, lr {2}, epochs {3}'
            .format(arch_name, bs, lr, epoch))
        print(history.history)
        plot_history(history, arch_name, outPath)

    except Exception as e:
        print('Training encountered exception {0}'.format(e))
Example #8
def total_num_param(model):
    # Compute number of params in a model (the actual number of floats)
    trainable_count = int(
        np.sum([K.count_params(p) for p in list(model.trainable_weights)]))
    non_trainable_count = int(
        np.sum([K.count_params(p) for p in list(model.non_trainable_weights)]))
    return trainable_count + non_trainable_count
Example #9
def param_count(model):

    trainable_count = int(
        np.sum([K.count_params(p) for p in set(model.trainable_weights)]))

    non_trainable_count = int(
        np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
    return trainable_count, non_trainable_count
Example #10
def counts(model):
    trainable_count = int(
        np.sum([K.count_params(p) for p in model.trainable_weights]))

    non_trainable_count = int(
        np.sum([K.count_params(p) for p in model.non_trainable_weights]))

    return (trainable_count, non_trainable_count)
Example #11
def model_parameters_stats(model):
    trainable_count = np.sum(
        [K.count_params(w) for w in model.trainable_weights])
    non_trainable_count = np.sum(
        [K.count_params(w) for w in model.non_trainable_weights])

    print('Total params: {:,}'.format(trainable_count + non_trainable_count))
    print('Trainable params: {:,}'.format(trainable_count))
    print('Non-trainable params: {:,}'.format(non_trainable_count))
Example #12
def trainable_parameter_count(model):
    # Breaks down parameter counts in a tf.keras or keras model.
    trainable_count = int(np.sum([K.count_params(w) for w in model.trainable_weights]))
    non_trainable_count = int(np.sum([K.count_params(w) for w in model.non_trainable_weights]))
    total_count = trainable_count + non_trainable_count

    print('Total params: {:,}'.format(total_count))
    print('Trainable params: {:,}'.format(trainable_count))
    print('Non-trainable params: {:,}'.format(non_trainable_count))
    return total_count, trainable_count, non_trainable_count
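
A quick usage sketch for the helper above, cross-checking its total against Keras' own count (the model is illustrative; the helper assumes np and K are imported at module level):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
total, trainable, non_trainable = trainable_parameter_count(model)
assert total == model.count_params()  # both should report 36 here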
Example #13
def print_trainable_counts(model):
    trainable_count = int(
        np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
    non_trainable_count = int(
        np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))

    logging.info('Total params: {:,}'.format(trainable_count + non_trainable_count))
    logging.info('Trainable params: {:,}'.format(trainable_count))
    logging.info('Non-trainable params: {:,}'.format(non_trainable_count))

    return trainable_count, non_trainable_count
Example #14
def get_model_params(model):
    trainable_count = np.sum(
        [K.count_params(w) for w in model.trainable_weights])
    non_trainable_count = np.sum(
        [K.count_params(w) for w in model.non_trainable_weights])
    '''
    print('Total params: {:,}'.format(trainable_count + non_trainable_count))
    print('Trainable params: {:,}'.format(trainable_count))
    print('Non-trainable params: {:,}'.format(non_trainable_count))
    '''
    return (trainable_count +
            non_trainable_count), trainable_count, non_trainable_count
Example #15
def model_parameter_count(model, return_counts=False):
    '''
    Breaks down and prints out the counts of parameters of a tf.keras model
    '''
    trainable_count = int(
        np.sum([K.count_params(w) for w in model.trainable_weights]))
    non_trainable_count = int(
        np.sum([K.count_params(w) for w in model.non_trainable_weights]))
    total_count = trainable_count + non_trainable_count

    print('Total params: {:,}'.format(total_count))
    print('Trainable params: {:,}'.format(trainable_count))
    print('Non-trainable params: {:,}'.format(non_trainable_count))
    if return_counts:
        return total_count, trainable_count, non_trainable_count
Example #16
    def on_epoch_end(self, epoch, logs=None):
        trainable_count = np.sum(
            [K.count_params(w) for w in self.model.trainable_weights])
        non_trainable_count = np.sum(
            [K.count_params(w) for w in self.model.non_trainable_weights])

        row = list()
        row.append(str(epoch))
        row.append(str(trainable_count))
        row.append(str(non_trainable_count))
        row.append(str(trainable_count + non_trainable_count))
        row.append(str(time.time() - self.start_train_time))

        # use a context manager so the file is closed after each append
        with open(self.filename, 'a') as csv_file:
            csv_file.write(",".join(row) + "\n")
Example #17
def print_parameter_summary(model) -> None:
    """
    Prints the number of trainable and non-trainable parameters in the model.
    """

    # Note: use model.summary() for a detailed summary of layers.
    trainable_count = np.sum(
        [K.count_params(w) for w in model.trainable_weights])
    non_trainable_count = np.sum(
        [K.count_params(w) for w in model.non_trainable_weights])
    total_count = trainable_count + non_trainable_count

    print(f"Total params: {total_count:,}")
    print(f"Trainable params: {trainable_count:,}")
    print(f"Non-trainable params: {non_trainable_count:,}")
Example #18
 def get_num_trainable_params(self):
     num_params = 0
     layer: layers.Layer
     for layer in self._layers:
         param_counts = [count_params(n) for n in layer.trainable_variables]
         num_params += np.sum(param_counts)
     return num_params
Example #19
def iterate_size(size, train_dataset, mult=1):
    train_dataset = train_dataset.batch(2)
    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, size),
        dataset.transform_targets(y, yolo_anchors, anchors, size),
    ))
    size_alloc = []
    for x_train, _ in train_dataset.take(1):
        size_alloc += [tf.size(x_train)]
        # record the number of elements produced by each stage
        for stage in (stage_1, stage_2, stage_3, stage_4, stage_5, stage_6):
            size_alloc += [
                reduce(lambda x, y: x * y,
                       stage(mult)(x_train).shape)
            ]
    return size_alloc, np.sum(
        [K.count_params(w) for w in stage_6(mult).trainable_weights])
Example #20
def get_model_parameter_counts(model):
    """Calculates the number of parameters from a given model.
    # Arguments
        model: model to have parameters counted.
    # Returns
      trainable_count: integer number with trainable parameter count.
      non_trainable_count:  integer number with non trainable parameter count.
    """
    trainable_count = int(
        np.sum([backend.count_params(p)
                for p in set(model.trainable_weights)]))
    non_trainable_count = int(
        np.sum([
            backend.count_params(p) for p in set(model.non_trainable_weights)
        ]))
    return trainable_count, non_trainable_count
Example #21
def get_numparams(input_size, output_size, net_kw):
    ''' Get number of parameters in any net '''
    net = Net(input_size=input_size, output_size=output_size, **net_kw)
    net.build((None, *input_size[-1::-1]))
    # net.trainable = True
    # numparams = sum([param.nelement() for param in net.parameters()])
    trainable_count = np.sum([K.count_params(w) for w in net.trainable_weights])
    return trainable_count
Example #22
def get_model_memory_usage(batch_size, model):
    import numpy as np
    from tensorflow.keras import backend as K

    shapes_mem_count = 0
    for l in model.layers:
        single_layer_mem = 1
        for s in l.output_shape:
            if s is None:
                continue
            single_layer_mem *= s
        shapes_mem_count += single_layer_mem

    trainable_count = np.sum([K.count_params(p) for p in set(model.trainable_weights)])
    non_trainable_count = np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])

    total_memory = 4.0*batch_size*(shapes_mem_count + trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0 ** 3), 3)
    return gbytes
Example #23
def get_numparams(input_size, output_size, net_kw):
    ''' Get number of parameters in any net '''
    net = Net(input_size=input_size, output_size=output_size, **net_kw)
    # NOTE: from NCHW to NHWC. This is because of the different orders in tf.keras and PyTorch.
    net.build(tuple([None] + input_size[1:] + [input_size[0]]))
    # net.trainable = True
    trainable_count = np.sum(
        [K.count_params(w) for w in net.trainable_weights])
    return trainable_count
Example #24
def main(args):
  print(f'=> Dataset: {args.dataset}')
  if args.dataset == 'mtt':
    config = MTT_CONFIG
  elif args.dataset == 'scd':
    config = SCD_CONFIG
  elif args.dataset == 'dcs':
    config = DCS_CONFIG
  else:
    raise Exception(f'Not implemented dataset: {args.dataset}')

  dataset_path = mkpath(args.data_dir, args.dataset)
  tfrecord_path = f'{dataset_path}/tfrecord'

  # Configure the model.
  model_config = ModelConfig(block=args.block, amplifying_ratio=args.amplifying_ratio, multi=args.multi,
                             num_blocks=config.num_blocks, dropout=args.dropout, activation=config.activation,
                             num_classes=config.num_classes)

  # Set the training directory.
  args.train_dir = mkpath(args.log_dir, datetime.now().strftime('%Y%m%d_%H%M%S') + f'-{args.dataset}')
  if args.name is None:
    args.name = model_config.get_signature()
  args.train_dir += '-' + args.name
  os.makedirs(args.train_dir, exist_ok=False)
  print('=> Training directory: ' + args.train_dir)

  # Create training, validation, and test datasets.
  dataset_train, dataset_val, dataset_test = create_datasets(tfrecord_path, args.batch_size, args.num_readers, config)

  model = SampleCNN(model_config)
  model_config.print_summary()

  num_params = int(sum([K.count_params(p) for p in set(model.trainable_weights)]))
  print(f'=> #params: {num_params:,}')

  for stage in range(args.num_stages):
    print(f'=> Stage {stage}')
    # Set the learning rate of current stage
    lr = args.lr * (args.lr_decay ** stage)
    # Train the network.
    train(model, lr, dataset_train, dataset_val, config, args)
    # Load the best model.
    model = tf.keras.models.load_model(f'{args.train_dir}/best.h5',
                                       custom_objects={'AudioVarianceScaling': AudioVarianceScaling, 'tf': tf})
    # Evaluate.
    rocauc, prauc, acc, f1 = evaluate(model, dataset_test, config)

  # Change the file name of the best checkpoint with the scores.
  os.rename(f'{args.train_dir}/best.h5', f'{args.train_dir}/final-auc_{rocauc:.6f}-acc_{acc:.6f}-f1_{f1:.6f}.h5')
  # Report the final scores.
  print(f'=> FINAL SCORES [{args.dataset}] {args.name}: '
        f'rocauc={rocauc:.6f}, acc={acc:.6f}, f1={f1:.6f}, prauc={prauc:.6f}')

  model_config.print_summary()

  return rocauc, prauc, acc, f1
Example #25
def estimate_model_memory_usage(model):
    shapes_mem_count = 0
    for l in model.layers:
        single_layer_mem = 1
        for s in (l.output_shape[0]
                  if isinstance(l.output_shape, list) else l.output_shape):
            if s is None:  # skip the unknown batch dimension
                continue
            single_layer_mem *= s
        shapes_mem_count += single_layer_mem

    trainable_count = np.sum(
        [K.count_params(p) for p in set(model.trainable_weights)])
    non_trainable_count = np.sum(
        [K.count_params(p) for p in set(model.non_trainable_weights)])

    total_memory = 4.0 * (shapes_mem_count + trainable_count +
                          non_trainable_count)
    gbytes = np.round(total_memory / (1024.0**3), 3)
    return gbytes
Example #26
def get_model_memory_usage(batch_size, model):
    shapes_mem_count = 0
    for layer in model.layers:
        shapes_mem_count += np.prod(layer.output_shape[1:])

    trainable_count = int(
        np.sum([K.count_params(weight) for weight in model.trainable_weights]))
    non_trainable_count = int(
        np.sum([
            K.count_params(weight) for weight in model.non_trainable_weights
        ]))

    total_memory = 4 * batch_size * (shapes_mem_count + trainable_count +
                                     non_trainable_count)
    gbytes = round(total_memory / (1024**3), 3)
    mbytes = round(total_memory / (1024**2), 3)

    return trainable_count, non_trainable_count, gbytes, mbytes
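
Since this variant returns the parameter counts together with both GB and MB figures, a caller can log whichever unit fits; an illustrative sketch assuming a built model in scope:

trainable, frozen, gbytes, mbytes = get_model_memory_usage(64, model)
print('%d trainable / %d frozen params, ~%s MB at batch size 64' % (trainable, frozen, mbytes))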
Example #27
    def __init__(self):
        super(ClassifierHybrid, self).__init__()
        self.global_step = 0
        self.backbone = self.get_backbone()
        self.backbone.trainable = False
        trainable_count = np.sum(
            [K.count_params(w) for w in self.backbone.trainable_weights])
        non_trainable_count = np.sum(
            [K.count_params(w) for w in self.backbone.non_trainable_weights])

        print('Total params: {:,}'.format(trainable_count +
                                          non_trainable_count))
        print('Trainable params: {:,}'.format(trainable_count))
        print('Non-trainable params: {:,}'.format(non_trainable_count))
        # self.head = tf.keras.Sequential([
        #     layers.Flatten(),
        #     layers.Dense(256, activation='relu'),
        #     layers.Dense(196)
        # ])

        # self.vision_transformer = ViT(img_size=9, channels=1408, patch_size=1, num_layers=8,
        #                  num_classes=196, d_model=512, num_heads=8, d_mlp=512)

        self.vision_transformer = ViT(img_size=args.num_patches,
                                      channels=args.num_channels,
                                      patch_size=args.patch_size,
                                      num_layers=args.num_layers,
                                      num_classes=args.num_classes,
                                      d_model=args.d_model,
                                      num_heads=args.num_heads,
                                      d_mlp=args.d_mlp)
        self.prepare_datasets()
        self.flag = True
        self.augmentation = tf.keras.Sequential(
            [
                tf.keras.Input(shape=(260, 260, 3)),
                preprocessing.RandomRotation(factor=0.15),
                preprocessing.RandomTranslation(height_factor=0.1,
                                                width_factor=0.1),
                preprocessing.RandomFlip(),
                preprocessing.RandomContrast(factor=0.1),
            ],
            name="augmentation",
        )
Example #28
def _log_mlflow_params(model, dataset, training_spec):
    images = dataset.image_set()
    #labels = dataset.label_set()
    mlflow.log_param('Images - Type',   images.type())
    mlflow.log_param('Images - Count',   len(images))
    mlflow.log_param('Images - Stride', training_spec.stride)
    mlflow.log_param('Images - Tile Size', len(model.layers))
    mlflow.log_param('Train - Steps', training_spec.steps)
    mlflow.log_param('Train - Loss Function', training_spec.loss)
    mlflow.log_param('Train - Epochs', training_spec.epochs)
    mlflow.log_param('Train - Batch Size', training_spec.batch_size)
    mlflow.log_param('Train - Optimizer', training_spec.optimizer)
    mlflow.log_param('Model - Layers', len(model.layers))
    mlflow.log_param('Model - Parameters - Non-Trainable',
                     np.sum([K.count_params(w) for w in model.non_trainable_weights]))
    mlflow.log_param('Model - Parameters - Trainable',
                     np.sum([K.count_params(w) for w in model.trainable_weights]))
    mlflow.log_param('Model - Shape - Output',   dataset.output_shape())
    mlflow.log_param('Model - Shape - Input',   dataset.input_shape())
Example #29
def get_model_memory_usage(batch_size, model):
    import numpy as np
    from tensorflow.keras import backend as K

    shapes_mem_count = 0
    internal_model_mem_count = 0
    for l in model.layers:
        layer_type = l.__class__.__name__

        if layer_type == 'Model':
            internal_model_mem_count += get_model_memory_usage(batch_size, l)

        single_layer_mem = 1
        for s in l.output_shape:
            if isinstance(s, tuple):
                for si in s:
                    if si is None:
                        continue

                    single_layer_mem *= si

            else:
                if s is None:
                    continue

                single_layer_mem *= s

        shapes_mem_count += single_layer_mem

    trainable_count = np.sum([K.count_params(p) for p in model.trainable_weights])
    non_trainable_count = np.sum([K.count_params(p) for p in model.non_trainable_weights])
    number_size = 4.0
    if K.floatx() == 'float16':
        number_size = 2.0
    elif K.floatx() == 'float64':
        number_size = 8.0

    total_memory = number_size * (batch_size * shapes_mem_count + trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0 ** 3), 3) + internal_model_mem_count
    return gbytes
Example #30
 def _decorate_models(self):
     import tensorflow.keras.backend as K

     for model_key, model in self._model_dict.items():
         if not hasattr(self, model_key):
             model.compile()
             trainable_count = sum(
                 [K.count_params(w) for w in model.trainable_weights])
             if self._verbose:
                 print("%s has %d parameters (%d trainable)" %
                       (model_key, model.count_params(), trainable_count))
             setattr(self, model_key, model)
         else:
             raise RuntimeError("Duplicated model %s." % model_key)