Example #1
import keras
from keras.applications.resnet50 import ResNet50
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.inception_v3 import InceptionV3
from keras.applications.xception import Xception

# densenet121_model, densenet161_model, densenet169_model and N_classes are
# defined elsewhere in the originating project.


def get_base_model(model, pooling=None):
    """Return the requested ImageNet-pretrained base model (`pooling` is only forwarded to the VGG16 branch)."""
    if model == 'resnet50':
        base_model = ResNet50(weights='imagenet',
                              include_top=False,
                              input_tensor=keras.layers.Input(shape=(224, 224,
                                                                     3)))

    elif model == 'vgg16':
        base_model = VGG16(include_top=False,
                           weights='imagenet',
                           pooling=pooling,
                           input_tensor=keras.layers.Input(shape=(224, 224,
                                                                  3)))

    elif model == 'vgg19':
        base_model = VGG19(weights='imagenet',
                           include_top=False,
                           input_tensor=keras.layers.Input(shape=(224, 224,
                                                                  3)))

    elif model == 'inception_v3':
        base_model = InceptionV3(weights='imagenet',
                                 include_top=False,
                                 input_tensor=keras.layers.Input(shape=(299,
                                                                        299,
                                                                        3)))

    elif model == 'xception':
        base_model = Xception(weights='imagenet',
                              include_top=False,
                              input_tensor=keras.layers.Input(shape=(299, 299,
                                                                     3)))

    elif model == 'densenet121':
        base_model = densenet121_model(img_rows=224,
                                       img_cols=224,
                                       color_type=3,
                                       num_classes=N_classes)

    elif model == 'densenet161':
        base_model = densenet161_model(img_rows=224,
                                       img_cols=224,
                                       color_type=3,
                                       num_classes=N_classes)

    elif model == 'densenet169':
        base_model = densenet169_model(img_rows=224,
                                       img_cols=224,
                                       color_type=3,
                                       num_classes=N_classes)

    else:
        raise ValueError('{} is not an implemented model!'.format(model))

    return base_model
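
# A minimal usage sketch (not part of the original example): attach a classification
# head to the returned base model. n_classes below is a hypothetical placeholder.
base = get_base_model('resnet50')
n_classes = 10
x = keras.layers.GlobalAveragePooling2D()(base.output)
predictions = keras.layers.Dense(n_classes, activation='softmax')(x)
clf = keras.models.Model(inputs=base.input, outputs=predictions)
clf.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
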
Example #2
# densenet121 here is a project-local module that provides densenet121_model.
def denseNet121(input_dim, nd_classes):
    weights_path = '../weights/densenet121_weights_tf.h5'

    # Test pretrained model
    # base_model = densenet121.DenseNet(reduction=0.5, classes=1000, weights_path=weights_path)
    model = densenet121.densenet121_model(img_rows=input_dim,
                                          img_cols=input_dim,
                                          color_type=3,
                                          num_classes=nd_classes,
                                          weights_path=weights_path)
    return model
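
# Hypothetical usage (not part of the original example); it assumes the weight file
# referenced above exists locally and uses a placeholder class count.
model = denseNet121(input_dim=224, nd_classes=10)
model.summary()
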
Example #3
    # Excerpt from a training-setup function: learning-rate scheduling, checkpointing
    # and (optionally) multi-GPU model construction. patience, img_height, img_width,
    # num_channels, num_classes and pretrained_path are defined in the enclosing
    # function of the originating project.
    reduce_lr = ReduceLROnPlateau('val_acc',
                                  factor=0.1,
                                  patience=int(patience / 4),
                                  verbose=1)
    trained_models_path = 'models/model'
    model_names = trained_models_path + '.{epoch:02d}-{val_acc:.4f}.hdf5'
    model_checkpoint = ModelCheckpoint(model_names,
                                       monitor='val_acc',
                                       verbose=1,
                                       save_best_only=True)

    num_gpu = len(get_available_gpus())
    if num_gpu >= 2:
        with tf.device("/cpu:0"):
            model = densenet121_model(img_rows=img_height,
                                      img_cols=img_width,
                                      color_type=num_channels,
                                      num_classes=num_classes)
            if pretrained_path is not None:
                model.load_weights(pretrained_path)

        new_model = multi_gpu_model(model, gpus=num_gpu)
        # rewrite the callback: saving through the original model and not the multi-gpu model.
        model_checkpoint = MyCbk(model)
    else:
        new_model = densenet121_model(img_rows=img_height,
                                      img_cols=img_width,
                                      color_type=num_channels,
                                      num_classes=num_classes)
        if pretrained_path is not None:
            new_model.load_weights(pretrained_path)
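
# Hypothetical sketch of the MyCbk callback referenced above: it checkpoints through
# the original single-device model so the saved weights stay loadable without the
# multi-GPU wrapper. The file-name pattern is a placeholder.
import keras


class MyCbk(keras.callbacks.Callback):
    def __init__(self, model_to_save):
        super(MyCbk, self).__init__()
        self.model_to_save = model_to_save

    def on_epoch_end(self, epoch, logs=None):
        # The template model shares weights with the multi-GPU wrapper.
        self.model_to_save.save('models/model.%02d.hdf5' % epoch)
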
Example #4
    # Tail of the get_data() helper called below: splits x/y into training and
    # validation parts and scales pixel values to [0, 1]. x, y, val_split_num,
    # test_img_nos and test are defined earlier in that function.
    x_train = x[val_split_num:]
    y_train = y[val_split_num:]
    x_test = x[:val_split_num]
    y_test = y[:val_split_num]

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    return x_train, y_train, x_test, y_test, test_img_nos, test


from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator

# get model (densenet121_model comes from the project-local DenseNet implementation)
model = densenet121_model(img_rows=128,
                          img_cols=128,
                          color_type=3,
                          num_classes=1000)
model.load_weights('./cp_weights.hdf5')
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# get data
img_size = 128
split_rate = 0.1
(x_train, y_train, x_test, y_test, test_img_nos,
 test) = get_data(img_size, split_rate)

datagen = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.2,
                             height_shift_range=0.2)
# (the remaining ImageDataGenerator arguments are truncated in the original example)
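
# Hypothetical continuation (not in the original snippet): train on the augmented
# batches with the older fit_generator API that matches the style of this example.
# Batch size and epoch count are placeholder values.
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
                    steps_per_epoch=len(x_train) // 32,
                    epochs=10,
                    validation_data=(x_test, y_test))
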
Example #5
    def getDenseNet121(self, train_images, train_labels, load_saved_model,
                       model_save_path, use_pretraining, pretrained_weights_path, train_dir,
                       val_dir, fine_tuning_method, batch_size, num_epochs, optimizer, loss, initial_epoch, sample, lr=None):
        """

        :param load_saved_model: boolean (whether to just load the model from weights path)
        :param model_save_path: (final model weights path, if load_pretrained is true)
        :param pretrained_weights_path: if load_trained is false and if use_pretraining is true, the path of weights to load for pre-training
        :param train_dir: training data directory
        :param val_dir: validation data directory
        :param use_pretraining: boolean, whether to use pre-training or train from scratch
        :param fine_tuning_method: whether to use end-to-end pre-training or phase-by-phase pre-training
        :param batch_size: batch_size to use while fitting the model
        :param num_epochs: number of epochs to train the model
        :param optimizer: type of optimizer to use (sgd|adagrad)
        :param loss: type of loss to use (mse|l1)
        :param initial_epoch: starting epoch to start training
        :return: Returns the AlexNet model according to the parameters provided

        """

        print(get_time_string() + 'Creating DenseNet121 model..')

        img_rows, img_cols = 224, 224  # Resolution of inputs
        channels = 3

        if load_saved_model:
            if model_save_path is None:
                raise Exception('Unable to load trained model as model_save_path is None!')
            print(get_time_string() + 'Loading saved model from ' + model_save_path + '..')
            model = load_model(model_save_path, custom_objects={'Scale': customlayers.Scale})
        else:
            model = densenet121_model(img_rows=img_rows, img_cols=img_cols, channels=channels,
                                      num_classes=NUM_CLASSES_YEARBOOK, use_pretraining=use_pretraining,
                                      pretrained_weights_path=pretrained_weights_path,
                                      optimizer=optimizer, loss=loss,
                                      fine_tuning_method=fine_tuning_method)

        if initial_epoch >= num_epochs:
            print(get_time_string() + 'Not fitting the model since initial_epoch is >= num_epochs. Returning model..')
            return model

        # Start Fine-tuning
        print(get_time_string() + 'Fitting the model..')
        for e in range(initial_epoch, num_epochs):
            print_line()
            print('Starting epoch ' + str(e))
            print_line()
            completed = 0

            for x_chunk, y_chunk in chunks(train_images, train_labels, batch_size, DENSENET121_ARCHITECTURE):
                print(get_time_string() + 'Fitting model for chunk of size ' + str(len(x_chunk)) + '...')
                model.fit(x_chunk, y_chunk,
                          batch_size=batch_size,
                          nb_epoch=1,
                          verbose=1
                          )
                completed += len(x_chunk)
                print(get_time_string() + str(completed) + ' of ' + str(len(train_images)) + ' complete. ')

            file_name = self.getCheckpointFileName(base_model_save_path=model_save_path, epoch=e)
            print(get_time_string() + 'Saving model to ' + file_name)
            model.save(file_name)

            print(get_time_string() + 'Epoch ' + str(e) + ' complete. Evaluating on validation set..')
            evaluateYearbookFromModel(model=model, architecture=DENSENET121_ARCHITECTURE, sample=sample)

            print_line()

        # model.fit(processed_train_images, train_labels,
        #           batch_size=batch_size,
        #           nb_epoch=num_epochs,
        #           shuffle=True,
        #           verbose=1, validation_data=(processed_valid_images, valid_labels),
        #           callbacks=[self.getCheckpointer(model_save_path)],
        #           initial_epoch=initial_epoch
        #           )

        print(get_time_string() + 'Fitting complete. Returning model..')

        if model_save_path is not None:
            print(get_time_string() + 'Saving final model to ' + model_save_path + '..')
            model.save(model_save_path)

        return model
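
# Hypothetical sketch of the chunks() helper used in the training loop above: it yields
# successive (images, labels) batches of size batch_size. The real project helper
# presumably also loads and preprocesses the images for the given architecture; only
# the batching behaviour is sketched here.
def chunks(images, labels, batch_size, architecture):
    for start in range(0, len(images), batch_size):
        yield images[start:start + batch_size], labels[start:start + batch_size]
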