def get_model():
    """Build a VGG-style CIFAR-10 model and sync its initial weights.

    Reads the module-level ``mode`` to pick the architecture:
    ``''``     -> 25-layer network (6 conv layers per stage, width 32),
    ``'fast'`` -> 13-layer network (3 conv layers per stage, width 16).

    The first time a configuration is built, its freshly initialized
    weights are saved under ``model_initial_weights/``; on later runs
    they are reloaded, so every run starts from identical weights.

    Returns:
        The VGG model, with initial weights saved or restored.

    Raises:
        ValueError: if ``mode`` is neither ``''`` nor ``'fast'``
            (previously this fell through and crashed with a NameError
            on the unbound ``model``).
    """
    if mode == '':
        # 25-layer deep VGG-style network with batchnorm.
        k = 32
        layers_per_stage = 6
    elif mode == 'fast':
        # 13-layer deep VGG-style network with batchnorm.
        k = 16
        layers_per_stage = 3
    else:
        raise ValueError("Unsupported mode: {!r}".format(mode))

    # Both configurations share everything except width and depth.
    model = VGG(input_shape=x_train.shape[1:],
                nbstages=4,
                nblayers=[layers_per_stage] * 4,
                nbfilters=[1 * k, 2 * k, 4 * k, 8 * k],
                nbclasses=y_train.shape[1],
                use_bias=False,
                batchnorm_training=False,
                kernel_initializer='he_uniform')

    weights_location = ('model_initial_weights/cifar10_initial_weights'
                        + mode + '.h5')
    if not os.path.exists(weights_location):
        # First run for this configuration: persist the fresh weights.
        model.save_weights(weights_location)
    else:
        # Reuse the saved initial weights for reproducibility.
        model.load_weights(weights_location)

    return model
# ---- 示例#2 (Example #2, 0 votes) — scrape-artifact separator ----
 def _load_custom(self, model_id):
     """Load a custom VGG model by id, caching constructed instances.

     Looks the id up in ``self.custom_models`` first; on a miss, reads
     ``config_<id>.json`` and ``weights_<id>.h5`` from the model's
     directory under ``self.custom_models_dir``, rebuilds the model
     from its hyperparameters, loads the weights, and caches it.

     Args:
         model_id: identifier used in the on-disk directory/file names.

     Returns:
         The constructed (or previously cached) VGG model.

     Raises:
         FileNotFoundError: if the model directory does not exist
             (subclass of OSError, so existing callers catching
             OSError still work).
     """
     # Serve from cache if this model was already constructed.
     if model_id in self.custom_models:
         return self.custom_models[model_id]
     model_dir = os.path.join(self.custom_models_dir,
                              'model_{}'.format(model_id))
     if not os.path.exists(model_dir):
         raise FileNotFoundError(
             "Directory for custom model {} does not exist!".format(
                 model_id))
     config_path = os.path.join(model_dir,
                                'config_{}.json'.format(model_id))
     weights_path = os.path.join(model_dir,
                                 'weights_{}.h5'.format(model_id))
     with open(config_path) as f:
         config = json.load(f)
     # Hyperparameters are stored as a plain dict; VGG expects an
     # attribute-style namespace.
     hparams = argparse.Namespace(**config['hparams'])
     model = VGG(hparams)
     model.load_weights(weights_path)
     self.custom_models[model_id] = model
     return model
# ---- 示例#3 (Example #3, 0 votes) — scrape-artifact separator ----
def construct_model(config_path, weights_path):
    """Rebuild a VGG model from a saved JSON config plus a weights file.

    Prints the stored hyperparameters, training time, and train/test
    accuracy while doing so.
    """
    # Pull the JSON contents out first; the file can be closed before
    # any of the reporting or model construction happens.
    with open(config_path) as f:
        config = json.load(f)
    print('Hyperparameters: ', config['hparams'], type(config['hparams']))
    print('Train Time: {}'.format(config['train_time']))
    train_acc, test_acc = config['train_acc'], config['test_acc']
    print('Train Accuracy : {} | Test Accuracy {}'.format(
        train_acc, test_acc))
    # VGG expects attribute-style access to the hyperparameters.
    args = Namespace(**config['hparams'])
    model = VGG(args)
    model.load_weights(weights_path)
    return model
    def get_model():
        """Build a 5-stage VGG for Tiny ImageNet and sync initial weights.

        Saves freshly initialized weights to ``model_initial_weights/``
        on the first run, and restores them on later runs so training
        always starts from the same initialization.
        """
        width = 32
        model = VGG(input_shape=x_train.shape[1:],
                    nbstages=5,
                    nblayers=[2] * 5,
                    nbfilters=[width, 2 * width, 4 * width,
                               8 * width, 16 * width],
                    nbclasses=y_train.shape[1],
                    use_bias=False,
                    batchnorm_training=False,  # use_batchnorm = False,
                    kernel_initializer='he_uniform',
                    # momentum lowered because training sometimes stops
                    # after very few epochs (~15)
                    batchnorm_momentum=0.9)

        weights_dir = 'model_initial_weights'
        weights_name = 'tinyImagenet_initial_weights_batchnorm.h5'
        weights_location = weights_dir + '/' + weights_name
        if weights_name not in os.listdir(weights_dir):
            model.save_weights(weights_location)
        else:
            model.load_weights(weights_location)

        return model