def benchmark(weights_filename, r, verbose="True"):
    filepath = 'model/weights/' + weights_filename  # filepath of weights
    num_images = 729  # number of testing images to benchmark on (<=729)
    verbose = (verbose == "True")  # CLI arguments arrive as strings
    
    # Compile model
    opt = tf.keras.optimizers.Adam(learning_rate=0.001)
    # Peak Signal-to-Noise Ratio
    def PSNR(y_true, y_pred):
        max_pixel = 1.0
        return tf.image.psnr(y_true, y_pred, max_val=max_pixel)
    model = espcn(r)
    model.compile(optimizer=opt, loss='mse', metrics=[PSNR])
    # Initialize testing generator
    testing_generator = DataGenerator('LRbicx' + str(r), batch_size=1, dictionary="test")
    # Load weights
    model.load_weights(filepath)
    # Calculate average PSNR of all testing data
    average_psnr = 0
    for i in range(num_images):
        lr, hr = testing_generator[i]  # equivalent to __getitem__(i)
        sr = model.predict(lr)
        result = PSNR(hr[0], sr[0]).numpy()  # reuse the metric defined above
        average_psnr += result
        if verbose:
            print(f'Image: {i}, PSNR: {result}, Average: {average_psnr / (i + 1)}')
    print(f'Average PSNR: {average_psnr / num_images}')
Example 2
import os

import numpy as np
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam

import model as espcn_model


def train_datagen(xs, ys, batch_size=64, epoch_num=1):
    # Signature reconstructed; the original snippet was truncated at the top.
    xs = xs.astype('float32') / 255.0  # normalize images to [0, 1]
    ys = ys.astype('float32') / 255.0
    indices = list(range(xs.shape[0]))
    for _ in range(epoch_num):
        # shuffle sample order each epoch
        np.random.shuffle(indices)
        for i in range(0, len(indices), batch_size):
            batch_x = xs[indices[i:i + batch_size]]
            batch_y = ys[indices[i:i + batch_size]]
            yield batch_x, batch_y


if __name__ == '__main__':
    # get the espcn model
    espcn = espcn_model.espcn(scale_factor=args.scale_factor)
    model = espcn()

    # Print the model summary to verify that it doesn't contain quant-aware layers
    model.summary()

    model.compile(optimizer=Adam(args.lr), loss='mse')

    # Keras callback to save the weights every `save_every` epochs
    # (note: `period` is deprecated in newer TF releases in favor of `save_freq`)
    checkpointer = ModelCheckpoint(
        os.path.join(save_dir, 'model_weights_{epoch:03d}.h5'),
        verbose=1,
        save_weights_only=True,
        save_freq='epoch',
        period=args.save_every)
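A minimal sketch of how the generator and checkpoint callback above can be wired together; the xs/ys arrays, batch_size value, and args.epochs are assumptions, not part of the original snippet:

    # Hypothetical wiring: xs/ys arrays and batch_size are assumed to exist
    batch_size = 64  # illustrative
    model.fit(train_datagen(xs, ys, batch_size=batch_size, epoch_num=args.epochs),
              steps_per_epoch=len(xs) // batch_size,
              epochs=args.epochs,
              callbacks=[checkpointer])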
Example 3
import model as espcn_model
from tensorflow.keras.models import load_model
import tensorflow_model_optimization as tfmot
import tensorflow as tf
print("TENSORFLOW",tf.__version__)
import logging
from tensorflow.compat.v1 import graph_util
from tensorflow.python.keras import backend as K
from tensorflow import keras
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2



#Loading the base model here
espcn = espcn_model.espcn(scale_factor=4)
base_model = espcn()
base_model.load_weights('./models/espcn/saved_weights/model_weights_001.h5')

# Helper function uses `quantize_annotate_layer` to annotate that only the 
# CONV2D layers should be quantized.
def apply_quantization_to_conv(layer):
  if isinstance(layer, tf.keras.layers.Conv2D):
    return tfmot.quantization.keras.quantize_annotate_layer(layer)
  return layer

# Use `tf.keras.models.clone_model` to apply `apply_quantization_to_conv` 
# to the layers of the model.
annotated_model = tf.keras.models.clone_model(
    base_model,
    clone_function=apply_quantization_to_conv,
)
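Annotation by itself only tags the Conv2D layers; the quantization-aware model is built by quantize_apply, as the next example shows:

q_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)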
Example 4
import model as espcn_model
from tensorflow.keras.models import load_model
import tensorflow_model_optimization as tfmot
import tensorflow as tf
print("Tensorflow Version",tf.__version__)

#Loading the base model
espcn = espcn_model.espcn(scale_factor=4,loader=True)
base_model = espcn()

#Loading the pretrained weights
base_model.load_weights('./models/espcn/weights_only/model_weights_010.h5')

#Helper function to quantize a model
def apply_quantization_to_conv(layer):
  if isinstance(layer, tf.keras.layers.Conv2D):
    return tfmot.quantization.keras.quantize_annotate_layer(layer)
  return layer

# Use `tf.keras.models.clone_model` to apply `apply_quantization_to_conv` 
# to the layers of the model.
annotated_model = tf.keras.models.clone_model(
    base_model,
    clone_function=apply_quantization_to_conv,
)

q_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)

# print the network structure of model
q_aware_model.summary()
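From here, the usual next step (not shown in the snippet) is converting the quantization-aware model for deployment with the TFLite converter; a minimal sketch, with an illustrative output filename:

converter = tf.lite.TFLiteConverter.from_keras_model(q_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
with open('espcn_quant.tflite', 'wb') as f:  # filename is illustrative
    f.write(tflite_model)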
def train(model, r, batch_size, epochs):
    # Initialize training generator (call reconstructed from the truncated
    # snippet; mirrors the DataGenerator usage in the benchmark example)
    training_generator = DataGenerator('LRbicx' + str(r),
                                       batch_size=batch_size)
    # fit_generator is deprecated in TF2; model.fit accepts generators directly
    model.fit(training_generator, epochs=epochs, verbose=1)
    # Save weights; the filepath encodes the training configuration
    filepath = ('model/weights/r' + str(r) + 'bs' + str(batch_size) +
                'epochs' + str(epochs) + 'weights.h5')
    model.save_weights(filepath)
    print("Saved weights at: " + filepath)


if __name__ == "__main__":
    if len(sys.argv) == 4:
        # Parameters
        r = int(sys.argv[1])
        batch_size = int(sys.argv[2])
        epochs = int(sys.argv[3])
        # Compile model
        opt = tf.keras.optimizers.Adam(learning_rate=0.001)

        # Peak Signal-to-Noise Ratio
        def PSNR(y_true, y_pred):
            max_pixel = 1.0
            return tf.image.psnr(y_true, y_pred, max_val=max_pixel)

        model = espcn(r)
        model.compile(optimizer=opt, loss='mse', metrics=[PSNR])
        train(model, r, batch_size, epochs)
    else:
        print(
            "Correct usage: python training.py [upscale_factor] [batch_size] [epochs]"
        )
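For example, running "python training.py 3 32 100" trains a 3x-upscaling ESPCN with batch size 32 for 100 epochs and saves the weights to model/weights/r3bs32epochs100weights.h5.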