Exemple #1
0
def test_fw_iter(IteratorClass, args):
    """Benchmark a DALI framework iterator.

    Builds one RN50 pipeline per GPU, then times how quickly batches can be
    pulled through either the TensorFlow ``DALIIterator`` op or a plain
    framework iterator class, printing throughput every
    ``args.print_freq`` iterations.

    Parameters
    ----------
    IteratorClass : iterator class (or TF op factory) under test; selected
        by its fully qualified name.
    args : parsed CLI namespace; uses ``batch_size``, ``workers``, ``gpus``,
        ``fp16``, ``nhwc``, ``iters``, ``epochs`` and ``print_freq``.

    Returns
    -------
    None; results are printed.
    """
    iterator_name = IteratorClass.__module__ + "." + IteratorClass.__name__
    print("Start testing {}".format(iterator_name))
    sess = None
    daliop = None
    dali_train_iter = None
    images = []
    labels = []

    pipes = [
        RN50Pipeline(batch_size=args.batch_size,
                     num_threads=args.workers,
                     device_id=n,
                     num_gpus=args.gpus,
                     data_paths=data_paths,
                     prefetch=PREFETCH,
                     fp16=args.fp16,
                     nhwc=args.nhwc) for n in range(args.gpus)
    ]
    # Build for side effects with a plain loop rather than a throwaway
    # list comprehension.
    for pipe in pipes:
        pipe.build()

    iters = args.iters
    if args.iters < 0:
        # Derive the iteration count from the dataset size: ceil-divide the
        # sample count by the batch size, then ceil-divide across GPUs.
        iters = pipes[0].epoch_size("Reader")
        assert all(pipe.epoch_size("Reader") == iters for pipe in pipes)
        iters = (iters + args.batch_size - 1) // args.batch_size
        iters = (iters + args.gpus - 1) // args.gpus

    if iterator_name == "nvidia.dali.plugin.tf.DALIIterator":
        # TF path: wire one DALI op per GPU and create a shared session.
        daliop = IteratorClass()
        for dev in range(args.gpus):
            with tf.device('/gpu:%i' % dev):
                out_type = tf.float16 if args.fp16 else tf.float32
                image, label = daliop(pipeline=pipes[dev],
                                      shapes=[(args.batch_size, 3, 224, 224),
                                              ()],
                                      dtypes=[out_type, tf.int32])
                images.append(image)
                labels.append(label)
        gpu_options = GPUOptions(per_process_gpu_memory_fraction=0.5)
        config = ConfigProto(gpu_options=gpu_options)
        sess = Session(config=config)

    end = time.time()
    for i in range(args.epochs):
        # First epoch is a warm-up; subsequent ones are the measured runs.
        print("Warm up" if i == 0 else "Test run " + str(i))
        data_time = AverageMeter()

        if iterator_name == "nvidia.dali.plugin.tf.DALIIterator":
            assert sess is not None
            for j in range(iters):
                res = sess.run([images, labels])
                data_time.update(time.time() - end)
                if j % args.print_freq == 0:
                    print(
                        "{} {}/ {}, avg time: {} [s], worst time: {} [s], speed: {} [img/s]"
                        .format(iterator_name, j + 1, iters, data_time.avg,
                                data_time.max_val,
                                args.gpus * args.batch_size / data_time.avg))
                end = time.time()
        else:
            dali_train_iter = IteratorClass(pipes,
                                            pipes[0].epoch_size("Reader"))
            for j, it in enumerate(iter(dali_train_iter)):
                data_time.update(time.time() - end)
                if j % args.print_freq == 0:
                    print(
                        "{} {}/ {}, avg time: {} [s], worst time: {} [s], speed: {} [img/s]"
                        .format(iterator_name, j + 1, iters, data_time.avg,
                                data_time.max_val,
                                args.gpus * args.batch_size / data_time.avg))
                end = time.time()
                # Fix off-by-one: the original post-increment "j > iters"
                # test ran one extra batch; stop after exactly `iters`.
                if j + 1 >= iters:
                    break
Exemple #2
0
from console_classify import read_img, inference
import argparse
import cv2
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image

from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import Session
import tensorflow.compat.v1.keras.backend as K

# Let GPU memory grow on demand and log op placement, then install this
# TF1-compat session as the process-wide Keras session.
config = ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
session = Session(config=config)
K.set_session(
    session)  # set this TensorFlow session as the default session for Keras

# Sample images used by the (notebook-style) original; kept for reference.
# %%bash
# mkdir ./data
# wget  -O ./data/img0.JPG "https://d17fnq9dkz9hgj.cloudfront.net/breed-uploads/2018/08/siberian-husky-detail.jpg?bust=1535566590&width=630"
# wget  -O ./data/img1.JPG "https://www.hakaimagazine.com/wp-content/uploads/header-gulf-birds.jpg"
# wget  -O ./data/img2.JPG "https://www.artis.nl/media/filer_public_thumbnails/filer_public/00/f1/00f1b6db-fbed-4fef-9ab0-84e944ff11f8/chimpansee_amber_r_1920x1080.jpg__1920x1080_q85_subject_location-923%2C365_subsampling-2.jpg"
# wget  -O ./data/img3.JPG "https://www.familyhandyman.com/wp-content/uploads/2018/09/How-to-Avoid-Snakes-Slithering-Up-Your-Toilet-shutterstock_780480850.jpg"

parser = argparse.ArgumentParser()
parser.add_argument('model_dir', help="SavedModel Directory")
args = parser.parse_args()

model_path = args.model_dir
# NOTE(review): `image` and `Session` are re-imported below - redundant but
# harmless duplicates of the imports above.
from tensorflow.keras.preprocessing import image
import json
from tensorflow import Graph
from tensorflow.compat.v1 import Session
from io import BytesIO
from six.moves import urllib

img_height, img_width = 224, 224
# Read the label-map JSON file and parse it into a dict.
with open('./models/modelo.json', 'r') as f:
    labelInfo = f.read()

labelInfo = json.loads(labelInfo)

# Build the Keras model inside its own dedicated Graph/Session pair.
model_graph = Graph()
with model_graph.as_default():
    tf_session = Session()
    with tf_session.as_default():
        # NOTE(review): `load_model` is not imported in this snippet -
        # presumably keras.models.load_model; confirm in the full file.
        model = load_model('./models/pesos.h5')


def index(request):
    """Render the landing page with a minimal template context."""
    return render(request, 'index.html', {'a': 1})


def predictImage(request):
    # NOTE(review): this view is truncated in this snippet - the body ends
    # right after saving the uploaded file; the actual prediction and
    # response steps are missing here.
    print(request)
    print(request.POST.dict())
    # Save the uploaded file ('filePath' form field) via Django storage.
    fileObj = request.FILES['filePath']
    fs = FileSystemStorage()
    filePathName = fs.save(fileObj.name, fileObj)
 def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
     """Create a TF1 variable named *name* matching *tensor*'s dtype and shape.

     The variable is zero-initialized, initialized in *session*, read once,
     and returned (its values are expected to be assigned separately).
     """
     dtype = tf.dtypes.as_dtype(tensor.dtype)
     variable = tf.get_variable(
         dtype=dtype,
         shape=tensor.shape,
         name=name,
         initializer=tf.zeros_initializer(),
     )
     session.run(tf.variables_initializer([variable]))
     session.run(variable)
     return variable
Exemple #5
0
def runModels(ModelFuncPointer,
            ModelName,
            TrainDir,
            TrainGenerator,
            ValidationGenerator,
            TestGenerator,
            resultsFile=None,
            numEpochs=10,
            epochSteps=100,
            validationSteps=1,
            network=None,
            nCPU = 1,
            gpuID = 0):
    """Train the model built by *ModelFuncPointer*, evaluate it, and pickle
    the training history.

    Parameters
    ----------
    ModelFuncPointer : callable building a compiled model from one (x, y)
        batch taken from ``TrainGenerator``.
    ModelName : name stored in the saved history under 'name'.
    TrainDir : unused here; kept for interface compatibility.
    TrainGenerator / ValidationGenerator / TestGenerator : keras
        Sequence-like generators.
    resultsFile : output pickle path; derived from ``trainFile`` when None
        (see NOTE below).
    numEpochs, epochSteps, validationSteps : training schedule.
    network : (json_path, weights_path) pair; required - the function exits
        with an error when None.
    nCPU : worker count; values > 1 enable multiprocessing data loading.
    gpuID : CUDA device index to expose.

    Returns
    -------
    None; side effects are the written JSON/weights files and the pickled
    history at *resultsFile*.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpuID)

    ## The following code block appears necessary for running with tf2 and cudnn
    from tensorflow.compat.v1 import ConfigProto
    from tensorflow.compat.v1 import Session
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    Session(config=config)
    ###

    if resultsFile is None:
        # NOTE(review): `trainFile` is not defined in this scope, so this
        # default branch raises NameError when taken - TODO: confirm where
        # it should come from.
        resultsFilename = os.path.basename(trainFile)[:-4] + ".p"
        resultsFile = os.path.join("./results/", resultsFilename)

    # Build the model from the shape of one real training batch.
    x, y = TrainGenerator.__getitem__(0)
    model = ModelFuncPointer(x, y)

    # Early stopping and saving the best weights.
    callbacks_list = [
            EarlyStopping(
                monitor='val_loss',
                verbose=1,
                min_delta=0.01,
                patience=100),
            ModelCheckpoint(
                filepath=network[1],
                monitor='val_loss',
                save_best_only=True),
            TerminateOnNaN()
            ]

    if nCPU > 1:
        history = model.fit(TrainGenerator,
            steps_per_epoch=epochSteps,
            epochs=numEpochs,
            validation_data=ValidationGenerator,
            callbacks=callbacks_list,
            use_multiprocessing=True,
            max_queue_size=nCPU,
            workers=nCPU)
    else:
        history = model.fit(TrainGenerator,
            steps_per_epoch=epochSteps,
            epochs=numEpochs,
            validation_data=ValidationGenerator,
            callbacks=callbacks_list,
            use_multiprocessing=False)

    if network is not None:
        # Serialize the architecture to JSON, then reload it together with
        # the best checkpointed weights so the predictions below come from
        # the best epoch rather than the last one.
        model_json = model.to_json()
        with open(network[0], "w") as json_file:
            json_file.write(model_json)
        with open(network[0], "r") as json_file:
            loadedModel = json_file.read()
        model = model_from_json(loadedModel)
        model.load_weights(network[1])
    else:
        print("Error: model and weights not loaded")
        sys.exit(1)

    x, y = TestGenerator.__getitem__(0)
    predictions = model.predict(x)

    # Fold the evaluation artifacts into the history dict before pickling.
    history.history['loss'] = np.array(history.history['loss'])
    history.history['val_loss'] = np.array(history.history['val_loss'])
    history.history['predictions'] = np.array(predictions)
    history.history['Y_test'] = np.array(y)
    history.history['name'] = ModelName

    print("results written to: ", resultsFile)
    # Close the pickle file deterministically (original leaked the handle).
    with open(resultsFile, "wb") as results_fh:
        pickle.dump(history.history, results_fh)

    return None



# Star imports bring Input, Embedding, Reshape, dot, etc. into scope.
from keras.models import *
from keras.optimizers import *
from keras.layers import *
from keras.metrics import *
from keras.regularizers import *
from keras.callbacks import *
from tensorflow.compat.v1 import ConfigProto , Session
from tensorflow.compat.v1.keras import backend as K
# Cap each process at 95% of GPU memory, grow on demand, and register the
# configured session as Keras' default.
config = ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction= 0.95
K.set_session(Session(config=config) )


# NOTE(review): `retrain`, `nterms` and `vector_dim` are defined elsewhere
# in the original file; this block is also truncated - it ends
# mid-model-definition after the dot product.
if retrain == False:

    #word2vec model to be trained: two one-element inputs (target and
    # context term ids) share a single embedding layer and are compared
    # below via a dot product.
    input_target = Input((1,) , name='target_in')
    input_context = Input((1,) , name='context_in')
    embedding = Embedding( nterms, vector_dim, input_length=1, name='embedding' , embeddings_initializer='glorot_uniform' )
    target = embedding(input_target)
    target = Reshape((vector_dim, 1), name='target')(target)

    context = embedding(input_context)
    context = Reshape((vector_dim, 1) , name='context' )(context)

    dot_product = dot([target, context] , axes=1 , normalize = False)
Exemple #7
0
        # NOTE(review): truncated snippet - this duplicates part of the
        # test_fw_iter body from Exemple #1 (with a different GPU memory
        # fraction and iterator-name string) and is cut off mid-loop below.
        daliop = IteratorClass()
        for dev in range(args.gpus):
            with tf.device('/gpu:%i' % dev):
                if args.fp16:
                    out_type = tf.float16
                else:
                    out_type = tf.float32
                image, label = daliop(pipeline=pipes[dev],
                                      shapes=[(args.batch_size, 3, 224, 224),
                                              ()],
                                      dtypes=[out_type, tf.int32])
                images.append(image)
                labels.append(label)
        # Cap the TF session at 80% of GPU memory per process.
        gpu_options = GPUOptions(per_process_gpu_memory_fraction=0.8)
        config = ConfigProto(gpu_options=gpu_options)
        sess = Session(config=config)

    end = time.time()
    for i in range(args.epochs):
        # First epoch is a warm-up; subsequent epochs are the measured runs.
        if i == 0:
            print("Warm up")
        else:
            print("Test run " + str(i))
        data_time = AverageMeter()

        if iterator_name == "tf.DALIIterator":
            assert sess != None
            for j in range(iters):
                res = sess.run([images, labels])
                data_time.update(time.time() - end)
                if j % args.print_freq == 0:
from pickle import load
from numpy import array
from keras.models import load_model
from random import choice
from tensorflow.compat.v1 import Session
from keras import backend

from project.config import get_objects, session_factory
from project.model import tag_model, pattern_model, response_model, context_model

session = Session()
backend.set_session(session)


def get_data_from_db():
    """Assemble chatbot intent records (tag/patterns/responses/context) from
    the database.

    NOTE(review): the function is truncated in this snippet - it ends
    mid-loop while collecting patterns, before responses/context are
    fetched or anything is returned.
    """
    # fetch tags
    fetched = get_objects(tag_model.Tag)

    # `forma` is a per-tag record template; `result` accumulates intents.
    forma = {"tag": "", "patterns": [], "responses": [], "context": []}
    result = {'intents': []}
    for item in fetched:
        forma['tag'] = str(item.tag).encode("utf8").decode("utf8")
        # fetch patterns for this tag from a fresh ORM session
        new_session = session_factory()
        patterns = new_session.query(
            pattern_model.Pattern).filter_by(tag_id=item.id_tag).all()
        # add patterns to intents
        for pattern in patterns:
            forma['patterns'].append(
                str(pattern.pattern).encode("utf8").decode("utf8"))
Exemple #9
0
class Network(object):
    """Three-layer convolutional x2 super-resolution network (TF1 graph mode).

    Builds a patch-extraction -> non-linear-mapping -> reconstruction conv
    stack (SRCNN-style, judging by the layer names and 9/1/5 filter sizes)
    on top of a bicubically upscaled input, plus an optional L2-loss/Adam
    training path, and owns its own Session and Saver.
    """

    def __init__(self, dimensions, batch_size, initialize_loss=True, lr=0.0001, lr_stair_width=10, lr_decay=0.95):
        # dimensions appears to be (width, height): the placeholders index
        # dimensions[1] before dimensions[0] - TODO confirm with callers.
        self.batch_size = batch_size
        self.dimensions = dimensions
        self.scale_factor = 2
        self.layer_params = []
        self.inputs = placeholder(
            tf.float32, [batch_size, dimensions[1], dimensions[0], 3], name='input_images'
        )
        # Map 8-bit pixels into [0, 1); inference() multiplies by 256.0 to
        # undo this scaling on the way out.
        scaled_inputs = self.inputs / 256.0
        print("inputs shape: " + str(self.inputs.get_shape()))

        # Classical bicubic upscale; the conv stack refines this estimate.
        resized = resize_bicubic(
            scaled_inputs,
            [dimensions[1] * self.scale_factor, dimensions[0] * self.scale_factor],
            name="scale_bicubic")

        # Layer 1: 9x9 patch extraction, 64*3 filters.
        self.layer_params.append({
            'filter_count': 64 * 3,
            'filter_shape': [9, 9]
        })
        patch_extraction_layer = self.conv_layer("patch_extraction", self.layer_params[-1], resized)
        # Layer 2: 1x1 non-linear mapping, 32*3 filters.
        self.layer_params.append({
            'filter_count': 32 * 3,
            'filter_shape': [1, 1]
        })
        non_linear_mapping_layer = self.conv_layer("non_linear_mapping_layer", self.layer_params[-1],
                                                   patch_extraction_layer)
        # Layer 3: 5x5 reconstruction back to 3 channels, no ReLU (linear).
        self.layer_params.append({
            'filter_count': 3,
            'filter_shape': [5, 5]
        })
        self.output = self.conv_layer("reconstruction_layer", self.layer_params[-1],
                                      non_linear_mapping_layer, linear=True)

        if initialize_loss:
            # Training path: ground-truth placeholder, L2 loss, and Adam
            # with a staircase-decayed learning rate driven by the epoch
            # number fed in through train_step().
            self.real_images = placeholder(tf.float32,
                                           [self.batch_size,
                                            dimensions[1] * self.scale_factor,
                                            dimensions[0] * self.scale_factor,
                                            3],
                                           name='real_images')
            self.loss = self.get_loss()
            self.summary = tf.summary.scalar("loss", self.loss)
            self.epoch = placeholder(tf.int32, name='epoch')
            self.learning_rate = exponential_decay(
                lr, self.epoch, lr_stair_width, lr_decay, staircase=True)
            self.optimized = AdamOptimizer(
                self.learning_rate,
                beta1=0.9, beta2=0.999, epsilon=1e-08).minimize(self.loss)
        self.sess = Session()
        self.saver = Saver()

    def initialize(self):
        """Run global variable initialization in this network's session."""
        init = tf.compat.v1.global_variables_initializer()
        self.sess.run(init)

    @staticmethod
    def weight_variable(shape, stddev=0.1):
        """Return a trainable weight tensor drawn from a truncated normal."""
        initial = truncated_normal(shape, stddev=stddev)
        return tf.Variable(initial)

    @staticmethod
    def bias_variable(shape, initial=0.1):
        """Return a trainable bias tensor filled with the scalar *initial*."""
        initial = tf.constant(initial, shape=shape)
        return tf.Variable(initial)

    def conv_layer(self, name, params, data, weight_stddev=0.1, bias_init=0.1, linear=False):
        """Add a stride-1 conv layer described by *params* on top of *data*.

        SYMMETRIC (reflective) padding plus a VALID conv keeps the output
        spatial size equal to the input's - a 'SAME'-like conv without
        zero-padded borders. The created weights/biases/output are stored
        back into *params*; ReLU is applied unless linear=True. Returns the
        output tensor.
        """
        with tf.compat.v1.variable_scope(name):
            weights = self.weight_variable(params['filter_shape'] + [data.get_shape().as_list()[3],
                                           params['filter_count']], stddev=weight_stddev)
            biases = self.bias_variable([params['filter_count']], initial=bias_init)
            padded_data = tf.pad(data, [[0, 0],
                                        [(params['filter_shape'][0] - 1) // 2,
                                         (params['filter_shape'][0] - 1) // 2],
                                        [(params['filter_shape'][1] - 1) // 2,
                                         (params['filter_shape'][1] - 1) // 2],
                                        [0, 0]], "SYMMETRIC")
            conv = tf.nn.conv2d(padded_data, weights, strides=[1, 1, 1, 1], padding='VALID')
            if not linear:
                params['output'] = tf.nn.relu(conv + biases)
            else:
                params['output'] = conv + biases
            params['biases'] = biases
            params['weights'] = weights
            return params['output']

    def get_loss(self):
        """Summed L2 loss between the output and the 0-1-rescaled targets."""
        return tf.reduce_sum(tf.nn.l2_loss(self.output - tf.raw_ops.Div(x=self.real_images, y=256.0)))

    def get_batch_size(self):
        """Return the fixed batch size the graph was built with."""
        return self.batch_size

    def get_scale_factor(self):
        """Return the upscaling factor (always 2 here)."""
        return self.scale_factor

    def get_dimensions(self):
        """Return the input dimensions tuple passed to __init__."""
        return self.dimensions

    def train_step(self, images, target_images, epoch=0):
        """Run one optimization step; returns (loss, learning_rate)."""
        _, loss, lr = self.sess.run([self.optimized, self.loss, self.learning_rate], feed_dict={
            self.inputs: np.array(images),
            self.real_images: np.array(target_images),
            self.epoch: epoch
        })
        return loss, lr

    def validation_step(self, images, target_images):
        """Evaluate the loss without optimizing; returns a one-element list."""
        loss = self.sess.run([self.loss], feed_dict={
            self.inputs: np.array(images),
            self.real_images: np.array(target_images)
        })
        return loss

    def inference(self, images):
        """Upscale *images*; output is rescaled back to the 0-256 pixel range."""
        return self.sess.run(self.output * 256.0, feed_dict={
            self.inputs: np.array(images)
        })

    def save(self):
        """Checkpoint all variables to ./network_params; returns the path."""
        save_path = self.saver.save(self.sess, os.path.abspath("network_params"))
        return save_path

    def load(self, path="network_params"):
        """Restore variables from the checkpoint at *path*."""
        self.saver.restore(self.sess, os.path.abspath(path))