Example #1
def adapt_model_to_new_input(model, input_shape, old_input_shape, verbose):
    old_config = model.get_config()
    config = old_config.copy()
    # Adapt input shape
    config['layers'][0]['input_shape'] = input_shape
    # Adapt maxpool layer
    pool_size = [
        l.pool_size[0] for l in model.layers if l.name == "maxpooling2d"
    ]
    global_stride = np.prod(pool_size[0:-1])
    new_pool_size = int(pool_size[-1] +
                        (input_shape[1] - old_input_shape[1]) / global_stride)
    if verbose:
        print "Input shape :", input_shape
        print "Poolsize :", pool_size[-1], "-> (%d,%d)" % (new_pool_size,
                                                           new_pool_size)
    maxpool_pos = [
        i for i, l in enumerate(model.layers) if l.name == "maxpooling2d"
    ]
    config['layers'][maxpool_pos[-1]]['pool_size'] = (new_pool_size,
                                                      new_pool_size)
    # Compile model
    new_model = model_from_config(config)
    # Set weights
    for i, l in enumerate(new_model.layers):
        l.set_weights(model.layers[i].get_weights())
    return new_model
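A hedged usage sketch for the helper above (old Keras Sequential API; the channels-first shapes below are illustrative, not from the original repository):

# Hypothetical call: grow a network trained on 224x224 crops to accept 256x256 inputs
# by enlarging its last max-pooling window.
bigger_model = adapt_model_to_new_input(
    model,                            # any trained Sequential ending in MaxPooling2D -> Flatten
    input_shape=(3, 256, 256),        # new (channels, height, width)
    old_input_shape=(3, 224, 224),    # shape the weights were trained on
    verbose=True)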
Example #2
def load_model(filepath,
               inserted_layers=None,
               custom_objects=None,
               initial_inputs=None,
               new_output_layers=None):
    """loads model like keras load_model, updates the input layer,
        as well inserts extra layers after the input
        
        Args:
            filepath: path to the keras model
            inserted_layers: list of list of layers to insert. layers can either be 
                layer config objects, or the layers themselves.
            custom_objects: dict, with key,val : (class_name,class)
            initial_inputs: initial input for the model, can be tf.Tensor or np.array
            new_output_layers: New output layers for the model, any layers occuring after
                these layers are removed. Note: buggy.
        Returns:
            A keras model with inserted layers and layers removed
    """
    if new_output_layers is not None:
        warn('using new_output_layers is buggy')

    K.set_learning_phase(False)

    #Make sure inserted_layers is a list of lists (added layers for each input)
    if inserted_layers is not None:
        if not isinstance(inserted_layers[0], list):
            inserted_layers = [inserted_layers]
        added_objects = convert_layer_to_config(inserted_layers)
    else:
        added_objects = {}
    #Make sure initial_inputs is a list of inputs
    if (initial_inputs is not None and not isinstance(initial_inputs, list)):
        initial_inputs = [initial_inputs]

    #Make sure new_output_layers is a list of outputs
    if (new_output_layers is not None
            and not isinstance(new_output_layers, list)):
        new_output_layers = [new_output_layers]

    if not custom_objects:
        custom_objects = {}
    custom_objects = {**base_layers_dict, **custom_objects, **added_objects}

    with h5py.File(filepath, mode='r') as f:
        # instantiate model
        model_config = f.attrs.get('model_config')
        if model_config is None:
            raise ValueError('No model found in config file.')

        model_config = json.loads(model_config.decode('utf-8'))

        update_config(model_config['config'], inserted_layers,
                      new_output_layers, initial_inputs)
        model = model_from_config(model_config, custom_objects=custom_objects)
        if 'layer_names' not in f.attrs and 'model_weights' in f:
            f = f['model_weights']

        saving.load_weights_from_hdf5_group_by_name(f, model.layers)
        return model
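A hedged usage sketch for the loader above; the file name, inserted layer, and input shape are placeholders that only illustrate the expected argument structure:

import numpy as np
from keras.layers import GaussianNoise

# Hypothetical call: rebuild the saved model with a noise layer inserted after its
# (single) input, re-shaped to match the dummy initial input.
model = load_model(
    'trained_model.h5',
    inserted_layers=[[GaussianNoise(0.1)]],        # one list of layers per model input
    initial_inputs=[np.zeros((1, 224, 224, 3))])   # used to rebuild the input layer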
Example #3
File: base.py Project: bmcfee/crema
    def _instantiate(self, rsc):

        # First, load the pump
        with open(resource_filename(__name__, os.path.join(rsc, 'pump.pkl')),
                  'rb') as fd:
            self.pump = pickle.load(fd)

        # Now load the model
        with open(
                resource_filename(__name__,
                                  os.path.join(rsc, 'model_spec.pkl')),
                'rb') as fd:
            spec = pickle.load(fd)
            self.model = model_from_config(
                spec,
                custom_objects={k: layers.__dict__[k]
                                for k in layers.__all__})

        # And the model weights
        self.model.load_weights(
            resource_filename(__name__, os.path.join(rsc, 'model.h5')))

        # And the version number
        with open(
                resource_filename(__name__, os.path.join(rsc, 'version.txt')),
                'r') as fd:
            self.version = fd.read().strip()
Example #4
    def __init__(
            self,
            allele,
            model_directory=CLASS1_MODEL_DIRECTORY,
            max_ic50=MAX_IC50):
        self.max_ic50 = max_ic50
        if not exists(model_directory) or len(listdir(model_directory)) == 0:
            raise ValueError(
                "No MHC prediction models found in %s" % (model_directory,))
        original_allele_name = allele
        allele = self.allele = normalize_allele_name(allele)
        if self.allele not in _allele_model_cache:
            json_filename = self.allele + ".json"
            json_path = join(model_directory, json_filename)
            if not exists(json_path):
                raise ValueError("Unsupported allele: %s" % (
                    original_allele_name,))

            hdf_filename = self.allele + ".hdf"
            hdf_path = join(model_directory, hdf_filename)

            if not exists(hdf_path):
                raise ValueError("Missing model weights for allele %s" % (
                    original_allele_name,))

            with open(json_path, "r") as f:
                config_dict = json.load(f)
            self.model = model_from_config(config_dict)
            self.model.load_weights(hdf_path)
            _allele_model_cache[self.allele] = self.model
            self.model.compile(loss="mse", optimizer="rmsprop")
        else:
            self.model = _allele_model_cache[self.allele]
Example #5
def clone_model(model, custom_objects={}):
    config = {
        'class_name': model.__class__.__name__,
        'config': model.get_config(),
    }
    clone = model_from_config(config, custom_objects=custom_objects)
    clone.set_weights(model.get_weights())
    return clone
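A minimal usage sketch, assuming a compiled Keras model is available; the clone shares the architecture and weights but must be compiled separately if it is to be trained:

from keras.layers import Dense
from keras.models import Sequential

# Hypothetical model — any Keras model works here.
model = Sequential([Dense(4, input_dim=8, activation='relu'),
                    Dense(1, activation='sigmoid')])
model.compile(optimizer='sgd', loss='mse')

target = clone_model(model)   # same weights, separate object (e.g. a DQN target network)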
Example #6
def clone_model(model, custom_objects={}):
    # Requires Keras 1.0.7 since get_config has breaking changes.
    config = {
        'class_name': model.__class__.__name__,
        'config': model.get_config(),
    }
    clone = model_from_config(config, custom_objects=custom_objects)
    clone.set_weights(model.get_weights())
    return clone
Example #7
def clone_model(model, custom_objects={}):
    # Requires Keras 1.0.7 since get_config has breaking changes.
    config = {
        'class_name': model.__class__.__name__,
        'config': model.get_config(),
    }
    clone = model_from_config(config, custom_objects=custom_objects)
    clone.set_weights(model.get_weights())
    return clone
Example #8
    def build_model(self):
        # We're using the DDQN network architecture from Mnih et al. (Nature, 2015).
        # We'll create three models:
        #
        #   A) "base model" which describes the fundamental network architecture.
        #
        #   B) "model", which has the same architecture as "base model". We'll be training
        #      this model with Backprop.
        #
        #   C) "target model", which also has the same architecture as "base model", and
        #      which is used to predict the expected discounted future reward for a given
        #      action. We'll periodically copy the weights from "model" to "target model".
        #
        # To reduce the number of forward passes we'll have to perform per iteration, we'll
        # use the Keras functional API to define a secondary input layer, which is used to mask
        # the nodes in the output layer which are not relevant to the action that the agent has
        # taken.

        self.base_model = Sequential()
        self.base_model.add(
            Lambda(lambda x: x / 255.0, input_shape=self.atari_shape))
        self.base_model.add(Conv2D(32, (8, 8), strides=(4, 4)))
        self.base_model.add(Activation('relu'))
        self.base_model.add(Conv2D(64, (4, 4), strides=(2, 2)))
        self.base_model.add(Activation('relu'))
        self.base_model.add(Conv2D(64, (3, 3), strides=(1, 1)))
        self.base_model.add(Activation('relu'))
        self.base_model.add(Flatten())
        self.base_model.add(Dense(512))
        self.base_model.add(Activation('relu'))
        self.base_model.add(Dense(self.num_actions))
        self.base_model.add(Activation('linear'))

        mask = Input(shape=(self.num_actions, ))
        y_pred = self.base_model.output
        masked = Multiply()([y_pred, mask])
        self.model = Model(inputs=[self.base_model.input, mask],
                           outputs=[masked])

        # target_model is never trained - it's always merely a clone of model, used for predictions only
        config = {
            'class_name': self.model.__class__.__name__,
            'config': self.model.get_config(),
        }
        self.target_model = model_from_config(config)
        self.target_model.compile(
            optimizer='sgd', loss=huber_loss
        )  # optimizer and loss are never used, so are set arbitrarily

        # Mnih et al. uses RMSprop... some other implementations use Adam
        # model.compile(optimizer=Adam(lr=1e-4), loss='mse')
        self.model.compile(optimizer=Adam(lr=0.000025),
                           loss=huber_loss)  # from keras-rl


#         self.model.compile(optimizer=RMSprop(lr=0.00025, rho=0.95, epsilon=0.01), loss=huber_loss)
Example #9
    def compile(self, optimizer, loss_func, output='.'):
        """Setup all of the TF graph variables/ops.

        This is inspired by the compile method on the
        keras.models.Model class.

        This is a good place to create the target network, setup your
        loss function and any placeholders you might need.
        
        You should use the mean_huber_loss function as your
        loss_function. You can also experiment with MSE and other
        losses.

        The optimizer can be whatever class you want. We used the
        keras.optimizers.Optimizer class. Specifically the Adam
        optimizer.
        """

        # Create folders for the log, models, and videos
        if self.phase == 'train':
            global model_path, model_file, log_path, video_capture_path
            model_path = os.path.join(output, model_path)
            model_file = os.path.join(model_path, model_file)
            log_path = os.path.join(output, log_path)
            create_directory(model_path)
            create_directory(log_path)
        elif self.phase == 'video':
            video_capture_path = os.path.join(output, video_capture_path)
            create_directory(video_capture_path)
        
        # Initialize target network
        with tf.name_scope('Target'):
            if self.target_fixing:
                config = {
                    'class_name': self.q_network.__class__.__name__,
                    'config': self.q_network.get_config(),
                }
                self.target_network = model_from_config(config)
                self.target_network.set_weights(self.q_network.get_weights())
            else:
                self.target_network = self.q_network

        # Calculate individual Huber loss (Keras calculates the mean)
        with tf.name_scope('Lambda'):
            target = Input(shape=(self.num_actions,))
            action_mask = Input(shape=(self.num_actions,))
            error = lambda x: K.sum(loss_func(x[0] * x[1], x[2], self.max_grad), axis=-1)
            output = Lambda(error, output_shape=(self.num_actions,))([self.q_network.output, action_mask, target])

        self.extended_q_network = Model(input=[self.q_network.input, target, action_mask], output=output)

        # Compile all networks
        with tf.name_scope('Loss'):
            self.q_network.compile(optimizer=optimizer, loss=loss_func)
            self.target_network.compile(optimizer=optimizer, loss=loss_func)
            self.extended_q_network.compile(optimizer=optimizer, loss=lambda y_true, y_pred: y_pred)
Example #10
def clone_model(model, custom_objects=None):
    from keras.models import model_from_config
    custom_objects = custom_objects or {}
    config = {
        'class_name': model.__class__.__name__,
        'config': model.get_config(),
    }
    clone = model_from_config(config, custom_objects=custom_objects)
    clone.set_weights(model.get_weights())
    return clone
Example #11
def deserialize_keras_model(header, frames):
    from keras.models import model_from_config
    n = 0
    weights = []
    for head, length in zip(header['headers'], header['nframes']):
        x = deserialize(head, frames[n:n + length])
        weights.append(x)
        n += length
    model = model_from_config(header)
    model.set_weights(weights)
    return model
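The matching serializer is not part of this example; judging from the fields the deserializer reads (a model config at the top level plus per-weight 'headers' and 'nframes'), a sketch might look like the following, where serialize is assumed to be the same helper used above:

def serialize_keras_model(model):
    # Hypothetical counterpart: pack the model config plus one header/frame group
    # per weight array so deserialize_keras_model can rebuild and re-populate it.
    header = {'class_name': model.__class__.__name__,
              'config': model.get_config()}
    headers, nframes, frames = [], [], []
    for w in model.get_weights():
        head, sub_frames = serialize(w)
        headers.append(head)
        nframes.append(len(sub_frames))
        frames.extend(sub_frames)
    header['headers'] = headers
    header['nframes'] = nframes
    return header, frames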
Example #12
def deserialize_keras_model(header, frames):
    from keras.models import model_from_config
    n = 0
    weights = []
    for head, length in zip(header['headers'], header['nframes']):
        x = deserialize(head, frames[n: n + length])
        weights.append(x)
        n += length
    model = model_from_config(header)
    model.set_weights(weights)
    return model
Example #13
def keras_model_deep_copy(keras_model):
    config = keras_model.get_config()
    if isinstance(keras_model, Sequential):
        new_model = Sequential.from_config(config)
    else:
        new_model = model_from_config(config)
    shuffle_weights(new_model)
    loss = keras_model.loss
    metrics = keras_model.metrics
    optimizer = keras_model.optimizer
    new_model.compile(optimizer, loss, metrics)
    return new_model
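shuffle_weights is referenced but not defined in this snippet; a common implementation re-randomizes the copy by permuting every weight array, roughly as sketched below (an assumption, not the original code):

import numpy as np

def shuffle_weights(model, seed=None):
    # Hypothetical helper: keep the architecture but scramble the learned parameters.
    rng = np.random.RandomState(seed)
    weights = model.get_weights()
    shuffled = [rng.permutation(w.flatten()).reshape(w.shape) for w in weights]
    model.set_weights(shuffled)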
Example #14
    def get_local_network(self):
        config = {
            'class_name': self.model.__class__.__name__,
            'config': self.model.get_config()
        }

        clone = model_from_config(
            config, custom_objects={'HiddenStateLSTM': HiddenStateLSTM})
        clone.set_weights(self.model.get_weights())

        local_network = TetrisNet(self.configs)
        local_network.model = clone

        return local_network
Example #15
def clone_model(model, custom_objects={}):
    # TODO: change to simpler logic
    """
    model_copy = keras.models.clone_model(model)
    model_copy.set_weights(model.get_weights())
    """
    # Requires Keras 1.0.7 since get_config has breaking changes.
    config = {
        'class_name': model.__class__.__name__,
        'config': model.get_config(),
    }
    clone = model_from_config(config, custom_objects=custom_objects)
    clone.set_weights(model.get_weights())
    return clone
Example #16
def load_model(path, nlp):
    with (path / 'model_config.json').open() as file_:
        _json = file_.read()
        config_dict = json.loads(_json)
        image_embedding_function = config_dict.get(
            IMAGE_EMBEDDING_FUNCTION_KEY, None)
        if IMAGE_EMBEDDING_FUNCTION_KEY in config_dict:
            del config_dict[IMAGE_EMBEDDING_FUNCTION_KEY]
        model = model_from_config(config_dict)
    with (path / 'model_weights').open('rb') as file_:
        lstm_weights = pickle.load(file_)
    embeddings = get_embeddings(nlp.vocab)
    model.set_weights([embeddings] + lstm_weights)
    return model, config_dict, image_embedding_function
Example #17
def clone_model(model):
    """Clone model
    Returns
    -------
    keras.models.Model
      The cloned model.
    """

    config = {
        'class_name': model.__class__.__name__,
        'config': model.get_config(),
    }
    clone = model_from_config(config)
    clone.set_weights(model.get_weights())
    return clone
Example #18
def adapt_graph_to_new_input(model,
                             input_shape,
                             old_input_shape,
                             verbose=False):
    old_config = model.get_config()
    config = old_config.copy()
    # Adapt input shape
    config['input_config'][0]['input_shape'] = input_shape
    # Get names of each layer
    keys = model.nodes.keys()
    # Get the flatten layer
    flatten_keys = [k for k in keys if type(model.nodes[k]) is Flatten]
    # Get the name of the layer just before Flatten
    feature_key = keys[keys.index(flatten_keys[-1]) - 1]
    # Adapt pool size
    if config['nodes'][feature_key]["name"] == "Activation":
        # Merge case : multiple pooling size to adapt
        parents = get_parents(feature_key, config)
        for par in parents:
            # Store old pool size
            old_pool_size = config['nodes'][par]["pool_size"]
            # Compute Global stride
            global_stride = 1
            iteration = par
            count = 0
            while iteration != 'input' and count < 100:
                count += 1
                iteration = get_parents(iteration, config)
                if iteration != 'input' and config['nodes'][iteration][
                        "name"] == "MaxPooling2D":
                    global_stride *= config['nodes'][iteration]["pool_size"][0]
            # Compute new pool size
            new_pool_size = int(old_pool_size[0] +
                                (input_shape[1] - old_input_shape[1]) /
                                global_stride)
            if verbose:
                print "Input shape :", input_shape
                print "Poolsize :", old_pool_size, "-> (%d,%d)" % (
                    new_pool_size, new_pool_size)
            config['nodes'][par]["pool_size"] = (new_pool_size, new_pool_size)
            config['nodes'][par]["strides"] = (new_pool_size, new_pool_size)
    else:
        raise Exception("Not implemented")
    new_model = model_from_config(config)
    # Set weights
    for i, name in enumerate(new_model.nodes.keys()):
        new_model.nodes[name].set_weights(model.nodes[name].get_weights())
    return new_model
Example #19
def get_deconvnet_vgg16(filename):
    import h5py
    import json
    from keras.models import model_from_config
    with h5py.File(filename, 'r') as f:
        model_config = f.attrs.get('model_config')
        if model_config is None:
            raise ValueError('No model found in config file.')
        model_config = json.loads(model_config.decode('utf-8'))
        for layer in model_config['config']['layers']:
            if layer['class_name'] == u'MaxPooling2D':
                layer['class_name'] = u'MaxPooling2DWithArgMax'
        model = model_from_config(model_config, custom_objects=custom_objects)
    model.load_weights(filename)
    model.get_layer('block1_pool1')  # raises a ValueError if the expected pooling layer is missing
    return model
Example #20
def get_arch(info, doCompile=True):

    if isinstance(info, str):
        info = get_info(info)

    modelDef = os.path.join(NET_PATH, info['modelDef'][0])
    modelType = info['modelType'][0]
    winLen = int(info['winLen'][0])
    offset = info['offset'][0]
    scale = info['scale'][0]

    model = json.load(open(modelDef, 'r'))
    if doCompile == True:
        model = model_from_config(model)

    return model, modelType, winLen, offset, scale
Example #21
def predict(model_db, data, custom_objects=None):
    """A function to predict given a model, datasets, custom objects and
    batch size"""
    from keras.models import model_from_config
    from utils import sliced
    from databasesetup import get_models
    import numpy as np

    if custom_objects is None:
        custom_objects = {}
    # get the models collection
    models = get_models()

    # check if the predict function is already compiled
    if model_db['hashed_mod'] in COMPILED_MODELS:
        pred_function = COMPILED_MODELS[model_db['hashed_mod']]
        model_json = model_db['keras_model']
        model_name = model_json.get('class_name')
    else:
        # get the model in the DB
        # model_db = models.find_one({'hashed_mod': hashed_mod})
        model_json = model_db['keras_model']

        model_json.pop('optimizer')
        # load model
        model = model_from_config(model_json, custom_objects=custom_objects)
        model_name = model_json.get('name')

        # load the weights
        model.load_weights(model_db['params_dump'])

        # build the prediction function
        pred_function = build_predict_func(model)
        COMPILED_MODELS[model_db['hashed_mod']] = pred_function

    # predict according to the input/output type
    if model_name == 'Graph':
        input_order = model_json.get('input_order')
        pred = pred_function([np.array(data[n]) for n in input_order])
    elif model_name == 'Sequential':
        # unpack data
        X = data['X']
        pred = pred_function([X])
    else:
        raise NotImplementedError('This type of model is not supported')

    return pred
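build_predict_func is not shown; with the Keras backend API it is typically a thin wrapper around K.function, for example (a sketch under that assumption):

from keras import backend as K

def build_predict_func(model):
    # Hypothetical helper: a compiled backend function mapping the model's
    # input tensors to its output tensors, matching how pred_function is called above.
    return K.function(model.inputs, model.outputs)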
Example #22
def export(sess, previous_model, export_path, export_version):
    K.set_learning_phase(0)  # all new operations will be in test mode from now on

    # serialize the model and get its weights, for quick re-building
    config = previous_model.get_config()
    weights = previous_model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    new_model = model_from_config(config)
    new_model.set_weights(weights)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=new_model.input,
                                                  scores_tensor=new_model.output)
    model_exporter.init(sess.graph.as_graph_def(),
                        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)
Example #23
def fix_model_file(fp, od=Path('.')):
    assert fp.exists()
    if not od.is_dir():
        od.mkdir(parents=True, exist_ok=True)
    op = str(od / fp.name)
    fp = str(fp)

    with h5py.File(fp) as h5:
        config = json.loads(
            h5.attrs.get("model_config").decode('utf-8').replace(
                'input_dtype', 'dtype'))
    with tf.Session('') as sess:
        model = model_from_config(config)
        model.load_weights(fp)
        model.save(op)
        del model
    del sess
    print(op)
Example #24
def get_model_and_input(modelTag, inFile, inType, epoch=None):

    info = get_info(modelTag)
    modelWeights = get_weights(info, epoch)
    model, modelType, winLen, offset, scale = get_arch(info, doCompile=False)

    feaStream = get_input(inFile,
                          inType,
                          modelType,
                          offset=offset,
                          scale=scale,
                          winLen=winLen)

    model['layers'][0]['batch_input_shape'] = feaStream.shape
    model = model_from_config(model)
    model.load_weights(modelWeights)

    return model, feaStream
Example #25
def adapt_model_to_new_input(model, input_shape, old_input_shape, verbose):
    old_config = model.get_config()
    config = old_config.copy()
    # Adapt input shape
    config['layers'][0]['input_shape']=input_shape
    # Adapt maxpool layer
    pool_size = [l.pool_size[0] for l in model.layers if l.name=="maxpooling2d"]
    global_stride = np.prod(pool_size[0:-1])
    new_pool_size = int(pool_size[-1]+(input_shape[1]-old_input_shape[1])/global_stride)
    if verbose:
        print "Input shape :", input_shape
        print "Poolsize :", pool_size[-1], "-> (%d,%d)"%(new_pool_size,new_pool_size)
    maxpool_pos = [i for i,l in enumerate(model.layers) if l.name=="maxpooling2d"]
    config['layers'][maxpool_pos[-1]]['pool_size']=(new_pool_size,new_pool_size)
    # Compile model
    new_model = model_from_config(config)
    # Set weights
    for i,l in enumerate(new_model.layers):
        l.set_weights(model.layers[i].get_weights())
    return new_model
Example #26
def adapt_graph_to_new_input(model, input_shape, old_input_shape, verbose=False):
    old_config = model.get_config()
    config = old_config.copy()
    # Adapt input shape
    config['input_config'][0]['input_shape'] = input_shape
    # Get names of each layer
    keys = model.nodes.keys()
    # Get the flatten layer
    flatten_keys =[k for k in keys if  type(model.nodes[k]) is Flatten]
    # Get the name of the layer just before Flatten
    feature_key = keys[keys.index(flatten_keys[-1])-1]
    # Adapt pool size
    if config['nodes'][feature_key]["name"] == "Activation":
        # Merge case : multiple pooling size to adapt
        parents = get_parents(feature_key, config)
        for par in parents:
            # Store old pool size
            old_pool_size = config['nodes'][par]["pool_size"]
            # Compute Global stride
            global_stride = 1
            iteration = par
            count = 0
            while iteration!='input' and count < 100:
                count += 1
                iteration = get_parents(iteration, config)
                if iteration != 'input' and config['nodes'][iteration]["name"] == "MaxPooling2D":
                    global_stride *= config['nodes'][iteration]["pool_size"][0]
            # Compute new pool size
            new_pool_size = int(old_pool_size[0]+(input_shape[1]-old_input_shape[1])/global_stride)
            if verbose:
                print "Input shape :", input_shape
                print "Poolsize :", old_pool_size, "-> (%d,%d)"%(new_pool_size,new_pool_size)
            config['nodes'][par]["pool_size"] = (new_pool_size,new_pool_size)
            config['nodes'][par]["strides"] = (new_pool_size,new_pool_size)
    else:
        raise Exception("Not implemented")
    new_model = model_from_config(config)
    # Set weights
    for i,name in enumerate(new_model.nodes.keys()):
        new_model.nodes[name].set_weights(model.nodes[name].get_weights())
    return new_model
Example #27
def load_keras_model_from_disk(
        model_json_path,
        weights_hdf_path,
        name=None):
    """
    Loads a model from two files on disk: a JSON configuration and HDF5 weights.

    Parameters
    ----------
    model_json_path : str

    weights_hdf_path : str

    name : str, optional

    Returns a Keras model.
    """

    if not exists(model_json_path):
        raise ValueError("Model file %s (name = %s) not found" % (
            model_json_path, name,))

    with open(model_json_path, "r") as f:
        config_dict = json.load(f)

    if isinstance(config_dict, list):
        # not sure if this is a Keras bug but depending on the model I get back
        # either a list or a dict, the list is only usable with a Sequential
        # model
        model = Sequential.from_config(config_dict)
    else:
        model = model_from_config(config_dict)

    if weights_hdf_path is not None:
        if not exists(weights_hdf_path):
            raise ValueError(
                "Missing model weights file %s (name = %s)" % (weights_hdf_path, name))
        model.load_weights(weights_hdf_path)
    return model
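A hedged usage sketch; the paths are placeholders:

# Hypothetical call — file names are illustrative only.
model = load_keras_model_from_disk(
    "model_config.json",
    "model_weights.hdf",
    name="example-model")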
Example #28
def model_from_config(config):
    assert 'class_name' in config, 'Missing model class!'

    # fetch all members of module 'pydl.models'
    classes = dict(inspect.getmembers(sys.modules['pydl.models'], inspect.isclass))
    return k_models.model_from_config(config, classes)
Example #29
    if dataset == "mnist":
        num_classes = 10
        (x_train, y_train), (x_test, y_test) = mnist.load_data()
        x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
        x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    elif dataset == "imagenet_inceptionv3":
        model = keras.applications.inception_v3.InceptionV3(include_top=True, weights='imagenet', input_shape=(299, 299, 3), input_tensor=None, pooling=None)
        model_name_v1 = 'imagenet_inceptionV3_v1.hdf5'
        model_name_v2 = 'imagenet_inceptionV3_v2.hdf5'
        model_path = os.path.join(save_dir, model_name_v1)
        model.save(model_path)
        with h5py.File(model_path) as h5:
            config = json.loads(h5.attrs.get("model_config").decode('utf-8').replace('input_dtype', 'dtype'))
        with tf.Session('') as sess:
            model = model_from_config(config)
            model.load_weights(model_path)
            model.compile(loss='categorical_crossentropy', optimizer=keras.optimizers.Adam(lr=1e-4), metrics=['accuracy'])
            model_path = os.path.join(save_dir, model_name_v2)
            model.save(model_path)
            del model
        del sess
        print("InceptionV3 Model has been successfully downloaded and saved.")
        exit()
    elif dataset == "imagenet_xception":
        model = keras.applications.xception.Xception(include_top=True, weights='imagenet', input_shape=(299, 299, 3), input_tensor=None, pooling=None)
        model_name_v1 = 'imagenet_xception_v1.hdf5'
        model_name_v2 = 'imagenet_xception_v2.hdf5'
        model_path = os.path.join(save_dir, model_name_v1)
        model.save(model_path)
        with h5py.File(model_path) as h5:
Example #30
# IPython log file
import sys

sys.path.append('/home/mccolgan/PyCharm Projects/keras')
sys.path.insert(0, '/home/mccolgan/local/lib/python2.7/site-packages/')
from scipy.io import wavfile
import numpy as np

from keras.models import model_from_config
from keras.layers.convolutional import Convolution1D, UpSample1D

import json

generator_params = json.load(open('generator.json'))
generator_params['layers'][0]['input_length'] = 4096
generator_params['layers'][0]['input_shape'][0] = 4096
gen = model_from_config(generator_params,
                        custom_objects={'UpSample1D': UpSample1D})
gen.load_weights('generator.h5')
zmb = np.random.normal(0., 1, size=(32, 4096, 16)).astype('float32')
fakes = gen.predict(zmb).squeeze()
for n in range(16):
    wavfile.write('fake_big' + str(n + 1) + '.wav', 44100, fakes[n, :])
Example #31
def my_load_model(filepath, custom_objects=None, compile=True):
    """Loads a model saved via `save_model`.

    # Arguments
        filepath: String, path to the saved model.
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.
        compile: Boolean, whether to compile the model
            after loading.

    # Returns
        A Keras model instance. If an optimizer was found
        as part of the saved model, the model is already
        compiled. Otherwise, the model is uncompiled and
        a warning will be displayed. When `compile` is set
        to False, the compilation is omitted without any
        warning.

    # Raises
        ImportError: if h5py is not available.
        ValueError: In case of an invalid savefile.
    """
    if h5py is None:
        raise ImportError('`load_model` requires h5py.')

    if not custom_objects:
        custom_objects = {}

    def convert_custom_objects(obj):
        """Handles custom object lookup.

        # Arguments
            obj: object, dict, or list.

        # Returns
            The same structure, where occurrences
                of a custom object name have been replaced
                with the custom object.
        """
        if isinstance(obj, list):
            deserialized = []
            for value in obj:
                deserialized.append(convert_custom_objects(value))
            return deserialized
        if isinstance(obj, dict):
            deserialized = {}
            for key, value in obj.items():
                deserialized[key] = convert_custom_objects(value)
            return deserialized
        if obj in custom_objects:
            return custom_objects[obj]
        return obj

    with h5py.File(filepath, mode='r') as f:
        # instantiate model
        model_config = f.attrs.get('model_config')
        if model_config is None:
            raise ValueError('No model found in config file.')
        model_config = json.loads(model_config.decode('utf-8'))
        model = model_from_config(model_config, custom_objects=custom_objects)

        # set weights
        topology.load_weights_from_hdf5_group(f['model_weights'], model.layers)

        # Early return if compilation is not required.
        if not compile:
            return model

        # instantiate optimizer
        training_config = f.attrs.get('training_config')
        if training_config is None:
            warnings.warn('No training configuration found in save file: '
                          'the model was *not* compiled. Compile it manually.')
            return model
        training_config = json.loads(training_config.decode('utf-8'))
        optimizer_config = training_config['optimizer_config']
        optimizer = optimizers.deserialize(optimizer_config,
                                           custom_objects=custom_objects)

        # Recover loss functions and metrics.
        loss = convert_custom_objects(training_config['loss'])
        metrics = convert_custom_objects(training_config['metrics'])
        sample_weight_mode = training_config['sample_weight_mode']
        loss_weights = training_config['loss_weights']
        model = multi_gpu_model(model, gpus=3, cpu_relocation=True)

        # Compile model.
        model.compile(optimizer=optimizer,
                      loss=loss,
                      metrics=metrics,
                      loss_weights=loss_weights,
                      sample_weight_mode=sample_weight_mode)

        # Set optimizer weights.
        if 'optimizer_weights' in f:
            # Build train function (to get weight updates).
            if isinstance(model, Sequential):
                model.model._make_train_function()
            else:
                model._make_train_function()
            optimizer_weights_group = f['optimizer_weights']
            optimizer_weight_names = [
                n.decode('utf8')
                for n in optimizer_weights_group.attrs['weight_names']
            ]
            optimizer_weight_values = [
                optimizer_weights_group[n] for n in optimizer_weight_names
            ]
            try:
                model.optimizer.set_weights(optimizer_weight_values)
            except ValueError:
                warnings.warn('Error in loading the saved optimizer '
                              'state. As a result, your model is '
                              'starting with a freshly initialized '
                              'optimizer.')
    return model
Example #32
def load_model(json_path):
    import json
    # model_from_config expects a config dict, not a raw JSON string
    with open(json_path) as f:
        model = model_from_config(json.load(f))
    print(model)
    return model
Example #33
    else:
        model.add(
            Dense(prefilter_train.shape[1], nb_classes, activation=activation))

    model.add(Activation('softmax'))

    model.get_config(verbose=1)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(prefilter_train,
              Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              show_accuracy=False,
              verbose=0,
              validation_data=(prefilter_test, Y_test))

    score = model.evaluate(prefilter_test,
                           Y_test,
                           verbose=0,
                           show_accuracy=True)
    print('\nscore:', score)

    print('Loss change:', (score[0] - classical_score[0]) / classical_score[0],
          '%')
    print('Accuracy change:',
          (score[1] - classical_score[1]) / classical_score[1], '%')

    # check serialization
    config = autoencoder.get_config(verbose=1)
    autoencoder = model_from_config(config)
Example #34
import skil
from keras.models import model_from_config
import json

# Load Keras model you want to train
with open('keras_config.json', 'r') as f:
    model = model_from_config(json.load(f))
    model.compile(loss='categorical_crossentropy', optimizer='sgd')

# Create a SKIL model from it
skil_server = skil.Skil()
ws = skil.WorkSpace(skil_server)
experiment = skil.Experiment(ws)
model = skil.Model(model, model_id='keras_mnist_mlp_42',
                   name='keras', experiment=experiment)

# Register compute and storage resources.
s3 = skil.resources.storage.S3(
    skil_server, 's3_resource', 'bucket_name', 'region')
emr = skil.resources.compute.EMR(
    skil_server, 'emr_cluster', 'region', 'credential_uri', 'cluster_id')

# Define your general training setup
training_config = skil.jobs.TrainingJobConfiguration(
    skil_model=model, num_epochs=10, eval_type='ROC_MULTI_CLASS',
    storage_resource=s3, compute_resource=emr,
    data_set_provider_class='MnistProvider',
    eval_data_set_provider_class='MnistProvider',
    output_path='.')

# Optionally specify a distributed training config.
Example #35
    prefilter_test = autoencoder.predict(X_test, verbose=0)
    print("prefilter_train: ", prefilter_train.shape)
    print("prefilter_test: ", prefilter_test.shape)

    # Classify results from Autoencoder
    print("Building classical fully connected layer for classification")
    model = Sequential()
    if autoencoder_type == 'lstm':
        model.add(TimeDistributedDense(8, nb_classes, activation=activation))
        model.add(Flatten())
    elif autoencoder_type == 'classical':
        model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation))
    else:
        model.add(Dense(prefilter_train.shape[1], nb_classes, activation=activation))

    model.add(Activation('softmax'))

    model.get_config(verbose=1)
    model.compile(loss='categorical_crossentropy', optimizer='adam')
    model.fit(prefilter_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=(prefilter_test, Y_test))

    score = model.evaluate(prefilter_test, Y_test, verbose=0, show_accuracy=True)
    print('\nscore:', score)

    print('Loss change:', (score[0] - classical_score[0])/classical_score[0], '%')
    print('Accuracy change:', (score[1] - classical_score[1])/classical_score[1], '%')

    # check serialization
    config = autoencoder.get_config(verbose=1)
    autoencoder = model_from_config(config)
Example #36
def load_model(data):
    # instantiate model
    model_config = data['model_config']
    if model_config is None:
        raise ValueError('No model found in config file.')

    model = model_from_config(model_config)
    if hasattr(model, 'flattened_layers'):
        # Support for legacy Sequential/Merge behavior.
        flattened_layers = model.flattened_layers
    else:
        flattened_layers = model.layers

    filtered_layers = []
    for layer in flattened_layers:
        weights = layer.weights
        if weights:
            filtered_layers.append(layer)

    flattened_layers = filtered_layers

    layer_names = data['layer_names']
    filtered_layer_names = []
    for name in layer_names:
        weight_dict = data['model_weights'][name]
        weight_names = weight_dict['weight_names']
        if len(weight_names):
            filtered_layer_names.append(name)
    layer_names = filtered_layer_names
    if len(layer_names) != len(flattened_layers):
        raise ValueError('You are trying to load a weight file '
                         'containing ' + str(len(layer_names)) +
                         ' layers into a model with ' +
                         str(len(flattened_layers)) + ' layers.')

    # We batch weight value assignments in a single backend call
    # which provides a speedup in TensorFlow.
    weight_value_tuples = []
    for k, name in enumerate(layer_names):
        weight_dict = data['model_weights'][name]
        weight_names = weight_dict['weight_names']
        weight_values = weight_dict['weight_values']
        layer = flattened_layers[k]
        symbolic_weights = layer.weights
        if len(weight_values) != len(symbolic_weights):
            raise ValueError('Layer #' + str(k) + ' (named "' + layer.name +
                             '" in the current model) was found to '
                             'correspond to layer ' + name +
                             ' in the save file. '
                             'However the new layer ' + layer.name +
                             ' expects ' + str(len(symbolic_weights)) +
                             ' weights, but the saved weights have ' +
                             str(len(weight_values)) + ' elements.')
        if layer.__class__.__name__ == 'Convolution1D':
            # This is for backwards compatibility with
            # the old Conv1D weights format.
            w = weight_values[0]
            shape = w.shape
            if shape[:2] != (layer.filter_length,
                             1) or shape[3] != layer.nb_filter:
                # Legacy shape:
                # (self.nb_filter, input_dim, self.filter_length, 1)
                assert shape[0] == layer.nb_filter and shape[2:] == (
                    layer.filter_length, 1)
                w = np.transpose(w, (2, 3, 1, 0))
                weight_values[0] = w
        weight_value_tuples += zip(symbolic_weights, weight_values)
    K.batch_set_value(weight_value_tuples)

    # instantiate optimizer
    training_config = data.get('training_config')
    if training_config is None:
        warnings.warn('No training configuration found in save file: '
                      'the model was *not* compiled. Compile it manually.')
        return model
    optimizer_config = training_config['optimizer_config']
    optimizer = optimizer_from_config(optimizer_config)

    # recover loss functions and metrics
    loss = training_config['loss']
    metrics = training_config['metrics']
    sample_weight_mode = training_config['sample_weight_mode']
    loss_weights = training_config['loss_weights']

    # compile model
    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=metrics,
                  loss_weights=loss_weights,
                  sample_weight_mode=sample_weight_mode)

    # set optimizer weights
    if 'optimizer_weights' in data:
        # build train function (to get weight updates)
        if isinstance(model, Sequential):
            model.model._make_train_function()
        else:
            model._make_train_function()
        optimizer_weights_dict = data['optimizer_weights']
        optimizer_weight_names = optimizer_weights_dict['weight_names']
        optimizer_weight_values = optimizer_weights_dict['weight_values']
        model.optimizer.set_weights(optimizer_weight_values)
    return model
Example #37
# IPython log file
import sys
sys.path.append('/home/mccolgan/PyCharm Projects/keras')
sys.path.insert(0,'/home/mccolgan/local/lib/python2.7/site-packages/')
from scipy.io import wavfile
import numpy as np

from keras.models import model_from_config
from keras.layers.convolutional import Convolution1D, UpSample1D

import json
generator_params = json.load(open('generator.json'))
generator_params['layers'][0]['input_length'] = 4096
generator_params['layers'][0]['input_shape'][0] = 4096
gen = model_from_config(generator_params,custom_objects={'UpSample1D':UpSample1D})
gen.load_weights('generator.h5')
zmb = np.random.normal(0., 1, size=(32, 4096, 16)).astype('float32')
fakes = gen.predict(zmb).squeeze()
for n in range(16):
    wavfile.write('fake_big'+str(n+1)+'.wav',44100,fakes[n,:])
Example #38
            output_dim=embedding_dim))
        model.add(Flatten())
        model.add(Dropout(p=0.25))
        model.add(Dense(
            input_dim=embedding_dim * input_length,
            output_dim=1, activation="linear"))
        model.compile(loss="mse", optimizer="sgd")
        model.fit(X_train_index, Y_train, verbose=0)
        print("model weights before", model.get_weights())
        pred_before = model.predict(X_test_index)
        print("pred_before", pred_before)

        with open(json_path, "w") as f:
            f.write(model.to_json())

        model.save_weights(hdf_path, overwrite=True)

    with open(json_path, "r") as f:
        json_dict = json.load(f)

    model2 = model_from_config(json_dict)
    print(
        "weights before load",
        model2.get_weights())
    model2.load_weights(hdf_path)
    print(
        "weights after load",
        model2.get_weights())

    print("pred after load", model2.predict(X_test_index))
Example #39
    """

    from keras import backend as K

    # all new operations will be in test mode from now on
    K.set_learning_phase(0)

    # serialize the model and get its weights, for quick re-building
    config = model.get_config()
    weights = model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    from keras.models import model_from_config

    new_model = model_from_config(config)
    new_model.set_weights(weights)

    import tensorflow as tf
    import sys
    sys.path.insert(0, '/Users/dan.dixey/Desktop/QBiz/serving')
    # Unable to Import THIS!! why?
    from tensorflow_serving.session_bundle import exporter

    sess = K.get_session()

    export_path = './Serving'  # where to save the exported graph
    export_version = 0o0000001  # version number (integer)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
Example #40
    def compile(self, optimizer, loss_func, metrics=[]):
        """Setup all of the TF graph variables/ops.

        This is inspired by the compile method on the
        keras.models.Model class.

        This is a good place to create the target network, setup your
        loss function and any placeholders you might need.
        
        You should use the mean_huber_loss function as your
        loss_function. You can also experiment with MSE and other
        losses.

        The optimizer can be whatever class you want. We used the
        keras.optimizers.Optimizer class. Specifically the Adam
        optimizer.
        """
        self.optimizer = optimizer
        self.loss_func = loss_func
        metrics += [mean_max_q, mean_max_tq]
        #metrics += [q_pred_m, mean_max_q]
        #metrics += [q_pred_d_m]

        #Add the duelling network layers.
        if self.duelling_network:

            layer = self.model.layers[-2]
            duel_layer = Dense(3 + 1, activation='linear')(layer.output)
            duel_output = Lambda(lambda a: K.expand_dims(a[:, 0], dim=-1) +
                                 a[:, 1:] - K.mean(a[:, 1:], keepdims=True),
                                 output_shape=(3, ),
                                 name='Duel-layer')(duel_layer)
            model = Model(input=self.model.input, output=duel_output)
            self.model = model

            print(model.summary())

        # create target network with random optimizer
        config = {
            'class_name': self.model.__class__.__name__,
            'config': self.model.get_config(),
        }
        self.target = model_from_config(config,
                                        custom_objects={})  #custom_objects)
        self.target.set_weights(self.model.get_weights())
        self.target.compile(optimizer='adam', loss='mse')
        self.model.compile(optimizer='adam', loss='mse')

        #Update the target network using soft updates.
        if self.target_update_freq < 1.:
            updates = get_soft_target_model_updates(self.target, self.model,
                                                    self.target_update_freq)
            optimizer = UpdatesOptimizer(optimizer, updates)

        # TODO: target model weights update separately while updating the network
        self.max_grad = 1.0

        def masked_error(args):
            y_true, y_pred, mask = args
            #loss = loss_func(y_true, y_pred, self.max_grad)
            loss = loss_func(y_pred, y_true, self.max_grad)
            loss *= mask  # apply element-wise mask
            #print loss
            return K.sum(loss, axis=-1)

        y_pred = self.model.output
        y_true = Input(name='y_true', shape=(self.model.output_shape[1], ))
        mask = Input(name='mask', shape=(self.model.output_shape[1], ))
        # since we are using a mask we need a separate layer
        loss_out = Lambda(masked_error, output_shape=(1, ),
                          name='loss')([y_pred, y_true, mask])

        trainable_model = Model(input=[self.model.input] + [y_true, mask],
                                output=[loss_out, y_pred])
        prop_metrics = {trainable_model.output_names[1]: metrics}

        # TODO not sure why this is needed
        losses = [
            lambda y_true, y_pred: y_pred,  # loss is computed in Lambda layer
            lambda y_true, y_pred: K.zeros_like(
                y_pred),  # we only include this for the metrics
        ]
        trainable_model.compile(optimizer=optimizer,
                                loss=losses,
                                metrics=prop_metrics)
        self.trainable_model = trainable_model
        self.writer = tf.summary.FileWriter("logs/" + self.model_name)
        self.load_weights('DQN-weights-600000.h5')
Example #41
def load_model(filepath, custom_objects=None, lr=None):
    """Loads a model saved via `save_model`.

    # Arguments
        filepath: String, path to the saved model.
        custom_objects: Optional dictionary mapping names
            (strings) to custom classes or functions to be
            considered during deserialization.

    # Returns
        A Keras model instance. If an optimizer was found
        as part of the saved model, the model is already
        compiled. Otherwise, the model is uncompiled and
        a warning will be displayed.

    # Raises
        ImportError: if h5py is not available.
        ValueError: In case of an invalid savefile.
    """
    if h5py is None:
        raise ImportError('`load_model` requires h5py.')

    if not custom_objects:
        custom_objects = {}

    def convert_custom_objects(obj):
        """Handles custom object lookup.

        # Arguments
            obj: object, dict, or list.

        # Returns
            The same structure, where occurrences
                of a custom object name have been replaced
                with the custom object.
        """
        if isinstance(obj, list):
            deserialized = []
            for value in obj:
                if value in custom_objects:
                    deserialized.append(custom_objects[value])
                else:
                    deserialized.append(value)
            return deserialized
        if isinstance(obj, dict):
            deserialized = {}
            for key, value in obj.items():
                if value in custom_objects:
                    deserialized[key] = custom_objects[value]
                else:
                    deserialized[key] = value
            return deserialized
        if obj in custom_objects:
            return custom_objects[obj]
        return obj

    f = h5py.File(filepath, mode='r')

    # instantiate model
    model_config = f.attrs.get('model_config')
    if model_config is None:
        raise ValueError('No model found in config file.')
    model_config = json.loads(model_config.decode('utf-8'))
    model = model_from_config(model_config, custom_objects=custom_objects)

    # set weights
    topology.load_weights_from_hdf5_group(f['model_weights'], model.layers)

    # instantiate optimizer
    training_config = f.attrs.get('training_config')
    if training_config is None:
        warnings.warn('No training configuration found in save file: '
                      'the model was *not* compiled. Compile it manually.')
        f.close()
        return model
    training_config = json.loads(training_config.decode('utf-8'))
    optimizer_config = training_config['optimizer_config']
    if lr:
        optimizer_config['config']['lr'] = lr
    optimizer = optimizers.deserialize(optimizer_config,
                                       custom_objects=custom_objects)

    # Recover loss functions and metrics.
    loss = convert_custom_objects(training_config['loss'])
    metrics = convert_custom_objects(training_config['metrics'])
    sample_weight_mode = training_config['sample_weight_mode']
    loss_weights = training_config['loss_weights']

    # Compile model.
    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=metrics,
                  loss_weights=loss_weights,
                  sample_weight_mode=sample_weight_mode)

    # Set optimizer weights.
    if 'optimizer_weights' in f:
        # Build train function (to get weight updates).
        if isinstance(model, Sequential):
            model.model._make_train_function()
        else:
            model._make_train_function()
        optimizer_weights_group = f['optimizer_weights']
        optimizer_weight_names = [n.decode('utf8') for n in optimizer_weights_group.attrs['weight_names']]
        optimizer_weight_values = [optimizer_weights_group[n] for n in optimizer_weight_names]
        model.optimizer.set_weights(optimizer_weight_values)
    f.close()
    return model
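A hedged usage sketch; the checkpoint path is a placeholder, and lr overrides the saved optimizer's learning rate before the model is re-compiled:

# Hypothetical call — resume from a checkpoint with a smaller learning rate.
model = load_model('checkpoints/model_epoch_10.h5', lr=1e-4)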
Example #42
    """

    from keras import backend as K

    # all new operations will be in test mode from now on
    K.set_learning_phase(0)

    # serialize the model and get its weights, for quick re-building
    config = model.get_config()
    weights = model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    from keras.models import model_from_config

    new_model = model_from_config(config)
    new_model.set_weights(weights)

    import tensorflow as tf
    import sys
    sys.path.insert(0, '/Users/dan.dixey/Desktop/QBiz/serving')
    # Unable to Import THIS!! why?
    from tensorflow_serving.session_bundle import exporter

    sess = K.get_session()

    export_path = './Serving'  # where to save the exported graph
    export_version = 0o0000001  # version number (integer)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)