Code Example #1
File: test_core.py  Project: dotzlab/keras
def test_lambda():
    from keras.utils.layer_utils import layer_from_config
    Lambda = core.Lambda

    layer_test(Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    # test serialization with function
    def f(x):
        return x + 1

    ld = Lambda(f)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})

    ld = Lambda(lambda x: K.concatenate([K.square(x), x]),
                output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    config = ld.get_config()
    ld = Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})
Code Example #2
File: ensemble_methods.py  Project: nitbix/toupee
    def _residual_block(self, injection_index, new_layers, m, member_number):
        #get output shape of last layer before injection from m
        if injection_index > 0:
            input_shape = m.layers[injection_index - 1].output_shape
        else:
            input_shape = m.input_shape
        #make input
        input_layer = Input(shape = input_shape[1:], name = "Input_BARN_{0}".format(member_number))
        #make real layers
        real_layers = input_layer
        for i,l in enumerate(new_layers):
            l['config']['name'] = "BARN-incremental-{0}-{1}".format(
                member_number, i)
            real_layers = layer_from_config(l)(real_layers)
        #make skip layer
        stride_width = input_shape[2] / real_layers._keras_shape[2]
        stride_height = input_shape[3] / real_layers._keras_shape[3]
        equal_channels = real_layers._keras_shape[1] == input_shape[1]
        shortcut = input_layer
        # 1 X 1 conv if shape is different. Else identity.
        if (stride_width > 1 or stride_height > 1 or not equal_channels) and stride_width > 0 and stride_height > 0:
            shortcut = Convolution2D(nb_filter=real_layers._keras_shape[1], nb_row=1, nb_col=1,
                                     subsample=(stride_width, stride_height),
                                     init="he_normal",
                                     border_mode="same",
                                     name="shortcut_BARN_{0}".format(member_number))(input_layer)

        #make merge
        merge_layer = merge([real_layers,shortcut], mode="sum", name = "merge_BARN_{0}".format(member_number))
        #make model
        model = Model(input=input_layer,output=merge_layer,
                name="Model_BARN_{0}".format(member_number))
        #make config
        return {"class_name": "Model", "config": model.get_config()}
Code Example #3
File: test_gan.py  Project: nebw/beras
def test_gan_get_config(tmpdir):
    z_shape = (1, 8, 8)

    z = Input(z_shape, name='z')
    g_out = Convolution2D(10, 2, 2, activation='relu', border_mode='same')(z)
    generator = Container(z, g_out)
    f, r = Input(z_shape, name='f'), Input(z_shape, name='r')

    dis_input = merge([f, r], mode='concat', concat_axis=1)
    dis_conv = Convolution2D(5, 2, 2, activation='relu')(dis_input)
    dis_flatten = Flatten()(dis_conv)
    dis = Dense(1, activation='sigmoid')(dis_flatten)
    discriminator = Container([f, r], gan_outputs(dis))

    gan = GAN(generator, discriminator, z_shape, z_shape)
    weights_fname = str(tmpdir.mkdir("weights").join("{}.hdf5"))
    gan.save_weights(weights_fname)
    true_config = gan.get_config()

    import json
    with open(os.path.join(TEST_OUTPUT_DIR, "true_config.json"), 'w+') as f:
        json.dump(true_config, f, indent=2)

    gan_from_config = layer_from_config(true_config, custom_objects={
        'GAN': GAN,
        'Split': Split,
    })

    with open(os.path.join(TEST_OUTPUT_DIR, "loaded_config.json"), 'w+') as f:
        json.dump(gan_from_config.get_config(), f, indent=2)
    gan_from_config.load_weights(weights_fname)
Code Example #4
File: models.py  Project: bjerva/keras
def model_from_config(config, custom_objects=None):
    from keras.utils.layer_utils import layer_from_config
    if isinstance(config, list):
        raise TypeError('`model_from_config` expects a dictionary, not a list. '
                        'Maybe you meant to use '
                        '`Sequential.from_config(config)`?')
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #5
File: models.py  Project: AlexHung780312/keras
def model_from_config(config, custom_objects={}):
    from keras.utils.layer_utils import layer_from_config
    if isinstance(config, list):
        raise Exception('`model_from_config` expects a dictionary. '
                        'To load an old-style config use the appropriate '
                        '`load_config` method on Sequential or Graph')
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #6
    def from_config(cls, config):
        # Use layer build function to initialise new NeuralGraphOutput
        inner_layer_config = config.pop('inner_layer_config')
        create_inner_layer_fn = lambda: layer_from_config(
            deepcopy(inner_layer_config))

        layer = cls(create_inner_layer_fn, **config)
        return layer
Code Example #7
def insert_layer(model, new_layer, index):
    res = Sequential()
    for i, layer in enumerate(model.layers):
        if i == index: res.add(new_layer)
        copied = layer_from_config(wrap_config(layer))
        res.add(copied)
        copied.set_weights(layer.get_weights())
    return res
Code Example #8
File: models.py  Project: AllenTsui/keras
def model_from_json(json_string, custom_objects={}):
    '''Parses a JSON model configuration file
    and returns a model instance.
    '''
    import json
    from keras.utils.layer_utils import layer_from_config
    config = json.loads(json_string)
    return layer_from_config(config, custom_objects=custom_objects)
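
A minimal usage sketch for the helper above (a hypothetical example, assuming the Keras 1.x API for `Sequential` and `Dense`; the JSON string itself carries only the architecture, not the weights):

from keras.layers import Dense
from keras.models import Sequential

# Usage sketch (Keras 1.x assumed): build a tiny model, dump its
# architecture to JSON, then rebuild it with model_from_json.
model = Sequential([Dense(4, input_dim=8, activation='relu')])
json_string = model.to_json()
rebuilt = model_from_json(json_string)
print(len(rebuilt.layers))  # 1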
Code Example #9
def model_from_json(json_string, custom_objects={}):
    '''Parses a JSON model configuration file
    and returns a model instance.
    '''
    import json
    from keras.utils.layer_utils import layer_from_config
    config = json.loads(json_string)
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #10
def model_from_yaml(yaml_string, custom_objects={}):
    '''Parses a yaml model configuration file
    and returns a model instance.
    '''
    import yaml
    from keras.utils.layer_utils import layer_from_config
    config = yaml.load(yaml_string)
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #11
File: utils.py  Project: EliaKunz/courses
def insert_layer(model, new_layer, index):
    res = Sequential()
    for i,layer in enumerate(model.layers):
        if i==index: res.add(new_layer)
        copied = layer_from_config(wrap_config(layer))
        res.add(copied)
        copied.set_weights(layer.get_weights())
    return res
Code Example #12
File: models.py  Project: AllenTsui/keras
def model_from_yaml(yaml_string, custom_objects={}):
    '''Parses a yaml model configuration file
    and returns a model instance.
    '''
    import yaml
    from keras.utils.layer_utils import layer_from_config
    config = yaml.load(yaml_string)
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #13
File: utils.py  Project: tboquet/scheduler
def model_from_dict_w_opt(model_dict, custom_objects=None):
    """Builds a model from a serialized model using `to_dict_w_opt`

    Args:
        model_dict(dict): a serialized Keras model
        custom_objects(dict, optional): a dictionary mapping custom objects
            names to custom objects (Layers, functions, etc.)

    Returns:
        A Keras.Model which is compiled if the information about the optimizer
        is available.

    """
    if custom_objects is None:
        custom_objects = {}

    model = layer_from_config(model_dict['config'],
                              custom_objects=custom_objects)

    if 'optimizer' in model_dict:
        metrics = model_dict.get("metrics")
        model_name = model_dict['config'].get('class_name')
        # if it has an optimizer, the model is assumed to be compiled
        loss = model_dict.get('loss')

        # if a custom loss function is passed replace it in loss
        for l in loss:
            for c in custom_objects:
                if loss[l] == c:
                    loss[l] = custom_objects[c]

        optimizer_params = dict([(
            k, v) for k, v in model_dict.get('optimizer').items()])
        optimizer_name = optimizer_params.pop('name')
        optimizer = optimizers.get(optimizer_name, optimizer_params)

        if model_name == "Sequential":
            sample_weight_mode = model_dict.get('sample_weight_mode')
            model.compile(loss=loss,
                          optimizer=optimizer,
                          sample_weight_mode=sample_weight_mode,
                          metrics=metrics)
        elif model_name == "Graph":
            sample_weight_modes = model_dict.get('sample_weight_modes', None)
            loss_weights = model_dict.get('loss_weights', None)
            model.compile(loss=loss,
                          optimizer=optimizer,
                          sample_weight_modes=sample_weight_modes,
                          loss_weights=loss_weights)
        elif model_name == "Model":
            sample_weight_mode = model_dict.get('sample_weight_mode')
            loss_weights = model_dict.get('loss_weights', None)
            model.compile(loss=loss,
                          optimizer=optimizer,
                          sample_weight_mode=sample_weight_mode,
                          loss_weights=loss_weights,
                          metrics=metrics)
    return model
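
The layout produced by `to_dict_w_opt` is not shown here; the following is a hypothetical illustration of the keys this function reads, inferred from the lookups above rather than taken from the project's actual serializer:

# Hypothetical model_dict layout (illustrative only, not the project's documented format):
example_model_dict = {
    'config': {'class_name': 'Sequential', 'config': []},  # serialized architecture
    'optimizer': {'name': 'sgd', 'lr': 0.01},               # optimizer name plus its parameters
    'loss': {'output_1': 'mse'},                            # per-output loss mapping
    'metrics': ['accuracy'],
    'sample_weight_mode': None,
}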
Code Example #14
File: models.py  Project: xindongzhang/keras
def model_from_config(config, custom_objects={}):
    from keras.utils.layer_utils import layer_from_config

    if isinstance(config, list):
        raise Exception(
            "`model_from_config` expects a dictionary, not a list. "
            "Maybe you meant to use `Sequential.from_config(config)`?"
        )
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #15
File: models.py  Project: xindongzhang/keras
 def get_or_create_layer(layer_data):
     if layer_data["class_name"] == "Sequential":
         return Sequential.from_config(layer_data["config"], layer_cache=layer_cache)
     name = layer_data["config"].get("name")
     if name in layer_cache:
         return layer_cache[name]
     layer = layer_from_config(layer_data)
     layer_cache[name] = layer
     return layer
Code Example #16
 def from_config(cls, config):
     from keras.utils.layer_utils import layer_from_config
     layers = config['layers']
     compiled_layers = []
     for layer_dict in layers:
         layer_compiled = layer_from_config(layer_dict)
         compiled_layers.append(layer_compiled)
     del config['layers']
     return cls(layers=compiled_layers, **config)
Code Example #17
def load_model(fname, custom_objects={}):
    """
    Loads the model and weights from ``fname``. Counterpart to :py:func:`save_model`.
    """
    json_config = get_hdf5_attr(fname, 'model').decode('utf-8')
    config = json.loads(json_config)
    model = layer_from_config(config, custom_objects)
    model.load_weights(fname)
    return model
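
`get_hdf5_attr` is not defined in this snippet; a minimal sketch of what such a helper could look like, assuming the model config is stored as a root-level HDF5 attribute and read with h5py:

import h5py

def get_hdf5_attr(fname, attr_name):
    # Hypothetical helper (not from the original project):
    # read a root-level attribute (here, the JSON-encoded model config) from an HDF5 file.
    with h5py.File(fname, 'r') as f:
        return f.attrs[attr_name]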
Code Example #18
File: models.py  Project: xiaozhuge080/keras
    def from_config(cls, config, layer_cache=None):
        '''Supports legacy formats
        '''
        from keras.utils.layer_utils import layer_from_config
        from keras.layers import Merge
        assert type(config) is list

        if not layer_cache:
            layer_cache = {}

        def normalize_legacy_config(conf):
            if 'class_name' not in conf:
                class_name = conf['name']
                name = conf.get('custom_name')
                conf['name'] = name
                new_config = {
                    'class_name': class_name,
                    'config': conf,
                }
                return new_config
            return conf

        # the model we will return
        model = cls()

        def get_or_create_layer(layer_data):
            if layer_data['class_name'] == 'Sequential':
                return Sequential.from_config(layer_data['config'],
                                              layer_cache=layer_cache)
            name = layer_data['config'].get('name')
            if name in layer_cache:
                return layer_cache[name]
            layer = layer_from_config(layer_data)
            layer_cache[name] = layer
            return layer

        first_layer = config[0]
        first_layer = normalize_legacy_config(first_layer)
        if first_layer['class_name'] == 'Merge':
            merge_inputs = []
            first_layer_config = first_layer['config']
            for merge_input_config in first_layer_config.pop('layers'):
                merge_input = layer_from_config(merge_input_config)
                merge_inputs.append(merge_input)
            first_layer_config['layers'] = merge_inputs
            merge = Merge.from_config(first_layer_config)
            model.add(merge)
        else:
            layer = get_or_create_layer(first_layer)
            model.add(layer)

        for conf in config[1:]:
            conf = normalize_legacy_config(conf)
            layer = get_or_create_layer(conf)
            model.add(layer)
        return model
Code Example #19
File: models.py  Project: AnishShah/keras
    def from_config(cls, config, layer_cache=None):
        '''Supports legacy formats
        '''
        from keras.utils.layer_utils import layer_from_config
        from keras.layers import Merge
        assert type(config) is list

        if not layer_cache:
            layer_cache = {}

        def normalize_legacy_config(conf):
            if 'class_name' not in conf:
                class_name = conf['name']
                name = conf.get('custom_name')
                conf['name'] = name
                new_config = {
                    'class_name': class_name,
                    'config': conf,
                }
                return new_config
            return conf

        # the model we will return
        model = cls()

        def get_or_create_layer(layer_data):
            if layer_data['class_name'] == 'Sequential':
                return Sequential.from_config(layer_data['config'],
                                              layer_cache=layer_cache)
            name = layer_data['config'].get('name')
            if name in layer_cache:
                return layer_cache[name]
            layer = layer_from_config(layer_data)
            layer_cache[name] = layer
            return layer

        first_layer = config[0]
        first_layer = normalize_legacy_config(first_layer)
        if first_layer['class_name'] == 'Merge':
            merge_inputs = []
            first_layer_config = first_layer['config']
            for merge_input_config in first_layer_config.pop('layers'):
                merge_input = layer_from_config(merge_input_config)
                merge_inputs.append(merge_input)
            first_layer_config['layers'] = merge_inputs
            merge = Merge.from_config(first_layer_config)
            model.add(merge)
        else:
            layer = get_or_create_layer(first_layer)
            model.add(layer)

        for conf in config[1:]:
            conf = normalize_legacy_config(conf)
            layer = get_or_create_layer(conf)
            model.add(layer)
        return model
Code Example #20
 def from_config(cls, config):
     model_config = config['model']
     del config['model']
     rc = cls(**config)
     from . import cells
     rc.model = Sequential()
     for layer_config in model_config:
         layer = layer_from_config(layer_config, cells.__dict__)
         rc.add(layer)
     return rc
Code Example #21
 def get_or_create_layer(layer_data):
     if layer_data['class_name'] == 'Sequential':
         return Sequential.from_config(layer_data['config'],
                                       layer_cache=layer_cache)
     name = layer_data['config'].get('name')
     if name in layer_cache:
         return layer_cache[name]
     layer = layer_from_config(layer_data)
     layer_cache[name] = layer
     return layer
Code Example #22
    def from_config(cls, config):
        # TODO: test legacy support
        from keras.utils.layer_utils import layer_from_config

        def normalize_legacy_config(conf):
            if 'class_name' not in conf:
                class_name = conf['name']
                name = conf.get('custom_name')
                conf['name'] = name
                new_config = {
                    'class_name': class_name,
                    'config': conf,
                }
                return new_config
            return conf

        graph = cls()
        inputs = config.get('input_config')
        for input in inputs:
            graph.add_input(**input)

        nodes = config.get('node_config')
        for node in nodes:
            layer_config = config['nodes'][node['name']]
            layer_config = normalize_legacy_config(layer_config)
            if 'layer' in node:
                # for add_shared_node
                node['layer'] = layer_from_config(node['layer'])
            else:
                layer = layer_from_config(layer_config)
                node['layer'] = layer

            node['create_output'] = False  # outputs will be added below
            if layer_config.get('shared'):
                graph.add_shared_node(**node)
            else:
                graph.add_node(**node)

        outputs = config.get('output_config')
        for output in outputs:
            graph.add_output(**output)
        return graph
Code Example #23
File: test_core.py  Project: wufy1992/keras
def test_lambda():
    from keras.utils.layer_utils import layer_from_config
    Lambda = core.Lambda

    layer_test(Lambda,
               kwargs={'function': lambda x: x + 1},
               input_shape=(3, 2))

    layer_test(Lambda,
               kwargs={
                   'function': lambda x, a, b: x * a + b,
                   'arguments': {
                       'a': 0.6,
                       'b': 0.4
                   }
               },
               input_shape=(3, 2))

    # test serialization with function
    def f(x):
        return x + 1

    ld = Lambda(f)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})

    ld = Lambda(lambda x: K.concatenate([K.square(x), x]),
                output_shape=lambda s: tuple(list(s)[:-1] + [2 * s[-1]]))
    config = ld.get_config()
    ld = Lambda.from_config(config)

    # test serialization with output_shape function
    def f(x):
        return K.concatenate([K.square(x), x])

    def f_shape(s):
        return tuple(list(s)[:-1] + [2 * s[-1]])

    ld = Lambda(f, output_shape=f_shape)
    config = ld.get_config()
    ld = layer_from_config({'class_name': 'Lambda', 'config': config})
Code Example #24
File: models.py  Project: zyzzhaoyuzhe/keras
    def from_config(cls, config):
        '''Supports legacy formats
        '''
        from keras.utils.layer_utils import layer_from_config
        from keras.layers import Merge
        assert type(config) is list

        def normalize_legacy_config(conf):
            if 'class_name' not in conf:
                class_name = conf['name']
                name = conf.get('custom_name')
                conf['name'] = name
                new_config = {
                    'class_name': class_name,
                    'config': conf,
                }
                return new_config
            return conf

        model = cls()

        first_layer = config[0]
        first_layer = normalize_legacy_config(first_layer)
        if first_layer['class_name'] == 'Merge':
            merge_inputs = []
            first_layer_config = first_layer['config']
            for merge_input_config in first_layer_config.pop('layers'):
                merge_input = layer_from_config(merge_input_config)
                merge_inputs.append(merge_input)
            first_layer_config['layers'] = merge_inputs
            merge = Merge.from_config(first_layer_config)
            model.add(merge)
        else:
            layer = layer_from_config(first_layer)
            model.add(layer)

        for conf in config[1:]:
            conf = normalize_legacy_config(conf)
            layer = layer_from_config(conf)
            model.add(layer)
        return model
Code Example #25
File: models.py  Project: BarnetteME1/keras
    def from_config(cls, config):
        '''Supports legacy formats
        '''
        from keras.utils.layer_utils import layer_from_config
        from keras.layers import Merge
        assert type(config) is list

        def normalize_legacy_config(conf):
            if 'class_name' not in conf:
                class_name = conf['name']
                name = conf.get('custom_name')
                conf['name'] = name
                new_config = {
                    'class_name': class_name,
                    'config': conf,
                }
                return new_config
            return conf

        model = cls()

        first_layer = config[0]
        first_layer = normalize_legacy_config(first_layer)
        if first_layer['class_name'] == 'Merge':
            merge_inputs = []
            first_layer_config = first_layer['config']
            for merge_input_config in first_layer_config.pop('layers'):
                merge_input = layer_from_config(merge_input_config)
                merge_inputs.append(merge_input)
            first_layer_config['layers'] = merge_inputs
            merge = Merge.from_config(first_layer_config)
            model.add(merge)
        else:
            layer = layer_from_config(first_layer)
            model.add(layer)

        for conf in config[1:]:
            conf = normalize_legacy_config(conf)
            layer = layer_from_config(conf)
            model.add(layer)
        return model
Code Example #26
File: engine.py  Project: LiuFang816/SALSTM_py_data
	def from_config(cls, config):
		model_config = config['model']
		del config['model']
		rc = cls(**config)
		from . import cells
		rc.model = Sequential()
		for layer_config in model_config:
			if 'config' in layer_config and 'name' in layer_config['config']:
				del layer_config['config']['name']
			layer = layer_from_config(layer_config, cells.__dict__)
			rc.add(layer)
		return rc
Code Example #27
File: models.py  Project: xindongzhang/keras
    def from_config(cls, config, layer_cache=None):
        """Supports legacy formats
        """
        from keras.utils.layer_utils import layer_from_config
        from keras.layers import Merge

        assert type(config) is list

        if not layer_cache:
            layer_cache = {}

        def normalize_legacy_config(conf):
            if "class_name" not in conf:
                class_name = conf["name"]
                name = conf.get("custom_name")
                conf["name"] = name
                new_config = {"class_name": class_name, "config": conf}
                return new_config
            return conf

        # the model we will return
        model = cls()

        def get_or_create_layer(layer_data):
            if layer_data["class_name"] == "Sequential":
                return Sequential.from_config(layer_data["config"], layer_cache=layer_cache)
            name = layer_data["config"].get("name")
            if name in layer_cache:
                return layer_cache[name]
            layer = layer_from_config(layer_data)
            layer_cache[name] = layer
            return layer

        first_layer = config[0]
        first_layer = normalize_legacy_config(first_layer)
        if first_layer["class_name"] == "Merge":
            merge_inputs = []
            first_layer_config = first_layer["config"]
            for merge_input_config in first_layer_config.pop("layers"):
                merge_input = layer_from_config(merge_input_config)
                merge_inputs.append(merge_input)
            first_layer_config["layers"] = merge_inputs
            merge = Merge.from_config(first_layer_config)
            model.add(merge)
        else:
            layer = get_or_create_layer(first_layer)
            model.add(layer)

        for conf in config[1:]:
            conf = normalize_legacy_config(conf)
            layer = get_or_create_layer(conf)
            model.add(layer)
        return model
Code Example #28
File: learning.py  Project: coopie/speech_ml
def build_model_from_config(model_h5, cutoff_layer_name=None, number_of_layers=None, verbosity=1):
    """Build a model from a model snapshot, stopping after `number_of_layers`
    layers or when `cutoff_layer_name` is reached.

    TODO: option to pass in input size to the rebuilding of the network.
    """
    def log(level, *message):
        if verbosity >= level:
            print(message)

    if cutoff_layer_name is None and number_of_layers is None:
        raise RuntimeError('You must specify either a cutoff layer name or the number of layers to cut to')

    config_json_str = model_h5.attrs.get('model_config').decode('UTF-8')
    config = json.loads(config_json_str)

    layers_config = config['config']['layers']

    log(1, 'getting first layer')
    input_layer = layer_from_config(layers_config[0])
    input_img = Input(input_layer.input_shape[1:])
    model = input_img

    if number_of_layers is None:
        number_of_layers = float('inf')

    # Build the model until we reach the stopping point
    for i, layer_config in enumerate(layers_config[1:]):
        if i >= number_of_layers:
            break
        model = layer_from_config(layer_config)(model)
        if layer_config['name'] == cutoff_layer_name:
            break
        log(1, 'shape:', model.get_shape())


    return model, input_img
Code Example #29
    def __init__(self, inner_layer_arg, **kwargs):
        # Initialise based on one of the three initialisation methods

        # Case 1: Check if inner_layer_arg is conv_width
        if isinstance(inner_layer_arg, (int, long)):
            self.conv_width = inner_layer_arg
            dense_layer_kwargs, kwargs = filter_func_args(
                layers.Dense.__init__, kwargs, overrule_args=['name'])
            self.create_inner_layer_fn = lambda: layers.Dense(
                self.conv_width, **dense_layer_kwargs)

        # Case 2: Check if an initialised keras layer is given
        elif isinstance(inner_layer_arg, layers.Layer):
            assert inner_layer_arg.built == False, 'When initialising with a keras layer, it cannot be built.'
            _, self.conv_width = inner_layer_arg.get_output_shape_for(
                (None, None))
            # layer_from_config will mutate the config dict, therefore create a get fn
            self.create_inner_layer_fn = lambda: layer_from_config(
                dict(class_name=inner_layer_arg.__class__.__name__,
                     config=inner_layer_arg.get_config()))

        # Case 3: Check if a function is provided that returns a initialised keras layer
        elif callable(inner_layer_arg):
            example_instance = inner_layer_arg()
            assert isinstance(
                example_instance, layers.Layer
            ), 'When initialising with a function, the function has to return a keras layer'
            assert example_instance.built == False, 'When initialising with a keras layer, it cannot be built.'
            _, self.conv_width = example_instance.get_output_shape_for(
                (None, None))
            self.create_inner_layer_fn = inner_layer_arg

        else:
            raise ValueError(
                'NeuralGraphHidden has to be initialised with 1) an int conv_width, 2) a keras layer instance, or 3) a function returning a keras layer instance.'
            )

        super(NeuralGraphHidden, self).__init__(**kwargs)
Code Example #30
File: keras_backend.py  Project: tboquet/python-alp
def model_from_dict_w_opt(model_dict, custom_objects=None):
    """Builds a model from a serialized model using `to_dict_w_opt`

    Args:
        model_dict(dict): a serialized Keras model
        custom_objects(dict, optional): a dictionary mapping custom objects
            names to custom objects (Layers, functions, etc.)

    Returns:
        A Keras.Model which is compiled if the information about the optimizer
        is available.

    """
    from keras import optimizers
    from keras.utils.layer_utils import layer_from_config

    if custom_objects is None:
        custom_objects = dict()

    custom_objects = {k: deserialize(**custom_objects[k])
                      for k in custom_objects}

    for k in custom_objects:
        if inspect.isfunction(custom_objects[k]):
            custom_objects[k] = custom_objects[k]()

    model = layer_from_config(model_dict['config'],
                              custom_objects=custom_objects)

    if 'optimizer' in model_dict:
        metrics = model_dict.get("metrics", [])
        ser_metrics = model_dict.get("ser_metrics", [])
        for k in custom_objects:
            if inspect.isfunction(custom_objects[k]):
                function_name = custom_objects[k].__name__
                if k in ser_metrics or function_name in ser_metrics:
                    metrics.append(custom_objects[k])
        model_name = model_dict['config'].get('class_name')
        # if it has an optimizer, the model is assumed to be compiled
        loss = model_dict.get('loss')

        # if a custom loss function is passed replace it in loss
        for l in loss:
            for c in custom_objects:
                if loss[l] == c:
                    loss[l] = custom_objects[c]

        optimizer_params = dict([(
            k, v) for k, v in model_dict.get('optimizer').items()])
        optimizer_name = optimizer_params.pop('name')
        optimizer = optimizers.get(optimizer_name, optimizer_params)

        if model_name == "Sequential":
            sample_weight_mode = model_dict.get('sample_weight_mode')
            model.compile(loss=loss,
                          optimizer=optimizer,
                          sample_weight_mode=sample_weight_mode,
                          metrics=metrics)
        elif model_name == "Model":
            sample_weight_mode = model_dict.get('sample_weight_mode')
            loss_weights = model_dict.get('loss_weights', None)
            model.compile(loss=loss,
                          optimizer=optimizer,
                          sample_weight_mode=sample_weight_mode,
                          loss_weights=loss_weights,
                          metrics=metrics)
        else:  # pragma: no cover
            raise Exception('{} model must be one of Sequential, '
                            'Model'.format(model_name))

    return model
Code Example #31
File: wrappers.py  Project: keunwoochoi/keras
 def from_config(cls, config):
     from keras.utils.layer_utils import layer_from_config
     layer = layer_from_config(config.pop('layer'))
     return cls(layer, **config)
Code Example #32
File: utils.py  Project: arrgonaut/fastai_1
def copy_layer(layer):
    return layer_from_config(wrap_config(layer))
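
`wrap_config` is not shown in this snippet; a minimal sketch of such a helper, assuming it only wraps a layer's own config in the {'class_name', 'config'} dict that `layer_from_config` expects:

def wrap_config(layer):
    # Hypothetical reconstruction of the wrap_config helper used above:
    # wrap a layer's config in the dict format expected by layer_from_config.
    return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}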
Code Example #33
File: wrappers.py  Project: zjffdu/keras
 def from_config(cls, config):
     from keras.utils.layer_utils import layer_from_config
     layer = layer_from_config(config.pop('layer'))
     return cls(layer, **config)
Code Example #34
File: utils_own.py  Project: tranvukhanh/fast-ai
def copy_layer(layer): return layer_from_config(wrap_config(layer))


def copy_layers(layers): return [copy_layer(layer) for layer in layers]
Code Example #35
 def from_config(cls, config):
     from keras.utils.layer_utils import layer_from_config
     merge_mode = layer_from_config(config.pop('merge_mode'))
     residual = super(Residual, cls).from_config(config)
     residual.merge_mode = merge_mode
     return residual
Code Example #36
for model_idx in range(len(all_models)):
    for i, layer in enumerate(all_layers[model_idx]):
        config = layer.get_config()
        print(config)
        if "batch_input_shape" in config and \
          one_input_length in config["batch_input_shape"]:
            # first specification of batch_input_shape
            config["batch_input_shape"] = (1, 1, one_input_length)

        if isinstance(layer, Recurrent):
            # if it's a recurrent layer, make it stateful
            config["stateful"] = True

        all_layers[model_idx][i] = layer_from_config({
            "class_name": type(layer),
            "config": config
        })

all_models = [Sequential(layers) for layers in all_layers]
for model_idx in range(len(all_models)):
    all_models[model_idx].set_weights(all_weights[model_idx])
    # print(all_models[model_idx].summary())

year_count, day_count = 0, -1
days_in_stk_yr = 252
total_cash = 0
traders = [Trader(init_cash=TEST_CASH, ticker=tick) for tick in test_secs]
prev_trader_values = [0] * len(traders)

# split into trading years
for year in [
Code Example #37
File: heatmap.py  Project: juanlp/heatmaps
def from_config(layer, config_dic):
    config_correct = {}
    config_correct['class_name'] = type(layer)
    config_correct['config'] = config_dic
    return layer_from_config(config_correct)
Code Example #38
def model_from_config(config, custom_objects={}):
    from keras.utils.layer_utils import layer_from_config
    if isinstance(config, list):
        raise Exception('`model_from_config` expects a dictionary, not a list. '
                        'Maybe you meant to use `Sequential.from_config(config)`?')
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #39
File: models.py  Project: 123fengye741/keras
def model_from_config(config, custom_objects={}):
    from keras.utils.layer_utils import layer_from_config
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #40
File: layers.py  Project: JinXinDeep/noah
 def from_config(cls, config, custom_objects={}):
     from keras.utils.layer_utils import layer_from_config
     rnn_cell = layer_from_config(config.pop('rnn_cell'), custom_objects)
     attention = layer_from_config(config.pop('attention'), custom_objects)
     embedding = layer_from_config(config.pop('embedding'), custom_objects)
     return cls(rnn_cell, attention, embedding, **config)
Code Example #41
File: utils.py  Project: EliaKunz/courses
def copy_layer(layer): return layer_from_config(wrap_config(layer))


def copy_layers(layers): return [copy_layer(layer) for layer in layers]
Code Example #42
 def create_inner_layer_fn():
     return layer_from_config(deepcopy(inner_layer_config))
Code Example #43
File: keras_backend.py  Project: tboquet/python-alp
def model_from_dict_w_opt(model_dict, custom_objects=None):
    """Builds a model from a serialized model using `to_dict_w_opt`

    Args:
        model_dict(dict): a serialized Keras model
        custom_objects(dict, optional): a dictionary mapping custom objects
            names to custom objects (Layers, functions, etc.)

    Returns:
        A Keras.Model which is compiled if the information about the optimizer
        is available.

    """
    from keras import optimizers
    from keras.utils.layer_utils import layer_from_config

    if custom_objects is None:
        custom_objects = dict()

    custom_objects = {
        k: deserialize(**custom_objects[k])
        for k in custom_objects
    }

    for k in custom_objects:
        if inspect.isfunction(custom_objects[k]):
            custom_objects[k] = custom_objects[k]()

    model = layer_from_config(model_dict['config'],
                              custom_objects=custom_objects)

    if 'optimizer' in model_dict:
        metrics = model_dict.get("metrics", [])
        ser_metrics = model_dict.get("ser_metrics", [])
        for k in custom_objects:
            if inspect.isfunction(custom_objects[k]):
                function_name = custom_objects[k].__name__
                if k in ser_metrics or function_name in ser_metrics:
                    metrics.append(custom_objects[k])
        model_name = model_dict['config'].get('class_name')
        # if it has an optimizer, the model is assumed to be compiled
        loss = model_dict.get('loss')

        # if a custom loss function is passed replace it in loss
        for l in loss:
            for c in custom_objects:
                if loss[l] == c:
                    loss[l] = custom_objects[c]

        optimizer_params = dict([
            (k, v) for k, v in model_dict.get('optimizer').items()
        ])
        optimizer_name = optimizer_params.pop('name')
        optimizer = optimizers.get(optimizer_name, optimizer_params)

        if model_name == "Sequential":
            sample_weight_mode = model_dict.get('sample_weight_mode')
            model.compile(loss=loss,
                          optimizer=optimizer,
                          sample_weight_mode=sample_weight_mode,
                          metrics=metrics)
        elif model_name == "Model":
            sample_weight_mode = model_dict.get('sample_weight_mode')
            loss_weights = model_dict.get('loss_weights', None)
            model.compile(loss=loss,
                          optimizer=optimizer,
                          sample_weight_mode=sample_weight_mode,
                          loss_weights=loss_weights,
                          metrics=metrics)
        else:  # pragma: no cover
            raise Exception('{} model must be one of Sequential, '
                            'Model'.format(model_name))

    return model
Code Example #44
def model_from_config(config, custom_objects={}):
    from keras.utils.layer_utils import layer_from_config
    return layer_from_config(config, custom_objects=custom_objects)
Code Example #45
              loss='binary_crossentropy',
              loss_weights=None,
              sample_weight_mode=None)
model.fit({
    'a1': data_a1,
    'a2': data_a2
}, {
    'b1': data_b1,
    'b2': data_b2
},
          nb_epoch=10,
          verbose=1,
          validation_data=None,
          class_weight=None,
          sample_weight=None)
model.fit_generator(
    generator,
    samples_per_epoch=1000,
    nb_epoch=10,
    verbose=1,
    validation_data=None,
    nb_val_samples=None,
    class_weight={},
)

#=====================================
from keras.utils.layer_utils import layer_from_config
config = {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
layer = layer_from_config(config)
#=====================================
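
A self-contained version of the pattern above (a minimal sketch, assuming the Keras 1.x layer API):

from keras.layers import Dense
from keras.utils.layer_utils import layer_from_config

# Build a layer, capture its config, and reconstruct a fresh, unbuilt copy from it.
layer = Dense(16, input_dim=32, activation='relu')
config = {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
copied = layer_from_config(config)
print(copied.name, copied.output_dim)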