def _compile_model(self, model):
    """Compile *model* using the configured optimizer, loss and metrics."""
    the_optimizer = load_class_from_config(self.optimizer_config)
    # a plain string is handed straight to keras as a built-in loss name;
    # anything else is treated as a class config to instantiate
    the_loss = (self.loss if isinstance(self.loss, str)
                else load_class_from_config(self.loss))
    model.compile(optimizer=the_optimizer,
                  loss=the_loss,
                  metrics=self.metrics)
def _add_nodes(self, graph):
    """Instantiate each configured layer and attach it to the keras Graph."""
    from momma_dragonn.loaders import load_class_from_config
    for node_config in self.nodes_config:
        layer = load_class_from_config(
            {'class': node_config["class"],
             'kwargs': node_config["kwargs"]},
            extra_kwargs={})
        # every key other than the three we consume here is forwarded
        # verbatim to graph.add_node (e.g. the node's name)
        add_node_kwargs = {
            key: value for key, value in node_config.items()
            if key not in ["class", "kwargs", "input_name"]}
        input_name = node_config["input_name"]
        # keras' Graph API takes 'inputs' for a list of parents and
        # 'input' for a single parent
        if isinstance(input_name, list):
            add_node_kwargs['inputs'] = input_name
        elif isinstance(input_name, str):
            add_node_kwargs['input'] = input_name
        else:
            raise RuntimeError("Unsupported type for input_name: "
                               +str(input_name))
        graph.add_node(layer, **add_node_kwargs)
def _get_uncompiled_model(self, seed):
    """Build an uncompiled Sequential model from self.layers_config.

    keras is deliberately imported inside this method so that the numpy
    random seed is set BEFORE keras is pulled in, letting the model
    trainer control random initialization.
    """
    import numpy as np
    np.random.seed(seed)
    import keras
    from keras.models import Sequential
    sequential_model = Sequential()
    for a_layer_config in self.layers_config:
        sequential_model.add(load_class_from_config(a_layer_config))
    return sequential_model
def _get_shared_layers(self):
    """Instantiate every shared layer, keyed by its config-dictionary name.

    The dictionary key is passed to the layer as its keras 'name', which
    is why declaring 'name' inside the kwargs is rejected.

    Returns:
        dict mapping shared-layer name -> instantiated layer object.
    """
    shared_layers = {}
    for name, shared_layer_config in self.shared_layers_config.items():
        assert_attributes_in_config(shared_layer_config,
                                    ['class', 'kwargs'])
        assert 'name' not in shared_layer_config['kwargs'],\
            ("Don't declare 'name' within the kwargs; will use the dictionary"
             +" key for that. At: "+str(shared_layer_config))
        # NOTE: the previous duplicate-name check here was unreachable --
        # iterating dict.items() can never yield the same key twice
        shared_layers[name] = load_class_from_config(
            shared_layer_config, extra_kwargs={'name': name})
    return shared_layers
def _get_uncompiled_model(self, seed):
    """Build an uncompiled Sequential model, optionally stacked on top of a
    pretrained model's leading layers.

    If self.pretrained_model_config is provided, the pretrained model is
    loaded (from json+weights, or a single saved model file), its layers up
    to and including 'last_layer_to_take' are copied into the new model
    (optionally frozen via 'last_layer_to_fix'), and then the configured
    layers from self.layers_config are added on top.
    """
    #it is important that keras is only imported here so that
    #the random seed can be set by the model trainer BEFORE the import
    import numpy as np
    np.random.seed(seed)
    import keras
    from keras.models import Sequential
    model = Sequential()
    if (self.pretrained_model_config is not None):
        pretrained_model_weights =\
            self.pretrained_model_config["weight_file"]
        pretrained_model_json = self.pretrained_model_config["json_file"]
        # index (possibly negative) of the last pretrained layer to reuse
        last_layer_to_take =\
            self.pretrained_model_config["last_layer_to_take"]
        # optional index of the last reused layer to mark non-trainable
        last_layer_to_fix =\
            (self.pretrained_model_config["last_layer_to_fix"]
             if "last_layer_to_fix" in self.pretrained_model_config
             else None)
        if (pretrained_model_json is not None):
            # architecture from json, weights loaded separately
            from keras.models import model_from_json
            pre_model =\
                model_from_json(open(pretrained_model_json).read())
            pre_model.load_weights(pretrained_model_weights)
        else:
            # no json: weight_file is assumed to be a full saved model
            from keras.models import load_model
            pre_model = load_model(pretrained_model_weights)
        # default to taking every pretrained layer; normalize negative
        # indices to absolute positions
        if (last_layer_to_take is None):
            last_layer_to_take = -1
        if (last_layer_to_take < 0):
            last_layer_to_take = len(pre_model.layers)+last_layer_to_take
        for idx,a_layer in enumerate(pre_model.layers):
            if (idx <= last_layer_to_take):
                if (last_layer_to_fix is not None):
                    # NOTE(review): 'if last_layer_to_fix' tests
                    # truthiness, so last_layer_to_fix == 0 falls through
                    # to last_layer_to_take (freezing all taken layers),
                    # and a positive value is still offset by
                    # len(pre_model.layers) -- looks like only negative
                    # indices (or None) are intended; confirm with callers
                    if idx <= ((len(pre_model.layers)+last_layer_to_fix)
                               if last_layer_to_fix else last_layer_to_take):
                        a_layer.trainable=False
                model.add(a_layer)
    for layer_config in self.layers_config:
        model.add(load_class_from_config(layer_config))
    return model
def _parse_loss(self): if (isinstance(self.loss, str)): return self.loss else: return load_class_from_config(self.loss)
def _get_uncompiled_model(self, seed):
    """Build an uncompiled keras functional-API Model from node configs.

    Each entry of self.nodes_config becomes a tensor: either an Input
    layer, a merge of other nodes, a shared layer applied to inputs, or a
    freshly instantiated layer applied to inputs. Tensors are collected
    by node name and the final Model is wired from self.input_names to
    self.output_names.

    NOTE(review): non-input nodes assert that their input nodes have
    already been seen, so this relies on self.nodes_config iterating in
    declaration order -- presumably an OrderedDict; confirm.
    """
    #it is important that keras is only imported here so that
    #the random seed can be set by the model trainer BEFORE the import
    import numpy as np
    np.random.seed(seed)
    import keras
    #first load all the shared layers, indexed by their name
    #every shared layer must be given a name so we can refer to
    #it later on!
    shared_layers = self._get_shared_layers()
    # node name -> tensor produced by that node
    name_to_tensor = {}
    for node_name, node_config in self.nodes_config.items():
        assert_attributes_in_config(node_config, ["layer"])
        #if layer is not an input layer, collect all the input tensors
        if (isinstance(node_config['layer'],str)
            or node_config['layer']['class'].endswith(".Input")==False):
            assert_attributes_in_config(node_config, ["input_node_names"])
            input_node_names = node_config['input_node_names']
            if (isinstance(input_node_names, list)):
                # multiple parents: gather their tensors in order
                input_tensors = []
                for input_node_name in input_node_names:
                    assert input_node_name in name_to_tensor,\
                        (input_node_name
                         +" hasn't been declared already; declared "
                         +"node names are: "+str(name_to_tensor.keys()))
                    input_tensors.append(name_to_tensor[input_node_name])
            elif (isinstance(input_node_names, str)):
                # single parent: pass its tensor directly (not a list)
                assert input_node_names in name_to_tensor,\
                    (input_node_names+" for "+str(node_config)+" hasn't been"
                     +" declared already; declared "
                     +"node names are: "+str(name_to_tensor.keys()))
                input_tensors = name_to_tensor[input_node_names]
            else:
                raise RuntimeError("Unsupported type for input_node_names: "
                                   +str(type(input_node_names)))
        #now load the layer.
        layer_config = node_config['layer']
        #if 'layer_config' is just a string, it should refer to the
        #name of a shared layer
        if (isinstance(layer_config, str)):
            assert layer_config in shared_layers,\
                (layer_config+" not in shared_layers; shared_layers are: "
                 +str(shared_layers.keys()))
            # call the shared layer on this node's input tensor(s)
            node_tensor = shared_layers[layer_config](input_tensors)
        #if it's a dictionary, can either be a layer declaration or
        #a merge function
        elif (isinstance(layer_config, dict)):
            assert_attributes_in_config(layer_config, ['class', 'kwargs'])
            assert 'name' not in layer_config['kwargs'],\
                ("Don't declare 'name' within the kwargs; "
                 +" will use the dictionary key for that. At: "
                 +str(self.shared_layers_config))
            layer_config_class = layer_config['class']
            #when it's a merge function, we need to pass in input_tensors
            #as the inputs argument
            if layer_config_class.endswith('.merge'):
                node_tensor = load_class_from_config(
                    layer_config,
                    extra_kwargs={'name': node_name,
                                  'inputs': input_tensors})
            #an Input layer takes no input tensors; just instantiate it
            elif (layer_config_class.endswith('.Input')):
                node_tensor = load_class_from_config(
                    layer_config,
                    extra_kwargs={'name': node_name})
            #otherwise, we call the layer object on the input
            #tensor after it has been instantiated
            else:
                node_tensor = (load_class_from_config(
                    layer_config,
                    extra_kwargs={'name': node_name})(input_tensors))
        else:
            raise RuntimeError("Unsupported type for node_layer_config "
                               +str(type(layer_config)))
        #record the node tensor according to node_name
        name_to_tensor[node_name] = node_tensor
    # every declared input/output name must correspond to a built node
    for name in self.input_names+self.output_names:
        if name not in name_to_tensor:
            raise RuntimeError("No node with name: "+name
                               +" declared. Node names are: "
                               +str(name_to_tensor.keys()))
    from keras.models import Model
    # keras 1.x-style keyword arguments (input=/output=)
    model = Model(input=[name_to_tensor[x] for x in self.input_names],
                  output=[name_to_tensor[x] for x in self.output_names])
    return model
def _parse_loss(self): parsed_loss_dictionary =\ dict((key, (val if isinstance(val,dict)==False else load_class_from_config(val))) for (key,val) in self.loss_dictionary.items()) return parsed_loss_dictionary
def _compile_model(self, model):
    """Compile *model* with the configured optimizer, the parsed
    per-output loss dictionary, and the configured metrics."""
    model.compile(
        optimizer=load_class_from_config(self.optimizer_config),
        loss=self._parse_loss(),
        metrics=self.metrics)