Code Example #1
    def __init__(self, steps=1, lr=0.0001, decay=0.00001):

        #Models
        self.D = None
        self.S = None
        self.G = None

        self.GE = None
        self.SE = None

        self.DM = None
        self.AM = None

        #Config
        self.LR = lr
        self.steps = steps
        self.beta = 0.999

        #Init Models
        self.discriminator()
        self.generator()

        self.GMO = Adam(lr=self.LR, beta_1=0, beta_2=0.999)
        self.DMO = Adam(lr=self.LR, beta_1=0, beta_2=0.999)

        self.GE = clone_model(self.G)
        self.GE.set_weights(self.G.get_weights())

        self.SE = clone_model(self.S)
        self.SE.set_weights(self.S.get_weights())
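A portability note on the Adam calls in this and several later examples: `lr` is the legacy argument name, kept for backward compatibility in TF 2.x and removed in newer Keras releases, where only `learning_rate` is accepted. A minimal sketch of the current spelling:

    from tensorflow.keras.optimizers import Adam

    # legacy spelling, as in the example above: Adam(lr=0.0001, beta_1=0, beta_2=0.999)
    opt = Adam(learning_rate=0.0001, beta_1=0.0, beta_2=0.999)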
Code Example #2
 def clone(self):
     clone = copy(self)
     if isinstance(self._model, list):
         clone._model = [clone_model(model) for model in self._model]
     else:
         clone._model = clone_model(self._model)
     return clone
Code Example #3
	def __init__(self, env_name, env, Actor_net, Critic_net, act_dim, obs_dim, lam=0.97, Actor_lr=0.0001,
				gamma=0.99, delta=0.01, Critic_lr=0.001, render=False, epoch_steps=2000,
				value_train_iterations=5, memory_size=100000, polyak_const=0.995, minibatch_size=100):
		
		self.env_name = env_name
		self.env = env
		self.gamma = gamma
		self.Actor = Actor_net
		self.Critic = Critic_net
		self.act_dim = act_dim
		self.obs_dim = obs_dim
		
		self.Actor_optimizer = optimizers.Adam(lr=Actor_lr)
		self.Critic_optimizer = optimizers.Adam(lr=Critic_lr)

		self.render = render
		self.lam = lam
		self.value_train_iterations = value_train_iterations

		self.Experience = namedtuple('Experience', ['states','actions', 'rewards', 'next_states', 'dones'])
		self.memory_size = memory_size
		self.memory = ReplayMemory(self.memory_size)

		self.Target_Actor = models.clone_model(self.Actor)
		self.Target_Critic = models.clone_model(self.Critic)

		self.minibatch_size = minibatch_size
		self.polyak_const = polyak_const
		self.act_limit = self.env.action_space.high[0]
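Note that `clone_model` copies only the architecture: the clone starts from freshly initialized weights. Target networks such as `Target_Actor` and `Target_Critic` above are therefore usually synced to the online networks once at construction, a step this example leaves to the caller. A minimal sketch of that initial sync, with `actor_net` standing in for the online network:

    from tensorflow.keras import models

    target_actor = models.clone_model(actor_net)       # same architecture, new weights
    target_actor.set_weights(actor_net.get_weights())  # explicit initial sync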
Code Example #4
    def __init__(self,
                 name,
                 global_network_P,
                 global_network_dqn,
                 global_optimizer_P,
                 global_optimizer_dqn,
                 global_counter,
                 env,
                 max_global_steps,
                 returns_list,
                 n_steps=10,
                 gamma=0.99):
        self.name = name
        self.global_network_P = global_network_P
        self.global_network_dqn = global_network_dqn
        self.global_optimizer_P = global_optimizer_P
        self.global_optimizer_dqn = global_optimizer_dqn
        self.global_counter = global_counter
        self.env = env
        self.allow_submit = env.allow_submit
        self.state = self.env.reset()
        self.max_global_steps = max_global_steps
        self.global_step = 0
        self.returns_list = returns_list
        self.n_steps = n_steps
        self.gamma = gamma
        self.noise = OUActionNoise(
            mu=np.zeros(env.continuous_action_space.shape[0]))
        self.n_discrete_actions = env.discrete_action_space.n
        self.start_time = time.time()

        self.local_param_model = clone_model(global_network_P)
        self.local_param_model.set_weights(global_network_P.get_weights())
        self.local_dqn_model = clone_model(global_network_dqn)
        self.local_dqn_model.set_weights(global_network_dqn.get_weights())
Code Example #5
 def copy(self, prev_scholar):
     self.sol = models.clone_model(prev_scholar.sol)
     self.sol.set_weights(prev_scholar.sol.get_weights())
     self.gen = models.clone_model(prev_scholar.gen)
     self.gen.set_weights(prev_scholar.gen.get_weights())
     self.disc = models.clone_model(prev_scholar.disc)
     self.disc.set_weights(prev_scholar.disc.get_weights())
Code Example #6
File: alpha_nnet.py  Project: vvagias/AlphaSnake-Zero
 def copy_and_compile(self, learning_rate=0.0001, TPU=None):
     boundaries = [20, 40, 60, 80, 100]
     values = [0.0] * (len(boundaries) + 1)
     n = learning_rate
     for i in range(len(boundaries)):
         values[i] = n
         n *= 0.25
     if TPU:
         with TPU.scope():
             nnet_copy = AlphaNNet()
             nnet_copy.v_net = clone_model(self.v_net)
             nnet_copy.v_net.build(self.v_net.layers[0].input_shape)
             nnet_copy.v_net.set_weights(self.v_net.get_weights())
             lr = schedules.PiecewiseConstantDecay(boundaries, values)
             nnet_copy.v_net.compile(optimizer=Adam(learning_rate=lr),
                                     loss='mean_squared_error')
     else:
         nnet_copy = AlphaNNet()
         nnet_copy.v_net = clone_model(self.v_net)
         nnet_copy.v_net.build(self.v_net.layers[0].input_shape)
         nnet_copy.v_net.set_weights(self.v_net.get_weights())
         lr = schedules.PiecewiseConstantDecay(boundaries, values)
         nnet_copy.v_net.compile(optimizer=Adam(learning_rate=lr),
                                 loss='mean_squared_error')
     return nnet_copy
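The two branches above are identical apart from the strategy scope. A minimal sketch of one way to remove the duplication, assuming `TPU` is either a tf.distribute strategy or None:

    from contextlib import nullcontext

    scope = TPU.scope() if TPU else nullcontext()
    with scope:
        nnet_copy = AlphaNNet()
        nnet_copy.v_net = clone_model(self.v_net)
        nnet_copy.v_net.build(self.v_net.layers[0].input_shape)
        nnet_copy.v_net.set_weights(self.v_net.get_weights())
        lr = schedules.PiecewiseConstantDecay(boundaries, values)
        nnet_copy.v_net.compile(optimizer=Adam(learning_rate=lr),
                                loss='mean_squared_error')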
Code Example #7
    def __init__(self,
                 capacity=100,
                 max_step=0,
                 networks=None,
                 batch_size=32,
                 update=1,
                 backend='Tensorflow',
                 verbose=1,
                 **kwargs):
        # Initialize parameters
        super(DDPGAgent, self).__init__(**kwargs)
        self.memory = deque(maxlen=capacity)
        self.batch_size = batch_size
        self.max_step = max_step
        if 0 < update < 1:
            self.target_update = update  # Soft update
        elif update >= 1:
            self.target_update = int(update)  # Hard update
        else:
            raise ValueError(
                'Target update should be greater than 0. (0, 1) for soft update, [1, inf] for hard update.'
            )
        self.backend = backend.upper()
        self.verbose = verbose

        # Initialize the agent
        try:
            self.load_brain(self.brain)
        except Exception:
            assert len(networks) == 2
            actor_path = networks[0]['PATH']
            critic_path = networks[1]['PATH']
            if self.backend == 'TENSORFLOW':
                self.Actor = TFutils.ModelBuilder(actor_path)
                self.Critic = TFutils.ModelBuilder(critic_path)

        try:
            if self.backend == 'TENSORFLOW':
                self.ActorTarget = clone_model(self.Actor)
                self.CriticTarget = clone_model(self.Critic)
                actor_optimizer = TFutils.get_optimizer(
                    name=networks[0]['OPTIMIZER'],
                    learning_rate=float(networks[0]['LEARNING_RATE']))
                critic_optimizer = TFutils.get_optimizer(
                    name=networks[1]['OPTIMIZER'],
                    learning_rate=float(networks[1]['LEARNING_RATE']))
                self.ActorOptimizer = actor_optimizer
                self.Critic.compile(optimizer=critic_optimizer, loss='mse')
                self._init_action_train_fn()
        except Exception:
            print('Test mode: could not initialize the training components')
Code Example #8
 def __init__(self, input_shape=None, action_num=None, alpha=0.0001,
              beta=0.0005, gamma=0.99, eta=10, entropy_coef=0.1, entropy_decay=0.99,
              actor_loss_epsilon=0.2, actor_file=None, critic_file=None,
              training=True):
     if actor_file is None:
         if input_shape is None or action_num is None:
             raise Exception('input_shape and action_num are required when no actor file is specified.')
         self.actor = self._get_actor(input_shape, action_num, [64])
     else:
         self.actor = load_model(actor_file)
     if training:
         self.grad_tape = tf.GradientTape(persistent=True)
         self.experiences = []
         self.gamma = gamma
         self.eta = eta
         self.entropy_coef = entropy_coef
         self.entropy_decay = entropy_decay
         self.actor_loss_epsilon = actor_loss_epsilon
         self.actor_optimizer = Adam(learning_rate=alpha)
         self.critic_optimizer = Adam(learning_rate=beta)
         self.critic_loss_func = MeanSquaredError()
         #self.icm = IntrinsicCuriosityModule(
         #    input_shape,
         #    action_num,
         #    64
         #)
         if critic_file is None:
             if input_shape is None:
                 raise Exception('input_shape is required when no critic file is specified.')
             self.critic = self._get_critic(input_shape, [64])
         else:
             self.critic = load_model(critic_file)
         self.prev_actor = clone_model(self.actor)
         self.prev_actor.set_weights(self.actor.get_weights())
Code Example #9
    def test_parsing(self, _model_2, _config):

        # Parsing removes BatchNorm layers, so we make a copy of the model.
        input_model = models.clone_model(_model_2)
        input_model.set_weights(_model_2.get_weights())
        input_model.compile(_model_2.optimizer.__class__.__name__,
                            _model_2.loss, _model_2.metrics)

        num_to_test = 10000
        batch_size = 100
        _config.set('simulation', 'batch_size', str(batch_size))
        _config.set('simulation', 'num_to_test', str(num_to_test))

        _, testset = get_dataset(_config)
        dataflow = testset['dataflow']

        model_lib = import_module('snntoolbox.parsing.model_libs.' +
                                  _config.get('input', 'model_lib') +
                                  '_input_lib')
        model_parser = model_lib.ModelParser(input_model, _config)
        model_parser.parse()
        model_parser.build_parsed_model()
        _, acc, _ = model_parser.evaluate(batch_size,
                                          num_to_test,
                                          dataflow=dataflow)
        _, target_acc = _model_2.evaluate(dataflow,
                                          steps=int(num_to_test / batch_size))
        assert acc == target_acc
Code Example #10
    def build_model(self, model, gpus=1, **compile_kwargs):
        """
        Compile a Keras Functional model.

        :param model: keras.models.Model: Keras functional model
        :param gpus: int: number of GPU units on which to parallelize the Keras model
        :param compile_kwargs: kwargs passed to the 'compile' method of the Keras model
        """
        # Test the parameters
        if type(gpus) is not int:
            raise TypeError("'gpus' argument must be an int")
        # Make Keras models picklable
        util.make_keras_picklable()
        # Build a model, either on a single GPU or on a CPU to control multiple GPUs
        self.base_model = model
        self._n_steps = len(model.outputs)
        if gpus > 1:
            import tensorflow as tf
            with tf.device('/cpu:0'):
                self.base_model = models.clone_model(self.base_model)
            self.model = multi_gpu_model(self.base_model, gpus=gpus)
            self.gpus = gpus
        else:
            self.model = self.base_model
        self.model.compile(**compile_kwargs)
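A compatibility note: `multi_gpu_model` was deprecated and then removed in TF 2.x; the replacement is to build and compile the model inside a `tf.distribute.MirroredStrategy` scope. A minimal sketch, with `build_model_fn` standing in as a hypothetical name for whatever constructs the Keras model:

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        model = build_model_fn()         # hypothetical builder
        model.compile(**compile_kwargs)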
Code Example #11
    def _fit_terminal_node(self, classes, node):
        self.print('\n\n', '-' * 50, sep='')
        self.print(f"Fitting terminal node with classes {classes}")

        mask = create_mask(self.y, classes)
        y = self.y[mask].copy()
        encoder = OneHotEncoder(categories='auto', sparse=False)
        encoder.fit(y.reshape(-1, 1))
        y = encoder.transform(y.reshape(-1, 1))

        self.encoders[node.name] = encoder
        self.class_maps[node.name] = dict(zip(classes, classes))

        model = self._build_model(
            self.units, (len(classes), ), self.input_shape,
            clone_model(self.backbone) if self.backbone is not None else None)
        model.fit(self.X[mask],
                  y,
                  epochs=self.end_fit,
                  verbose=self.verbose > 1,
                  batch_size=self.batch_size)
        self.models[node.name] = model

        for a_class in classes:
            self.node_counter += 1
            self.node_to_class[self.node_counter] = a_class
            Node(self.node_counter, parent=node)
            self.node_to_classes[self.node_counter] = [a_class]
Code Example #12
    def __init__(self,
                 actions,
                 starting_mem_len,
                 max_mem_len,
                 starting_epsilon,
                 learn_rate,
                 epsilon_decay=.9 / 100000,
                 gamma=0.95,
                 directory=None,
                 debug=False):
        self.gamma = gamma
        self.actions = actions

        self.eps = starting_epsilon
        self.epsilon_decay = epsilon_decay

        self.epsilon_min = .05
        self.memory = Memory(max_mem_len)
        self.lr = learn_rate

        if directory:
            self.model = self._load_model(directory)
        else:
            self.model = self._build_model()

        self.model_target = clone_model(self.model)
        self.total_timesteps = 0
        self.starting_mem_len = starting_mem_len
        self.learn_steps = 0
Code Example #13
def apply_pruning(model: Any) -> Any:
    """
    Aplica pruning a un modelo de tensorflow.
    Args:
        model:  Model de TensorFlow al que se aplica pruning.

    Returns:
        Model de TensorFlow con pruning aplicado.
    """
    def apply_pruning_to_layer(layer: Any) -> Any:
        """
        Aplica low magnitude pruning a las capas compatibles.
        Args:
            layer:  Layer de TensorFlow a la que se le aplica pruning.

        Returns:
            Layer de TensorFlow a la que se ha aplicado pruning si es compatible.
        """
        for prunable_layer in PRUNABLE_LAYERS:
            if isinstance(layer, prunable_layer):
                return tfmot.sparsity.keras.prune_low_magnitude(layer)
        return layer

    pruned_model = clone_model(model, clone_function=apply_pruning_to_layer)

    return pruned_model
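This example leans on the `clone_function` hook of `clone_model`: the function is called once per layer, and whatever it returns replaces that layer in the clone. One caveat worth knowing: a model wrapped with `prune_low_magnitude` must be trained with `tfmot.sparsity.keras.UpdatePruningStep()` among its callbacks. A minimal inline variant of the same idea, pruning only Dense layers:

    import tensorflow as tf
    import tensorflow_model_optimization as tfmot
    from tensorflow.keras.models import clone_model

    # prune Dense layers, leave everything else untouched
    pruned_model = clone_model(
        model,
        clone_function=lambda layer: (
            tfmot.sparsity.keras.prune_low_magnitude(layer)
            if isinstance(layer, tf.keras.layers.Dense) else layer))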
Code Example #14
	def __init__(self, env_name, env, policy_net, value_net=None, lam=0.97, value_lr=0.01, gamma=0.99, delta=0.01,
					cg_damping=0.001, cg_iters=10, residual_tol=1e-5, backtrack_coeff=0.6, policy_lr=0.01,
					backtrack_iters=10, render=False, local_steps_per_epoch=2000, value_train_iterations=5):
		self.env_name = env_name
		self.envs = []
		self.gamma = gamma
		self.cg_iters = cg_iters
		self.cg_damping = cg_damping
		self.residual_tol = residual_tol
		self.model = policy_net
		self.tmp_model = models.clone_model(self.model)
		self.value_net = value_net
		self.policy_optimizer = optimizers.Adam(lr=policy_lr)
		if self.value_net:
			self.value_optimizer = optimizers.Adam(lr=value_lr)
			self.value_net.compile(self.value_optimizer, loss = losses.MSE)
		self.delta = delta
		self.backtrack_coeff = backtrack_coeff
		self.backtrack_iters = backtrack_iters
		self.render = render
		self.local_steps_per_epoch = local_steps_per_epoch
		self.lam = lam
		self.value_train_iterations = value_train_iterations
		self.N_PATHS = 15
		self.N_THREADS = 2
		self.epsilon = 0.2
		for i in range(self.N_PATHS):
			self.envs.append(copy.deepcopy(env))
Code Example #15
File: mnist.py  Project: mbenhamd/innvestigate
def _load_pretrained_net(modelname, new_input_shape):
    filename = PRETRAINED_MODELS[modelname]["file"]
    urlname = PRETRAINED_MODELS[modelname]["url"]
    #model_path = get_file(fname=filename, origin=urlname) #TODO: FIX! corrupts the file?
    model_path = os.path.expanduser('~') + "/.keras/models/" + filename

    #workaround for the more elegant, but dysfunctional, solution above.
    if not os.path.isfile(model_path):
        model_dir = os.path.dirname(model_path)
        if not os.path.isdir(model_dir):
            os.makedirs(model_dir)
        os.system("wget {} &&  mv -v {} {}".format(urlname, filename,
                                                   model_path))

    model = load_model(model_path)
    #create replacement input layer with new shape.
    model.layers[0] = tensorflow.keras.layers.InputLayer(
        input_shape=new_input_shape, name="input_1")
    for l in model.layers:
        l.name = "%s_workaround" % l.name
    model = tensorflow.keras.models.Sequential(layers=model.layers)

    model_w_sm = clone_model(model)

    #NOTE: perform forward pass to fix a Keras 2.2.0 issue with improper weight initialization
    #See: https://github.com/albermax/innvestigate/issues/88
    x_dummy = np.zeros(new_input_shape)[None, ...]
    model_w_sm.predict(x_dummy)

    model_w_sm.set_weights(model.get_weights())
    model_w_sm.add(tensorflow.keras.layers.Activation("softmax"))
    return model, model_w_sm
Code Example #16
    def __init__(self, state_size, strategy="t-dqn", reset_every=1000, pretrained=False, model_name=None):
        self.strategy = strategy

        # agent config
        self.state_size = state_size       # normalized previous days
        self.action_size = 3               # 0 = sit/hold, 1 = buy, 2 = sell
        self.model_name = model_name
        self.inventory = []
        self.memory = deque(maxlen=10000)
        self.first_iter = True

        # model config
        self.gamma = 0.95  # affinity for long-term reward
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.learning_rate = 0.001
        self.loss = huber_loss
        self.custom_objects = {"huber_loss": huber_loss}  # important for loading the model from memory
        self.optimizer = Adam(lr=self.learning_rate)

        if pretrained and self.model_name is not None:
            self.model = self.load()
        else:
            self.model = self._model()

        # strategy config
        if self.strategy in ["t-dqn", "double-dqn"]:
            self.n_iter = 1
            self.reset_every = reset_every

            # target network
            self.target_model = clone_model(self.model)
            self.target_model.set_weights(self.model.get_weights())
Code Example #17
def apply_wrapper_to_layer(model_to_clone,
                           layer_names,
                           wrapper,
                           sparsity_sched,
                           clone=False):
    if clone:
        model = clone_model(model_to_clone)
        model.set_weights(model_to_clone.get_weights())
    else:
        model = model_to_clone

    layers = list(model.layers)

    if not isinstance(model.layers[0], tf.keras.layers.InputLayer):
        pruned_model_layers = []
        for layer in model.layers:
            if layer.name in layer_names:
                pruned_model_layers.append(wrapper(layer, sparsity_sched))
            else:
                pruned_model_layers.append(layer)
        new_model = Sequential(pruned_model_layers)
    else:
        in_shape = model.layers[0].input_shape[0]
        layers = layers[1:]
        inp = Input(shape=in_shape[1:])
        x = inp
        for layer in layers:
            if layer.name in layer_names:
                x = wrapper(layer, sparsity_sched)(x)
            else:
                x = layer(x)

        new_model = Model(inp, x)
    return new_model
Code Example #18
def initialize_sparse_model(trained_model, pruned_model_with_mask, pm):
    """
        Given a filename (or a model) with weights and a pruned model with its mask, returns a new model with weights in filename and pruned with mask
    """
    model = clone_model(trained_model)
    model.set_weights(trained_model.get_weights())

    sparsity = 1 - pm
    sparsity_sched = ConstantSparsity(
        sparsity,
        0,  # do the sparsity calculation in the first step
        end_step=0,
        frequency=10000000)

    pruned_model_layers = []
    for i, layer in enumerate(pruned_model_with_mask.layers):
        if isinstance(layer, pruning_wrapper.PruneLowMagnitude):
            l_weights = model.layers[i].get_weights()
            l_weights[0] = l_weights[0] * layer.pruning_vars[0][1].numpy()
            model.layers[i].set_weights(l_weights)
            pruned_model_layers.append(
                prune_low_magnitude(model.layers[i], sparsity_sched))
        else:
            pruned_model_layers.append(model.layers[i])
    pruned_model = Sequential(pruned_model_layers)
    pruned_model.compile(optimizer=optimizers.SGD(lr=0),
                         loss='sparse_categorical_crossentropy',
                         metrics=['accuracy'])
    return pruned_model
Code Example #19
    def TrainModel(self, protein, model):
        Params = self.ModelParams[protein]
        #copy the model architecture (clone_model re-initializes the weights)
        Model = clone_model(model)

        #read the training data
        with open('temporary/DataForCNN_' + protein + '.txt', 'rb') as fp:
            FullData = pickle.load(fp)

        #get the scores
        Y = FullData['Y']

        #reshape the data so the model can load it
        X_train = np.asarray(FullData['X'])
        X_train = np.swapaxes(X_train, 1, 2)
        Y_train = np.array(Y)

        #there is an option to not use the structure data
        if not self.struct:
            X_train = X_train[:, 0:4, :]

        X_train = np.swapaxes(X_train, 2, 1)
        #fit the model
        Model.compile(optimizer='adam', loss='mse')
        Model.fit(X_train,
                  Y_train,
                  batch_size=Params['batchsize'],
                  epochs=Params['epochs'])

        #save the trained model
        filename = protein + '_model.h5'
        Model.save(filename)

        return Model
Code Example #20
File: DDPGAgent.py  Project: elliotwoods/MuscleMemory
    def init_critic(self, options):
        # create the critic
        x = state_input = layers.Input(shape=(options['state_count'], ))

        for hidden_layer_size in options['critic_state_hidden_layers']:
            x = layers.Dense(hidden_layer_size, activation="relu")(x)
            x = layers.BatchNormalization()(x)

        state_x = x

        x = action_input = layers.Input(shape=(options['action_count'], ))

        for hidden_layer_size in options['critic_action_hidden_layers']:
            x = layers.Dense(hidden_layer_size, activation="relu")(x)
            x = layers.BatchNormalization()(x)

        action_x = x

        x = layers.Concatenate()([state_x, action_x])

        for hidden_layer_size in options['critic_final_hidden_layers']:
            x = layers.Dense(hidden_layer_size, activation="relu")(x)
            x = layers.BatchNormalization()(x)

        x = layers.Dense(1, activation='linear')(x)

        self.critic_model = tf.keras.Model([state_input, action_input], x)
        self.critic_model_target = models.clone_model(self.critic_model)
Code Example #21
File: test_models.py  Project: davisden/snn_toolbox
    def test_normalizing(self, _model_2, _config):

        # Parsing removes BatchNorm layers, so we make a copy of the model.
        input_model = models.clone_model(_model_2)
        input_model.set_weights(_model_2.get_weights())
        input_model.compile(_model_2.optimizer.__class__.__name__,
                            _model_2.loss, _model_2.metrics)

        num_to_test = 10000
        batch_size = 100
        _config.set('simulation', 'batch_size', str(batch_size))
        _config.set('simulation', 'num_to_test', str(num_to_test))

        normset, testset = get_dataset(_config)
        x_test = testset['x_test']
        y_test = testset['y_test']
        x_norm = normset['x_norm']

        model_lib = import_module('snntoolbox.parsing.model_libs.' +
                                  _config.get('input', 'model_lib') +
                                  '_input_lib')
        model_parser = model_lib.ModelParser(input_model, _config)
        model_parser.parse()
        parsed_model = model_parser.build_parsed_model()

        normalize_parameters(parsed_model, _config, x_norm=x_norm)

        _, acc, _ = model_parser.evaluate(batch_size, num_to_test, x_test,
                                          y_test)
        _, target_acc = _model_2.evaluate(x_test, y_test, batch_size)
        assert acc == target_acc
Code Example #22
def load_model(filename=None,
               model=None,
               weights_file=None,
               custom_objects={}):
    """Loads model architecture from JSON and instantiates the model.
        filename: path to JSON file specifying model architecture
        model:    (or) a Keras model to be cloned
        weights_file: path to HDF5 file containing model weights
        custom_objects: a dictionary of custom classes used in the model, keyed by name"""
    import_keras()
    from tensorflow.keras.models import model_from_json, clone_model
    if filename is not None:
        with open(filename) as arch_f:
            json_str = arch_f.readline()
            new_model = model_from_json(json_str,
                                        custom_objects=custom_objects)
        logging.info("Loaded model architecture from file")
    elif model is not None:
        new_model = clone_model(model)
        logging.info("Loaded model architecture by cloning an existing model")
    else:
        logging.error("Cannot load model: filename and model are both None")
        return None
    if weights_file is not None:
        new_model.load_weights(weights_file)
        logging.info("Loaded weights from weights_file")
    return new_model
Code Example #23
File: test_model.py  Project: kabartay/OrcaNet
def dropout_test():
    def dropout_model(rate=0.):
        inp = layers.Input((10,))
        out = layers.Dropout(rate)(inp)
        model = Model(inp, out)
        return model

    def get_layer_output(model, xs, which=-1):
        l_out = K.function([model.layers[0].input, K.learning_phase()],
                           [model.layers[which].output])
        # learning_phase = 1 selects training mode (dropout active)
        layer_output = l_out([xs, 1])[0]
        return layer_output

    model0 = dropout_model(0.)
    model1 = dropout_model(0.99)
    xs = np.ones((3, 10))

    print("no drop\n", get_layer_output(model0, xs))
    print("\nmax drop\n", get_layer_output(model1, xs))
    model1.layers[-1].rate = 0.
    print("\nchanged max drop to zero\n", model1.layers[-1].get_config())
    print(get_layer_output(model1, xs))
    model1_clone = clone_model(model1)
    print("\n clone changed model\n", get_layer_output(model1_clone, xs))
Code Example #24
File: dropout_model.py  Project: thfuchs/tsRNN
def dropout_model(model, dropout):
    """
    Create a keras function to predict with dropout
    Credits to https://github.com/keras-team/keras/issues/8826 and to 
    sfblake: https://medium.com/hal24k-techblog/how-to-generate-neural-network-
    confidence-intervals-with-keras-e4c0b78ebbdf
    
    model : keras model
    dropout : fraction dropout to apply to all layers
    
    Returns
    model_new : model with updated dropout rate
    """

    # 1. Use keras.models.clone_model
    model_new = clone_model(model)

    # 2. change dropout rate
    for layer in model_new.layers:
        if isinstance(layer, Dropout):
            layer.rate = dropout

    # 3. Compile the model
    # model_new.compile(optimizer="Adam", loss="mse")

    # 4. set_weights of cloned model with get_weights
    model_new.set_weights(model.get_weights())

    return model_new
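Since `clone_model` rebuilds each layer from its config, an equivalent approach is to change the rate during cloning via `clone_function`. A minimal sketch, where `clone_with_rate` and `set_rate` are hypothetical names, not part of the original snippet:

    from tensorflow.keras.layers import Dropout
    from tensorflow.keras.models import clone_model

    def clone_with_rate(model, new_rate):
        def set_rate(layer):
            # hypothetical helper: rebuild Dropout layers with the new rate
            if isinstance(layer, Dropout):
                cfg = layer.get_config()
                cfg['rate'] = new_rate
                return Dropout.from_config(cfg)
            return layer

        model_new = clone_model(model, clone_function=set_rate)
        model_new.set_weights(model.get_weights())  # Dropout has no weights, so shapes match
        return model_new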
Code Example #25
File: callbacks.py  Project: muhi-zatar/PV-monitor
    def on_epoch_end(self, epoch, logs=None):
        self.current_epoch += 1
        if (epoch + 1) % self.eval_period == 0:
            results = evaluate_model(self.model, self.data_generator,
                                     self.hparams)
            f1_eval = {}

            f1_eval["sum"] = sum([
                results[x + "_f1"] for x in self.data_generator.selected_tasks
            ])
            f1_eval["geo"] = reduce(lambda x, y: x * y, [
                results[x + "_f1"] for x in self.data_generator.selected_tasks
            ])

            for k in f1_eval:
                if f1_eval[k] > self.best_models[k]["score"]:
                    print(
                        "\n",
                        "*** New Best {} {} Evaluation: ".format(k, self.name),
                        results)
                    self.best_models[k]["results"] = results
                    self.best_models[k]["score"] = f1_eval[k]
                    self.best_models[k]["model"] = clone_model(self.model)
                    self.best_models[k]["model"].set_weights(
                        self.model.get_weights())
                    self.best_models[k]["epoch"] = self.current_epoch
Code Example #26
File: tf.py  Project: uschmidt83/CSBDeep
    def export_to_dir(dirname):
        if len(model.inputs) > 1 or len(model.outputs) > 1:
            warnings.warn('Found multiple input or output layers.')

        def _export(model):
            if IS_TF_1:
                from tensorflow import saved_model
                from keras.backend import get_session
            else:
                from tensorflow.compat.v1 import saved_model
                from tensorflow.compat.v1.keras.backend import get_session

            builder = saved_model.builder.SavedModelBuilder(dirname)
            # use name 'input'/'output' if there's just a single input/output layer
            inputs  = dict(zip(model.input_names,model.inputs))   if len(model.inputs)  > 1 else dict(input=model.input)
            outputs = dict(zip(model.output_names,model.outputs)) if len(model.outputs) > 1 else dict(output=model.output)
            signature = saved_model.signature_def_utils.predict_signature_def(inputs=inputs, outputs=outputs)
            signature_def_map = { saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature }
            builder.add_meta_graph_and_variables(get_session(), [saved_model.tag_constants.SERVING], signature_def_map=signature_def_map)
            builder.save()

        if IS_TF_1:
            _export(model)
        else:
            from tensorflow.keras.models import clone_model
            weights = model.get_weights()
            with tf.Graph().as_default():
                # clone model in new graph and set weights
                _model = clone_model(model)
                _model.set_weights(weights)
                _export(_model)

        if meta is not None and len(meta) > 0:
            save_json(meta, os.path.join(dirname,'meta.json'))
Code Example #27
File: model.py  Project: huynhngoc/deoxys
    def _gradient_backprop_eager(self,
                                 grad_fn,
                                 layer_name,
                                 images,
                                 mode='max',
                                 output_index=0,
                                 loss_fn=None):
        # save the current weights
        weights = self.model.get_weights()

        new_model = clone_model(self.model)
        # Apply weights
        new_model.set_weights(weights)

        for layer in new_model.layers:
            if 'activation' in layer.get_config():
                if 'relu' in layer.activation.__name__:
                    layer.activation = grad_fn

        guided_model = KerasModel(new_model.inputs,
                                  new_model.get_layer(layer_name).output)

        img_tensor = tf.Variable(tf.cast(images, K.floatx()))
        with tf.GradientTape() as tape:
            tape.watch(img_tensor)
            output = guided_model(img_tensor)

            loss = self._get_backprop_loss(output, mode, output_index, loss_fn)

        grads = tape.gradient(loss, img_tensor)

        del guided_model
        del new_model

        return grads.numpy()
Code Example #28
 def copy_model(self):
     """
     Return a compiled copy of the network's model.
     """
     model_copy = clone_model(self.model)
     self.compile_model(model_copy)
     model_copy.set_weights(self.model.get_weights())
     return model_copy
Code Example #29
def copy_model(model, optimiser, set_weights, learning_rate=0.001):
    model_copy = clone_model(model)
    opt = Adam(learning_rate=learning_rate)

    model_copy.compile(loss=cross_entropy, optimizer=opt, metrics=["acc"])
    if set_weights:
        model_copy.set_weights(model.get_weights())
    return model_copy
Code Example #30
 def create_model_clone(self, model):
     model_copy = clone_model(model)
     model_copy.build(
         model.layers[0].input_shape
     )  # build with the same input shape as the source model
     model_copy.compile(optimizer=optimizer, loss=loss, metrics=['mae'])
     model_copy.set_weights(model.get_weights())
     return model_copy
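Taken together, the examples above keep returning to the same four-step pattern: clone the architecture, build the clone if the source model's input shape is not otherwise known to it, compile (compile state is not cloned), and copy the weights explicitly (weights are not cloned either). A minimal end-to-end sketch:

    from tensorflow.keras.models import clone_model

    model_copy = clone_model(model)                    # architecture only
    model_copy.build(model.layers[0].input_shape)      # only needed if not built yet
    model_copy.compile(optimizer='adam', loss='mse')   # compile state is not cloned
    model_copy.set_weights(model.get_weights())        # neither are the weights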