def testLinearSolverConstruction(self, x1, y1, x2, y2, a, b):
     """Verifies that a TFLinearEquationSolver is constructed correctly."""
     solver = clustering_centroids.TFLinearEquationSolver(
         float(x1), float(y1), float(x2), float(y2))
     solver_a = solver.a
     self.assertAlmostEqual(K.batch_get_value([solver_a])[0], a)
     self.assertAlmostEqual(K.batch_get_value([solver.b])[0], b)
Example n. 2
 def F(inputs):
     self.count += 1
     R = fast_train_function(inputs)
     if self.count % self.k == 0:
         K.batch_get_value(slow_updates)
         K.batch_get_value(copy_updates)
     return R
Example n. 3
 def apply_ema_weights(self):
     """
     store origin model weights, then apply the ema_weights
     to model
     """
     self.old_weights = K.batch_get_value(self.model.weights)
     ema_weights = K.batch_get_value(self.ema_weights)
     K.batch_set_value(zip(self.model.weights, ema_weights))
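The snippet above stashes the live weights before overwriting them with the EMA copies, which implies a matching restore step. Here is a minimal sketch of that counterpart (the method name reset_old_weights is an assumption, not shown in the source):

 def reset_old_weights(self):
     """Hypothetical counterpart: put back the weights saved by apply_ema_weights."""
     K.batch_set_value(zip(self.model.weights, self.old_weights))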
def create_generator(config):
    dataset = create_dataset(config)
    iterator = dataset.make_one_shot_iterator()
    batch = iterator.get_next()

    while True:
        yield K.batch_get_value(batch)
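A minimal usage sketch (the compiled model and the steps_per_epoch value are assumptions): because the generator yields plain NumPy batches, it can be passed directly to the TF1-style fit_generator API.

# Hypothetical usage of create_generator with a compiled Keras model.
gen = create_generator(config)
model.fit_generator(gen, steps_per_epoch=100, epochs=10)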
Example n. 5
def save_optimizer_weights(model, filepath, epoch, batch):
    symbolic_weights = getattr(model.optimizer, 'weights')
    if symbolic_weights is None:
        return
    weight_values = K.batch_get_value(symbolic_weights)
    with open(filepath + '_optimizer.pkl', 'wb') as f:
        pickle.dump((weight_values, epoch, batch), f)
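For completeness, a hedged sketch of the matching restore step (the helper name load_optimizer_weights is an assumption): Keras optimizers expose set_weights, but it only works once the optimizer has created its slot variables, e.g. after one training step.

def load_optimizer_weights(model, filepath):
    # Hypothetical counterpart to save_optimizer_weights above.
    with open(filepath + '_optimizer.pkl', 'rb') as f:
        weight_values, epoch, batch = pickle.load(f)
    # Assumes the optimizer's slot variables already exist (run one step first).
    model.optimizer.set_weights(weight_values)
    return epoch, batch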
 def testCDFValues(self, weights, point, probability):
     """Verifies that TFCumulativeDistributionFunction yields the expected output for the inputs provided."""
     cdf_calc = clustering_centroids.TFCumulativeDistributionFunction(
         weights)
     self.assertAlmostEqual(
         probability,
         K.batch_get_value([cdf_calc.get_cdf_value(point)])[0])
 def testLinearSolverSolveForY(self, x1, y1, x2, y2, x, y):
   solver = clustering_centroids.TFLinearEquationSolver(float(x1),
                                                        float(y1),
                                                        float(x2),
                                                        float(y2))
   for_y = solver.solve_for_y(x)
   self.assertAlmostEqual(K.batch_get_value([for_y])[0], y)
def save_opt_weights(model, filepath):
    with h5py.File(filepath, 'w') as f:
        # Save optimizer weights.
        symbolic_weights = getattr(model.optimizer, 'weights')
        if symbolic_weights:
            optimizer_weights_group = f.create_group('optimizer_weights')
            weight_values = K.batch_get_value(symbolic_weights)
            weight_names = []
            for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):
                # With the Theano backend, optimizer weights default to the name '/variable'
                if K.backend() == 'theano':
                    if hasattr(w, 'name') and w.name != "/variable":
                        name = str(w.name)
                    else:
                        name = 'param_' + str(i)
                else:
                    if hasattr(w, 'name') and w.name:
                        name = str(w.name)
                    else:
                        name = 'param_' + str(i)
                weight_names.append(name.encode('utf8'))
            optimizer_weights_group.attrs['weight_names'] = weight_names
            for name, val in zip(weight_names, weight_values):
                param_dset = optimizer_weights_group.create_dataset(
                    name, val.shape, dtype=val.dtype)
                if not val.shape:
                    # scalar
                    param_dset[()] = val
                else:
                    param_dset[:] = val
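A hedged sketch of reading that file back (the helper name load_opt_weights is an assumption; it mirrors the layout written above and again assumes the optimizer has already created its weights):

def load_opt_weights(model, filepath):
    # Hypothetical counterpart to save_opt_weights.
    with h5py.File(filepath, 'r') as f:
        group = f['optimizer_weights']
        names = [n.decode('utf8') if isinstance(n, bytes) else n
                 for n in group.attrs['weight_names']]
        weight_values = [group[name][()] for name in names]
    model.optimizer.set_weights(weight_values)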
Example n. 9
 def testClusterCentroids(self, weights, number_of_clusters, centroids):
   dbci = clustering_centroids.DensityBasedCentroidsInitialisation(
       weights,
       number_of_clusters
   )
   calc_centroids = K.batch_get_value([dbci.get_cluster_centroids()])[0]
   self.assertSequenceAlmostEqual(centroids, calc_centroids, places=4)
Example n. 10
        def save_integrated_agent(self, vissim_working_directory, model_name, agent_type, Session_ID, episode):

                if self.type == 'AC':
                        Weights_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,"Agent{}".format(self.ID),'Episode'+ str(episode) +'Agent'+str(self.ID)+'_Weights'+'.h5')
                        Optimizer_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,"Agent{}".format(self.ID),'Episode'+ str(episode) +'Agent'+str(self.ID)+'_Optimizer'+'.h5')
                        print('Saving architecture, weights, optimizer state for agent-{}'.format(self.ID))

                        symbolic_weights = getattr(self.model.optimizer, 'weights')
                        weight_values = K.batch_get_value(symbolic_weights)
                        with open(Optimizer_Filename, 'wb') as f:
                                pickle.dump(weight_values, f)

                        # small change: save only the weights instead of the whole agent
                        self.model.save_weights(Weights_Filename)

                else:
                        folder =  os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID, "Agent{}".format(self.ID))
                        if not os.path.exists(folder):
                                os.makedirs(folder)
                        Filename = os.path.join(folder,'Episode'+ str(episode) +'Agent'+str(self.ID)+'.h5')
                        print('Saving architecture, weights and optimizer state for agent-{}'.format(self.ID))
                        self.model.save(Filename)

                Memory_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,"Agent{}".format(self.ID),'Episode'+ str(episode) +'Agent'+str(self.ID)+'_Memory'+'.p')
                print('Dumping agent-{} memory into pickle file'.format(self.ID))
                pickle.dump(self.memory, open(Memory_Filename, 'wb'))
                Training_Progress_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,"Agent{}".format(self.ID),'Episode'+ str(episode) +'Agent'+str(self.ID)+'_Train'+'.p')
                print('Dumping Training Results into pickle file.')
                pickle.dump(self.reward_storage, open(Training_Progress_Filename, 'wb'))
                Loss_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,"Agent{}".format(self.ID),'Episode'+ str(episode) +'Agent'+str(self.ID)+'_Loss'+'.p')
                print('Dumping Loss Results into pickle file.')
                pickle.dump(self.loss, open(Loss_Filename, 'wb'))
Example n. 11
        def best_agent(self, vissim_working_directory, model_name, agent_type, Session_ID):

                # Check if a suitable folder exists
                folder =  os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID)
                if not os.path.exists(folder):
                        os.makedirs(folder)

                if self.average_reward >= np.max(self.reward_storage):
                        
                        best_agent_memory = self.memory
                        print('Saving architecture, weights, optimizer state for best agent-{}'.format(self.ID))
                        if self.type == 'AC':
                                best_agent_weights = self.model.get_weights()
                                Weights_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,'BestAgent'+str(self.ID)+'_Weights'+'.h5')
                                Optimizer_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,'BestAgent'+str(self.ID)+'_Optimizer'+'.h5')
                        
                                symbolic_weights = getattr(self.model.optimizer, 'weights')
                                weight_values = K.batch_get_value(symbolic_weights)
                                with open(Optimizer_Filename, 'wb') as f:
                                        pickle.dump(weight_values, f)

                                self.model.save_weights(Weights_Filename)
                        else:
                                best_agent_weights = self.model
                                Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,'BestAgent'+str(self.ID)+'.h5')
                                self.model.save(Filename)

                        Memory_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,'BestAgent'+str(self.ID)+'_Memory'+'.p')
                        pickle.dump(best_agent_memory, open(Memory_Filename, 'wb'))
                        #print("New best agent found. Saved in {}".format(Memory_Filename))
                        Training_Progress_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,'Agent'+str(self.ID)+'_Train'+'.p')
                        #print('Dumping Training Results into pickle file.')
                        Loss_Filename = os.path.join(vissim_working_directory, model_name, "Agents_Results", agent_type, Session_ID,'Agent'+str(self.ID)+'_Loss'+'.p')
                        #print('Dumping Loss Results into pickle file.')
                        pickle.dump(self.loss, open(Loss_Filename, 'wb'))
Example n. 12
    def on_epoch_end(self, epoch, logs=None):
        # if epoch < 50: return  # evaluation is slow, so skip it for the first 50 epochs

        acc, f1, final = self.evaluate()
        print(f'acc={acc},f1={f1},final={final}')
        self.metrics.append((epoch, acc, f1, final))
        json.dump(self.metrics, open('train.log', 'w'), indent=4)
        if final > self.best:
            self.best = final
            self.model.save_weights('best_model.weights')
        print('learning rate: %s' % (tk.eval(self.model.optimizer.lr)))
        print('acc: %.4f, f1: %.4f, final: %.4f, best final: %.4f\n' %
              (acc, f1, final, self.best))

        if epoch + 1 == 30 or (self.stage == 0 and epoch > 15 and
                               (final < 0.5 or np.argmax(self.metrics, 0)[3] <
                                len(self.metrics) - 5)):
            """达到30个epoch,或者final开始下降到0.5以下(开始发散),
            或者连续5个epoch都没提升,就降低学习率。
            """
            self.stage = 1
            self.model.load_weights('best_model.weights')

            tk.set_value(self.model.optimizer.lr, 1e-4)
            tk.set_value(self.model.optimizer.iterations, 0)
            opt_weights = tk.batch_get_value(self.model.optimizer.weights)
            opt_weights = [w * 0. for w in opt_weights]
            tk.batch_set_value(zip(self.model.optimizer.weights, opt_weights))
Example n. 13
    def on_epoch_end(self, epoch, logs=None):
        train_loss = round(logs.get('loss'), 4)
        val_loss = round(logs.get('val_loss'), 4)

        # Save model as h5 file
        model_filepath = f'{self.model_dir}/model-{epoch + 1}-{train_loss}-{val_loss}.h5'
        save_model(self.model, model_filepath)

        # Save model weights as h5 file
        weights_filepath = f'{self.model_dir}/model-weights-{epoch + 1}-{train_loss}-{val_loss}.h5'
        self.model.save_weights(weights_filepath)

        # Save optimizer state as pickle file
        optimizer_weights_filepath = f'{self.model_dir}/optimizer-weights-{epoch + 1}-{train_loss}-{val_loss}.pkl'
        with open(optimizer_weights_filepath, 'wb') as f:
            pickle.dump(
                K.batch_get_value(getattr(self.model.optimizer, 'weights')), f)

        # Update the checkpoint epoch
        self.checkpoint['epoch'] += 1
        if val_loss < self.best_val_loss:
            self.checkpoint['best_model'] = model_filepath
            self.best_val_loss = val_loss
        self.checkpoint['last_model'] = model_filepath
        self.checkpoint['last_weights'] = weights_filepath
        self.checkpoint['last_optimizer_weights'] = optimizer_weights_filepath
        with open(f"{self.checkpoint['checkpoint_dir']}/checkpoint.json",
                  "w") as f:
            json.dump(self.checkpoint, f, indent=4)
Example n. 14
 def initialize(self):
     """ema_weights初始化跟原模型初始化一致。
     """
     self.old_weights = K.batch_get_value(self.model.weights)
     self.mv_trainable_weights_vals = {
         x.name: K.get_value(x)
         for x in self.model.trainable_weights
     }
Example n. 15
def mnist_generator(filenames):

    dataset = mnist_dataset(filenames)
    iterator = dataset.make_one_shot_iterator()
    batch = iterator.get_next()

    while True:
        yield K.batch_get_value(batch)
    def _pull_values(self, ca, pulling_indices, expected_output):
        pulling_indices_np = np.array(pulling_indices)
        res_tf = ca.get_clustered_weight(pulling_indices_np)

        res_np = K.batch_get_value([res_tf])[0]
        res_np_list = res_np.tolist()

        self.assertSequenceEqual(res_np_list, expected_output)
Example n. 17
def _get_cell_weights(rnn_cell, as_tensors=True, concat_gates=True):
    """Retrieves RNN layer weights from their cell(s).
    NOTE: if CuDNNLSTM or CuDNNGRU cell, `rnn_cell` must be the layer instead,
          where non-CuDNN cell attributes are stored.
    """
    def _get_cell_info(rnn_cell):
        rnn_type = type(rnn_cell).__name__.replace('Cell', '')

        if rnn_type in ['SimpleRNN', 'IndRNN']:
            gate_names = ['']
        elif rnn_type in ['LSTM', 'CuDNNLSTM']:
            gate_names = ['i', 'f', 'c', 'o']
        elif rnn_type in ['GRU', 'CuDNNGRU']:
            gate_names = ['z', 'r', 'h']

        if ('CuDNN' in rnn_type) or rnn_cell.use_bias:
            kernel_types = ['kernel', 'recurrent_kernel', 'bias']
        else:
            kernel_types = ['kernel', 'recurrent_kernel']

        return rnn_type, gate_names, kernel_types

    rnn_type, gate_names, kernel_types = _get_cell_info(rnn_cell)

    if TF_KERAS and not concat_gates:
        print(warn_str +
              "getting weights per-gate not supported for tf.keras " +
              "implementations; fetching per concat_gates==True instead")
        concat_gates = True
    if not concat_gates and gate_names[0] == '':
        print(warn_str + rnn_type + " is not a gated RNN; fetching per " +
              "concat_gates==True instead")
        concat_gates = True

    if concat_gates:
        if as_tensors:
            return [getattr(rnn_cell, w_type) for w_type in kernel_types]
        try:
            return rnn_cell.get_weights()
        except Exception:
            return K.batch_get_value(rnn_cell.weights)

    if 'GRU' in rnn_type:
        kernel_types = ['kernel', 'recurrent_kernel', 'input_bias']
    rnn_weights = []
    for w_type in kernel_types:
        rnn_weights.append([])
        for g_name in gate_names:
            rnn_weights[-1].append(getattr(rnn_cell, w_type + '_' + g_name))

    if as_tensors:
        return rnn_weights
    else:
        for weight_idx in range(len(rnn_weights)):
            for gate_idx in range(len(rnn_weights[weight_idx])):
                rnn_weights[weight_idx][gate_idx] = K.eval(
                    rnn_weights[weight_idx][gate_idx])
        return rnn_weights
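A hypothetical call (the model and layer index are assumptions): fetching per-gate NumPy arrays from an LSTM cell.

# Hypothetical usage: model.layers[1] is assumed to be an LSTM layer.
cell = model.layers[1].cell
per_gate = _get_cell_weights(cell, as_tensors=False, concat_gates=False)
# With standalone Keras cells, per_gate[0] holds the kernels per gate (i, f, c, o);
# under tf.keras the function falls back to concatenated weights with a warning.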
 def testLinearSolverSolveForX(self, x1, y1, x2, y2, x, y):
     """
 Verifies that TFLinearEquationSolver solves the given equations correctly
 for X.
 """
     solver = clustering_centroids.TFLinearEquationSolver(
         float(x1), float(y1), float(x2), float(y2))
     for_x = solver.solve_for_x(y)
     self.assertAlmostEqual(K.batch_get_value([for_x])[0], x)
 def testRandomClusterCentroidsWithSparsityPreservation(
         self, weights, number_of_clusters):
     dbci = clustering_centroids.RandomCentroidsInitialisation(
         weights, number_of_clusters, True)
     calc_centroids = K.batch_get_value([dbci.get_cluster_centroids()])[0]
     self.assertContainsSubset(
         [0.],
         calc_centroids,
         msg="The centroids must include the zero-point cluster")
 def _save_model(self, epoch, logs):
     # Save the model with super
     super(ModelCheckpointWorkAround, self)._save_model(epoch, logs)
     if self.save_optimizer:
         # Save the optimizer
         folder = os.path.dirname(self._get_file_path(epoch, logs))
         symbolic_weights = getattr(self.model.optimizer, 'weights')
         weight_values = K.batch_get_value(symbolic_weights)
         with open(os.path.join(folder, 'optimizer.pkl'), 'wb') as f:
             pkl.dump(weight_values, f)
Example n. 21
 def set_model(self, model):
     """绑定模型,并初始化参数
     """
     super(ExponentialMovingAverage, self).set_model(model)
     self.ema_weights = [K.zeros(K.shape(w)) for w in model.weights]
     self.old_weights = K.batch_get_value(model.weights)
     K.batch_set_value(zip(self.ema_weights, self.old_weights))
     self.updates = []
     for w1, w2 in zip(self.ema_weights, model.weights):
         op = K.moving_average_update(w1, w2, self.momentum)
         self.updates.append(op)
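The set_model hook above only builds the moving-average update ops; a hedged sketch of one way to trigger them each batch (the on_batch_end hook is an assumption about how the callback is wired up):

 def on_batch_end(self, batch, logs=None):
     # Hypothetical hook: evaluating the ops applies one moving-average step,
     # the same trick as the K.batch_get_value(slow_updates) call in Example n. 2.
     K.batch_get_value(self.updates)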
Example n. 22
 def set_kernal(self, kernal_values):
     params = self.kernel
     if not params:
         return
     weight_value_tuples = []
     param_values = K.batch_get_value(params)
     if param_values.shape != kernal_values.shape:
         raise ValueError('Layer kernel shape ' +
                          str(param_values.shape) +
                          ' not compatible with '
                          'provided kernel shape ' + str(kernal_values.shape))
     weight_value_tuples.append((params, kernal_values))
     K.batch_set_value(weight_value_tuples)
Example n. 23
 def set_bias(self, kernal_bias):
     if not self.use_bias:
         return
     params = self.bias
     if not params:
         return
     weight_value_tuples = []
     param_values = K.batch_get_value(params)
     if param_values.shape != kernal_bias.shape:
         raise ValueError('Layer bias shape ' +
                          str(param_values.shape) +
                          ' not compatible with '
                          'provided bias shape ' + str(kernal_bias.shape))
     weight_value_tuples.append((params, kernal_bias))
     K.batch_set_value(weight_value_tuples)
Example n. 24
def save_weights(cfg, submodel_settings, mdl, ticker_name=''):
    print(f"model> trying to save weights ...")
    pth_submodel = pathlib.Path(
        f"{cfg.model.base_dir}/{submodel_settings.id}/{ticker_name}")
    f_model_weights = pth_submodel.joinpath(cfg.model.model_weights_file_name)
    f_optimizer_weights = pth_submodel.joinpath(
        cfg.model.optimizer_weights_file_name)
    mkdirs(pth_submodel)
    mdl.save_weights(os.fspath(f_model_weights))
    print(f"model> saved model weights to '{f_model_weights.resolve()}'")
    with open(f_optimizer_weights.resolve(), 'wb') as f:
        pickle.dump(K.batch_get_value(getattr(mdl.optimizer, 'weights')), f)
        print(
            f"model> saved optimizer weights to '{f_optimizer_weights.resolve()}'"
        )
Example n. 25
def get_weight_grad(model, x, y, sample_weight=None, learning_phase=0):
    def _process_input_data(x, y, sample_weight, model):
        iterator = data_adapter.single_batch_iterator(
            model.distribute_strategy, x, y, sample_weight, class_weight=None)
        data = next(iterator)
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
        return x, y, sample_weight

    def _clip_scale_grads(strategy, tape, optimizer, loss, params):
        with tape:
            if isinstance(optimizer, lso.LossScaleOptimizer):
                loss = optimizer.get_scaled_loss(loss)

        gradients = tape.gradient(loss, params)

        aggregate_grads_outside_optimizer = (
            optimizer._HAS_AGGREGATE_GRAD and not isinstance(
                strategy.extended,
                parameter_server_strategy.ParameterServerStrategyExtended))

        if aggregate_grads_outside_optimizer:
            gradients = optimizer._aggregate_gradients(zip(gradients, params))
        if isinstance(optimizer, lso.LossScaleOptimizer):
            gradients = optimizer.get_unscaled_gradients(gradients)

        gradients = optimizer._clip_gradients(gradients)
        return gradients

    x, y, sample_weight = _process_input_data(x, y, sample_weight, model)

    with tf.GradientTape() as tape:
        y_pred = model(x, training=bool(learning_phase))
        loss = tf.keras.losses.CategoricalCrossentropy()(y, y_pred)

        # loss = model.compiled_loss(y, y_pred, sample_weight,
        #                            regularization_losses=model.losses)

    gradients = _clip_scale_grads(model.distribute_strategy, tape,
                                  model.optimizer, loss,
                                  model.trainable_weights)
    gradients = K.batch_get_value(gradients)
    return gradients
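A minimal usage sketch (x_batch and y_batch are assumed NumPy arrays matching the model's inputs and one-hot targets): the returned list lines up index-for-index with model.trainable_weights.

# Hypothetical usage of get_weight_grad.
grads = get_weight_grad(model, x_batch, y_batch)
for w, g in zip(model.trainable_weights, grads):
    print(w.name, g.shape)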
Example n. 26
    def train(self, env_name, batch_size=500, policy_epsilon=0.2):
        """Train a model"""
        # initiate training loop

        train_vars = {
            'batch_size': batch_size,
            'policy_epsilon': policy_epsilon
        }

        timestr = time.strftime("%Y%m%d-%H%M%S") + "_" + str(env_name)
        tensorboard = TensorBoard(log_dir='./Graph/{}'.format(timestr),
                                  histogram_freq=0,
                                  write_graph=True,
                                  write_images=False)
        self.dqn.fit(self.env,
                     nb_max_start_steps=nb_max_start_steps,
                     nb_steps=nb_steps,
                     visualize=False,
                     verbose=2,
                     start_step_policy=self.start_step_policy,
                     callbacks=[tensorboard])

        self.policy.eps = policy_epsilon

        self.dqn.save_weights("dqn_{}_model.h5".format(env_name),
                              overwrite=True)

        # Save memory
        pickle.dump(self.dqn.memory,
                    open("train_memory_{}.p".format(env_name), "wb"))

        # Save optimizer weights
        symbolic_weights = getattr(self.dqn.trainable_model.optimizer,
                                   'weights')
        optim_weight_values = K.batch_get_value(symbolic_weights)
        pickle.dump(optim_weight_values,
                    open('optimizer_weights_{}.p'.format(env_name), "wb"))

        # # Dump dqn
        # pickle.dump(self.dqn, open( "dqn_{}.p".format(env_name), "wb" ))

        # Finally, evaluate our algorithm for 5 episodes.
        self.dqn.test(self.env, nb_episodes=5, visualize=False)
Example n. 27
    def save_weights_as_checkpoint(self, filename, mapping=None):
        """根据mapping将权重保存为checkpoint格式
        """
        mapping = mapping or self.variable_mapping()
        mapping = {self.prefixed(k): v for k, v in mapping.items()}
        mapping = {k: v for k, v in mapping.items() if k in self.layers}

        with tf.Graph().as_default():
            all_variables, all_values = [], []
            for layer, variables in mapping.items():
                layer = self.layers[layer]
                values = K.batch_get_value(layer.trainable_weights)
                for name, value in zip(variables, values):
                    variable, value = self.create_variable(name, value)
                    all_variables.append(variable)
                    all_values.append(value)
            with tf.Session() as sess:
                K.batch_set_value(zip(all_variables, all_values))
                saver = tf.train.Saver()
                saver.save(sess, filename)
 def saveOptimizer(self, optimizer, fname):
     f = h5py.File(fname, mode='w')
     optimizer_weights_group = f.create_group('optimizer_weights')
     symbolic_weights = getattr(optimizer, 'weights')
     weight_values = K.batch_get_value(symbolic_weights)
     weight_names = []
     for w, val in zip(symbolic_weights, weight_values):
         name = str(w.name)
         weight_names.append(name.encode('utf8'))
     optimizer_weights_group.attrs['weight_names'] = weight_names
     for name, val in zip(weight_names, weight_values):
         param_dset = optimizer_weights_group.create_dataset(
             name, val.shape, dtype=val.dtype)
         if not val.shape:
             # scalar
             param_dset[()] = val
         else:
             param_dset[:] = val
     f.flush()
     f.close()
    def __init__(self, layer):
        '''
        # Arguments
            layer: an instance of a Convolution2D layer whose configuration
                   will be used to initialize DConvolution2D(input_shape,
                   output_shape, weights)
        '''
        self.layer = layer

        #weights = layer.get_weights()
        weights = K.batch_get_value(layer.weights)  # WORK-AROUND FOR TF BUG

        W = weights[0]
        b = weights[1]
        nb_up_filter = W.shape[3]
        nb_up_row = W.shape[0]
        nb_up_col = W.shape[1]
        input = Input(shape=layer.input_shape[1:])
        output = Conv2D(filters=nb_up_filter,
                        kernel_size=(nb_up_row, nb_up_col),
                        padding='same',
                        weights=[W, b])(input)
        self.up_func = K.function([input, K.learning_phase()], output)

        # Flip W horizontally and vertically,
        # and set down_func for DConvolution2D
        # For TF2.2 W=(kernel_row,kernel_col,in_channels,out_channels)
        W = np.transpose(W, (0, 1, 3, 2))  #swap in/out channels
        W = W[::-1, ::-1, :, :]  # reverse elements in kernel

        nb_down_filter = W.shape[3]
        nb_down_row = W.shape[0]
        nb_down_col = W.shape[1]
        b = np.zeros(nb_down_filter)
        input = Input(shape=layer.output_shape[1:])
        output = Conv2D(filters=nb_down_filter,
                        kernel_size=(nb_down_row, nb_down_col),
                        padding='same',
                        weights=[W, b])(input)
        self.down_func = K.function([input, K.learning_phase()], output)
Example n. 30
def modify_set_weights(new_model, weights):
    if len(new_model.weights) != len(weights):
        raise ValueError('You called `set_weights(weights)` on layer "' +
                         new_model.name + '" with a weight list of length ' +
                         str(len(weights)) + ', but the layer was expecting ' +
                         str(len(new_model.weights)) +
                         ' weights. Provided weights: ' + str(weights)[:50] +
                         '...')
    if not new_model.weights:
        return
    weight_value_tuples = []
    param_values = backend.batch_get_value(new_model.weights)
    r = 0
    l = len(new_model.weights)
    for pv, p, w in zip(param_values, new_model.weights, weights):
        if r == l - 2:  # manually adjust how the weights of the last two layers are assigned
            new_18 = w
            old_18 = p
            # layer_18 = pv
            # weight_value_tuples.append((w, p))
        elif r == l - 1:
            new_19 = w
            old_19 = p
            # layer_19 = pv
            weight_value_tuples.append((old_18, new_19))
            weight_value_tuples.append((old_19, new_18))
            break
        elif pv.shape != w.shape:
            raise ValueError('Layer weight shape ' + str(pv.shape) +
                             ' not compatible with '
                             'provided weight shape ' + str(w.shape))
        else:
            weight_value_tuples.append((p, w))
        r += 1
    backend.batch_set_value(weight_value_tuples)
    return new_model