Example #1
def initialize_uninitialized(sess):
    global_vars = tf.global_variables()
    #for i in global_vars:
    #   print(i)
    is_not_initialized = sess.run(
        [tf.is_variable_initialized(var) for var in global_vars])
    not_initialized_vars = [
        v for (v, f) in zip(global_vars, is_not_initialized) if not f
    ]

    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))
Example #2
def initialize_uninitialized(sess):
    global_vars = tf.global_variables()
    is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])
    not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]

    # for i in not_initialized_vars: # only for testing
    #    print(i.name)

    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))

    return
Example #3
  def collect_variables(self, vs):
    """Collects model variables.

    Args:
      vs: Tensorflow variables.

    Populates self.var_list with model variables and self.init_op with
    variables' initializer. This function is only called once with __call__.
    """
    # All variables.
    self.var_list = vs
    self.init_op = tf.variables_initializer(var_list=self.var_list)
Example #4
    def start_interaction(self, env_fns, dynamics, nlump=2):
        self.loss_names, self._losses = zip(*list(self.to_report.items()))

        params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        if MPI.COMM_WORLD.Get_size() > 1:
            trainer = MpiAdamOptimizer(learning_rate=self.ph_lr,
                                       comm=MPI.COMM_WORLD)
        else:
            trainer = tf.train.AdamOptimizer(learning_rate=self.ph_lr)
        gradsandvars = trainer.compute_gradients(self.total_loss, params)
        self._train = trainer.apply_gradients(gradsandvars)

        if MPI.COMM_WORLD.Get_rank() == 0:
            getsess().run(
                tf.variables_initializer(
                    tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)))
        bcast_tf_vars_from_root(
            getsess(), tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))

        self.all_visited_rooms = []
        self.all_scores = []
        self.nenvs = nenvs = len(env_fns)
        self.nlump = nlump
        self.lump_stride = nenvs // self.nlump
        self.envs = [
            VecEnv(env_fns[l * self.lump_stride:(l + 1) * self.lump_stride],
                   spaces=[self.ob_space, self.ac_space])
            for l in range(self.nlump)
        ]

        self.rollout = Rollout(ob_space=self.ob_space,
                               ac_space=self.ac_space,
                               nenvs=nenvs,
                               nsteps_per_seg=self.nsteps_per_seg,
                               nsegs_per_env=self.nsegs_per_env,
                               nlumps=self.nlump,
                               envs=self.envs,
                               policy=self.stochpol,
                               int_rew_coeff=self.int_coeff,
                               ext_rew_coeff=self.ext_coeff,
                               record_rollouts=self.use_recorder,
                               dynamics=dynamics)

        self.buf_advs = np.zeros((nenvs, self.rollout.nsteps), np.float32)
        self.buf_rets = np.zeros((nenvs, self.rollout.nsteps), np.float32)

        if self.normrew:
            self.rff = RewardForwardFilter(self.gamma)
            self.rff_rms = RunningMeanStd()

        self.step_count = 0
        self.t_last_update = time.time()
        self.t_start = time.time()
Example #5
    def collect_variables(self):
        """Collects model variables.

        Populates self.var_list with model variables and self.init_op with
        variables' initializer. This function is only called once with __call__.
        """
        self.var_list = [
            v for v in tf.global_variables()
            if "classification_network" in v.name
        ]
        self.init_op = tf.variables_initializer(var_list=self.var_list)
Example #6
  def collect_variables(self):
    """Collects model variables.

    Populates variable lists with model variables and self.init_op with
    variables' initializer. This function is only called once with __call__.
    """
    self.var_list_classification += self.dram_cell.var_list_classification
    self.var_list_location += self.dram_cell.var_list_location
    self.var_list = (
        self.var_list_classification + self.var_list_location)

    self.init_op = tf.variables_initializer(var_list=self.var_list)
Example #7
    def __init__(self, game):
        self.nnet = onnet(game, args)
        self.board_x, self.board_y = game.getBoardSize()
        self.action_size = game.getActionSize()

        self.sess = tf.Session(graph=self.nnet.graph)
        self.saver = None
        with tf.Session() as temp_sess:
            temp_sess.run(tf.global_variables_initializer())
        self.sess.run(
            tf.variables_initializer(
                self.nnet.graph.get_collection('variables')))
Example #8
def get_opt_reinit_op(opt, var_list, global_step):
    opt_slots = [
        opt.get_slot(var, name) for name in opt.get_slot_names()
        for var in var_list
    ]
    if isinstance(opt, tf.train.AdamOptimizer):
        beta_powers = opt._get_beta_accumulators()
        opt_slots.extend(
            beta_powers
        )  #[opt._beta1_power, opt._beta2_power])  #pylint: disable = W0212
    all_opt_variables = opt_slots + var_list + [global_step]
    opt_reinit_op = tf.variables_initializer(all_opt_variables)
    return opt_reinit_op
Example #9
    def fit(self, X):
        self.n_visible_units = X.shape[1]

        # Initialize RBM parameters
        self._build_model()

        sess.run(tf.variables_initializer([self.W, self.c, self.b]))

        if self.optimization_algorithm == 'sgd':
            self._stochastic_gradient_descent(X)
        else:
            raise ValueError("Invalid optimization algorithm.")
        return
Example #10
def create_reset_metric(metric, variable_scope, **metric_args):
    """
    Creates tensors of a metric (e.g., running mean), its update, and reset operation.
    :param metric: The metric and its tensors to create.
    :param variable_scope: The variable scope which is needed to create the reset operation.
    :param metric_args: The args used for generating the metric.
    :return: Tensors of the metric, its update, and reset operation.
    """
    with tf.variable_scope(variable_scope):
        metric_op, update_op = metric(**metric_args)
        vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES, variable_scope)
        reset_op = tf.variables_initializer(vars)
    return metric_op, update_op, reset_op
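A hedged usage sketch for the helper above: `sess`, `labels`, `predictions`, and `num_batches` are placeholders, not part of the original example, and the reset op doubles as the initial initialization of the metric's local variables.

# Hypothetical usage of create_reset_metric (assumes a TF1 session `sess` and
# `labels`/`predictions` tensors defined elsewhere).
acc, acc_update, acc_reset = create_reset_metric(
    tf.metrics.accuracy, 'streaming_acc',
    labels=labels, predictions=predictions)

sess.run(acc_reset)                # also initializes the metric's local variables
for _ in range(num_batches):       # one evaluation pass
    sess.run(acc_update)           # accumulate the running accuracy
print(sess.run(acc))               # accuracy over the whole pass
sess.run(acc_reset)                # clear the accumulators before the next pass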
Example #11
    def create_optimizer(self):
        self.learning_rate = tf.train.exponential_decay(self.lr_init,
                                                        self.global_step,
                                                        self.n_epoch,
                                                        self.lr_decay,
                                                        name="learning_rate")
        optimizer = tf.train.AdagradOptimizer(self.learning_rate)
        gradients, variables = zip(*optimizer.compute_gradients(self.loss))
        gradients, _ = tf.clip_by_global_norm(gradients, self.clip_norm)
        self.train_step = optimizer.apply_gradients(zip(gradients, variables),
                                                    name="train_step")

        self.reset_optimizer_op = tf.variables_initializer(
            optimizer.variables(), name="reset_optimizer")
Example #12
    def load_ckpt(self):
        """Load a model checkpoint
        In train mode, load the latest checkpoint from the checkpoint folder if it exists; otherwise, run initializer.
        In other modes, load from the specified checkpoint file.
        """
        if self.mode in ['train_noval', 'train_with_val']:
            self.last_ckpt = None
            if self.opts['train_mode'] == 'fine-tune':
                # In fine-tuning mode, we just want to load the trained params from the file and that's it...
                assert(tf.train.checkpoint_exists(self.opts['ckpt_path']))
                if self.opts['verbose']:
                    print(f"Initializing from pre-trained model at {self.opts['ckpt_path']} for finetuning...\n")
                # ...however, the AdamOptimizer also stores variables in the graph, so reinitialize them as well
                self.sess.run(tf.variables_initializer(self.optim.variables()))
                # Now initialize the trained params with actual values from the checkpoint
                _saver = tf.train.Saver(var_list=tf.trainable_variables())
                _saver.restore(self.sess, self.opts['ckpt_path'])
                if self.opts['verbose']:
                    print("... model initialized")
                self.last_ckpt = self.opts['ckpt_path']
            else:
                # In training mode, we either want to start a new training session or resume from a previous checkpoint
                self.last_ckpt = self.saver.best_checkpoint(self.opts['ckpt_dir'], maximize=False)
                if self.last_ckpt is None:
                    self.last_ckpt = tf.train.latest_checkpoint(self.opts['ckpt_dir'])

                if self.last_ckpt:
                    # We're resuming a session -> initialize the graph with the content of the checkpoint
                    if self.opts['verbose']:
                        print(f"Initializing model from previous checkpoint {self.last_ckpt} to resume training...\n")
                    self.saver.restore(self.sess, self.last_ckpt)
                    if self.opts['verbose']:
                        print("... model initialized")
                else:
                    # Initialize all the variables of the graph
                    if self.opts['verbose']:
                        print(f"Initializing model with random values for initial training...\n")
                    assert (self.mode in ['train_noval', 'train_with_val'])
                    self.sess.run(tf.global_variables_initializer())
                    if self.opts['verbose']:
                        print("... model initialized")
        else:
            # Initialize the graph with the content of the checkpoint
            self.last_ckpt = self.opts['ckpt_path']
            assert(self.last_ckpt is not None)
            if self.opts['verbose']:
                print(f"Loading model checkpoint {self.last_ckpt} for eval or testing...\n")
            self.saver.restore(self.sess, self.last_ckpt)
            if self.opts['verbose']:
                print("... model loaded")
Example #13
    def from_dict(cls, dct_to_load):
        weights = {var_name: dct_to_load.pop(var_name) for var_name in cls._get_weight_variables_names()}
        unsupervised_dbn_dct = dct_to_load.pop('unsupervised_dbn')
        num_classes = dct_to_load.pop('num_classes')

        instance = cls(**dct_to_load)

        setattr(instance, 'unsupervised_dbn', instance.unsupervised_dbn_class.from_dict(unsupervised_dbn_dct))
        setattr(instance, 'num_classes', num_classes)

        # Initialize RBM parameters
        instance._build_model(weights)
        sess.run(tf.variables_initializer([getattr(instance, name) for name in cls._get_weight_variables_names()]))
        return instance
Example #14
  def update(self, num_training_epochs=10, batch_size=128, verbose=False):
    """Trains the neural net.

    Randomly samples data from the replay buffer. An update resets the optimizer
    state.

    Args:
      num_training_epochs: An epoch represents one pass over the training data.
        The total number of training iterations this corresponds to is
        num_training_epochs * len(replay_buffer)/batch_size.
      batch_size: the number of examples sampled from the replay buffer and
        used for each net training iteration.
      verbose: whether to print training metrics during training.

    Returns:
      A list of length num_training_epochs. Each element of this list is
        another list containing LossValues tuples, one for every training
        iteration.
    """
    # The AlphaZero pseudocode resets the optimizer state before training.
    optim = self.bot.evaluator.optimizer
    tf.variables_initializer(optim.variables())
    tf.train.get_or_create_global_step().assign(0)

    num_epoch_iters = math.ceil(len(self.replay_buffer) / float(batch_size))
    losses = []
    for epoch in range(num_training_epochs):
      epoch_losses = []
      for _ in range(num_epoch_iters):
        train_data = self.replay_buffer.sample(batch_size)
        epoch_losses.append(self.bot.evaluator.update(train_data))

      losses.append(epoch_losses)
      if verbose:
        self._print_mean_epoch_losses(epoch, epoch_losses)

    return losses
Example #15
def initialize_uninitialized_variables(sess):
    """
    Only initialize the weights that have not yet been initialized by other
    means, such as importing a metagraph and a checkpoint. It's useful when
    extending an existing model.
    """
    uninit_vars = []
    uninit_tensors = []
    for var in tf.global_variables():
        uninit_vars.append(var)
        uninit_tensors.append(tf.is_variable_initialized(var))
    uninit_bools = sess.run(uninit_tensors)
    uninit = zip(uninit_bools, uninit_vars)
    uninit = [var for init, var in uninit if not init]
    sess.run(tf.variables_initializer(uninit))
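The sketch below illustrates the "extending an existing model" case the docstring mentions; the checkpoint path and the extra variable are hypothetical.

import tensorflow as tf

sess = tf.Session()
saver = tf.train.import_meta_graph('model.ckpt.meta')   # hypothetical checkpoint files
saver.restore(sess, 'model.ckpt')                        # restored variables are now initialized
extra = tf.Variable(tf.zeros([10]), name='extra_bias')   # newly added, still uninitialized
initialize_uninitialized_variables(sess)                 # initializes only `extra`; restored weights stay untouched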
Example #16
    def from_dict(cls, dct_to_load):
        weights = {var_name: dct_to_load.pop(var_name) for var_name in cls._get_weight_variables_names()}

        _activation_function_class = dct_to_load.pop('_activation_function_class')
        n_visible_units = dct_to_load.pop('n_visible_units')

        instance = cls(**dct_to_load)
        setattr(instance, '_activation_function_class', _activation_function_class)
        setattr(instance, 'n_visible_units', n_visible_units)

        # Initialize RBM parameters
        instance._build_model(weights)
        sess.run(tf.variables_initializer([getattr(instance, name) for name in cls._get_weight_variables_names()]))

        return instance
Example #17
    def _fine_tuning(self, data, _labels):
        self.num_classes = self._determine_num_output_neurons(_labels)
        if self.num_classes == 1:
            _labels = np.expand_dims(_labels, -1)

        self._build_model()
        sess.run(tf.variables_initializer([self.W, self.b]))

        labels = self._transform_labels_to_network_format(_labels)

        if self.verbose:
            print("[START] Fine tuning step:")
        self._stochastic_gradient_descent(data, labels)
        if self.verbose:
            print("[END] Fine tuning step")
Example #18
    def do_optimizer_initializations(self, optimizer):
        """Initializes gradient-based TF optimizers."""

        self.optimizer_weights = [optimizer]

        opt_library = optimizer.split(':')[0]
        opt_method = optimizer.split(':')[1]

        # optimization step
        self.opt, self.optim_step = self.optimizers_options[opt_library](
            opt_method)

        self.opt_vars = self.opt.variables()
        self.reset_optimizer_op = tf.variables_initializer(self.opt_vars)
        self.dice_sess.run(self.reset_optimizer_op)
Example #19
def embedding(vlist, rlist, metaPath, spSize):
    vs = []
    for i in range(0, len(vlist)):
        v = tf.Variable(rlist[i], name=vlist[i].name.split('/')[0])  # x/relu
        vs.append(v)

    with tf.Session() as sess:
        tf.variables_initializer(vs).run()  # assign to vs
        saver = tf.train.Saver(vs)
        saver.save(sess, './log/model.ckpt', 0)  # only contain vs

    # get writer and config
    summary_writer = tf.summary.FileWriter('./log/')
    config = projector.ProjectorConfig()

    # set config
    for v in vs:
        embed = config.embeddings.add()
        embed.tensor_name = v.name
        embed.metadata_path = metaPath + 'meta.tsv'
        embed.sprite.image_path = metaPath + 'meta.png'
        embed.sprite.single_image_dim.extend(spSize)
    # write
    projector.visualize_embeddings(summary_writer, config)
Example #20
def ir2tf(imp_resp, shape, sess, dim=None, is_real=True):
    """Compute the transfer function of an impulse response (IR).
    This function makes the necessary correct zero-padding, zero
    convention, correct fft2, etc... to compute the transfer function
    of IR. To use with unitary Fourier transform for the signal (ufftn
    or equivalent).

    Parameters
    ----------
    imp_resp : ndarray
        The impulse responses.
    shape : tuple of int
        A tuple of integer corresponding to the target shape of the
        transfer function.
    dim : int, optional
        The last axis along which to compute the transform. All
        axes by default.
    is_real : boolean, optional
       If True (default), imp_resp is supposed real and the Hermitian property
       is used with rfftn Fourier transform.

    Returns
    -------
    y : complex ndarray
       The transfer function of shape ``shape``.
    """
    if not dim:
        dim = len(imp_resp.shape)
    # Zero padding and fill
    irpadded = tf.Variable(tf.zeros(shape))
    sess.run(tf.variables_initializer([irpadded]))
    sess.run(
        tf.assign(irpadded[tuple([slice(0, s) for s in imp_resp.shape])],
                  imp_resp))

    # Roll for zero convention of the fft to avoid the phase
    # problem. Work with odd and even size.
    for axis, axis_size in enumerate(imp_resp.shape):
        if axis >= len(imp_resp.shape) - dim:
            irpadded = tf.roll(
                irpadded,
                shift=-tf.cast(tf.floor(tf.cast(axis_size, tf.int32) / 2),
                               tf.int32),
                axis=axis)
    if is_real:
        return tf.signal.rfft2d(irpadded)
    else:
        return tf.fft2d(tf.cast(irpadded, tf.complex64))
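A minimal, hedged usage sketch of ir2tf with a made-up 3x3 box-blur kernel, assuming the same TF1-style session handling as the function itself.

import numpy as np
import tensorflow as tf

sess = tf.Session()
psf = np.ones((3, 3), dtype=np.float32) / 9.0   # hypothetical 3x3 box-blur impulse response
tf_h = ir2tf(psf, shape=(64, 64), sess=sess)    # transfer function on a 64x64 grid
print(sess.run(tf_h).shape)                     # (64, 33): Hermitian half-spectrum from rfft2d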
Example #21
def initialize_uninitilized_global_variables(sess):
    # from https://github.com/tensorflow/cleverhans/tree/master/cleverhans
    # List all global variables
    global_vars = tf.global_variables()
    # Find initialized status for all variables
    is_var_init = [tf.is_variable_initialized(var) for var in global_vars]
    is_initialized = sess.run(is_var_init)

    # List all variables that were not initialized previously
    not_initialized_vars = [
        var for (var, init) in zip(global_vars, is_initialized) if not init
    ]

    # Initialize all uninitialized variables found, if any
    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))
Example #22
def restore(path: RichPath,
            is_train: bool,
            hyper_overrides: Optional[Dict[str, Any]] = None) -> Model:
    saved_data = path.read_as_pickle()

    if hyper_overrides is not None:
        saved_data['hyperparameters'].update(hyper_overrides)

    model_class = get_model_class_from_name(saved_data['model_type'])
    model = model_class(saved_data['hyperparameters'],
                        saved_data.get('run_name'))
    model.query_metadata.update(saved_data['query_metadata'])
    for (language, language_metadata
         ) in saved_data['per_code_language_metadata'].items():
        model.per_code_language_metadata[language] = language_metadata
    model.make_model(is_train=is_train)

    variables_to_initialize = []
    with model.sess.graph.as_default():
        with tf.name_scope("restore"):
            restore_ops = []
            used_vars = set()
            for variable in sorted(model.sess.graph.get_collection(
                    tf.GraphKeys.GLOBAL_VARIABLES),
                                   key=lambda v: v.name):
                used_vars.add(variable.name)
                if variable.name in saved_data['weights']:
                    # print('Initializing %s from saved value.' % variable.name)
                    restore_ops.append(
                        variable.assign(saved_data['weights'][variable.name]))
                else:
                    print(
                        'Freshly initializing %s since no saved value was found.'
                        % variable.name)
                    variables_to_initialize.append(variable)
            for var_name in sorted(saved_data['weights']):
                if var_name not in used_vars:
                    if var_name.endswith('Adam:0') or var_name.endswith(
                            'Adam_1:0') or var_name in [
                                'beta1_power:0', 'beta2_power:0'
                            ]:
                        continue
                    print('Saved weights for %s not used by model.' % var_name)
            restore_ops.append(
                tf.variables_initializer(variables_to_initialize))
            model.sess.run(restore_ops)
    return model
Example #23
  def train(self, num_iterations=100, learning_rate=1.0, plot_results=True,
            optimizer=tf.train.GradientDescentOptimizer):
    with self._loss.graph.as_default():
      opt = optimizer(learning_rate)
      train_op = opt.minimize(self._loss)
      local_init_op = tf.group(
          tf.variables_initializer(opt.variables()),
          tf.local_variables_initializer())
      if self._session is None:
        self._session = tf.Session()
        with self._session.as_default():
          self._session.run(tf.global_variables_initializer())
          self._session.run(tf.tables_initializer())
          tf.train.start_queue_runners()

    with self._session.as_default():
      local_init_op.run()
      iterations = []
      metrics = self._metrics or ({},)
      metrics_vals = [collections.defaultdict(list) for _ in self._metrics]

      for i in range(num_iterations + 1):
        _, results = self._session.run((train_op, metrics))
        if (i % 10 == 0) or i == num_iterations:
          print("\r iteration %d: " % i + ", ".join(
                ["%s=%f" % (k, v) for r in results for k, v in r.items()]),
                end='')
          iterations.append(i)
          for metric_val, result in zip(metrics_vals, results):
            for k, v in result.items():
              metric_val[k].append(v)

      for k, v in self._embedding_vars.items():
        self._embeddings[k] = v.eval()

      if plot_results:
        num_subplots = len(metrics)+1
        fig = plt.figure()
        fig.set_size_inches(num_subplots*10, 8)
        for i, metric_vals in enumerate(metrics_vals):
          ax = fig.add_subplot(1, num_subplots, i+1)
          for k, v in metric_vals.items():
            ax.plot(iterations, v, label=k)
          ax.set_xlim([1, num_iterations])
          ax.legend()
      return results
Example #24
    def build_graph(self):
        self.build_datapipeline()

        xb, yb = self.dataset_iterator.get_next()
        logits = self.model(xb)
        self.loss = self.loss_func(yb, logits)

        self.variables = [(v, i)
                          for i, v in enumerate(tf.trainable_variables())
                          if 'dense' in v.name]
        # don't apply to the last dense layer
        self.variables.pop(-1)
        self.vs = [
            tf.random.normal((v.shape.as_list()[-1], 1), mean=0., stddev=1.)
            for v, _ in self.variables
        ]

        assert len(self.variables) > 0
        # spectral norm reg
        grads = tf.gradients(self.loss, tf.trainable_variables())
        new_vs = []
        for (var, idx), v in zip(self.variables, self.vs):
            original_shape = grads[idx].shape
            W_grad = tf.reshape(grads[idx], [-1, var.shape[-1]])
            W = tf.reshape(var, [-1, var.shape[-1]])

            u = W @ v
            v = tf.transpose(W) @ u
            sigma = tf.norm(u, 2) / tf.norm(v, 2)
            reg_value = sigma * (u @ tf.transpose(v))
            W_grad += self.reg_constant * reg_value

            grads[idx] = tf.reshape(W_grad, original_shape)
            new_vs.append(v)
        self.vs = new_vs

        self.acc, self.acc_op = tf.metrics.accuracy(tf.argmax(yb, 1),
                                                    tf.argmax(logits, 1),
                                                    name='acc')
        self.acc_vars = tf.get_collection(tf.GraphKeys.LOCAL_VARIABLES,
                                          scope="acc")
        self.acc_initializer = tf.variables_initializer(var_list=self.acc_vars)

        self.train_op = self.optimizer.apply_gradients(
            zip(grads, tf.trainable_variables()))
Example #25
    def testCreateRegularizer_Sliced(self):
        # Call handler to create regularizer.
        handler = batch_norm_source_op_handler.BatchNormSourceOpHandler(
            _GAMMA_THRESHOLD)
        batch_norm_op_slice = orm.OpSlice(self.batch_norm_op, orm.Slice(0, 3))
        regularizer = handler.create_regularizer(batch_norm_op_slice)

        # Verify regularizer is the gamma tensor.
        with self.cached_session():
            # Initialize the gamma tensor to check value equality.
            with tf.variable_scope('', reuse=tf.AUTO_REUSE):
                gamma_tensor = tf.get_variable('conv1/BatchNorm/gamma')
            init = tf.variables_initializer([gamma_tensor])
            init.run()

            # Verify regularizer is the sliced gamma tensor.
            self.assertAllEqual(gamma_tensor.eval()[0:3],
                                regularizer._gamma.eval())
Example #26
    def fit(self, X):
        """
        Fit a model given data.
        :param X: array-like, shape = (n_samples, n_features)
        :return:
        """
        self.n_visible_units = X.shape[1]

        # Initialize RBM parameters
        self._build_model()

        sess.run(tf.variables_initializer([self.W, self.c, self.b]))

        if self.optimization_algorithm == 'sgd':
            self._stochastic_gradient_descent(X)
        else:
            raise ValueError("Invalid optimization algorithm.")
        return
Example #27
    def update_loss(self, n_task):

        if n_task == 0:
            return

        loss = self.vars['losses'][0] if self.use_orig_loss else self.vars[
            'losses'][n_task - 1]

        penalties = []
        old_vars = self.objs[
            'fisher_old_ws'] if self.use_latest_theta_star else self.saved_wts[
                n_task - 1]
        fisher_vars = self.objs[
            'fisher_diags'] if self.use_latest_theta_star else self.saved_fishers[
                n_task - 1]
        for var, old_var, fisher in zip(self.objs['fisher_ws'], old_vars,
                                        fisher_vars):

            penalties += [
                tf.multiply(fisher, tf.square(tf.subtract(var, old_var)))
            ]

        ewc_penalty = tf.add_n(
            [tf.reduce_sum(penalty) for penalty in penalties])
        new_loss = tf.add(
            loss,
            tf.multiply(tf.constant(self.ewc_const, tf.float32), ewc_penalty))

        self.vars['loss'] = new_loss
        self.vars['losses'][n_task] = new_loss

        orig_var_list = self.vars['orig_var_list']
        # print("Trainable vars: %s" % str(orig_var_list))
        print("Trainable vars:")
        self.print_vars(orig_var_list)
        if self.reset_opt:
            print('Reset opt')
            self.objs['sess'].run(
                tf.variables_initializer(self.objs['opt'].variables()))
        op = self.objs['opt'].minimize(new_loss, var_list=orig_var_list)
        self.vars['train_op'] = op
        self.vars['train_ops'][n_task] = op

        print('Updated train_op and loss')
Example #28
    def build(self, num_inputs, num_outputs, num_targets):

        # build a new graph and session in which training will take place
        self.graph = tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():

            # variables common to all neurons
            self.targets = tf.placeholder(dtype=dtype,
                                          shape=[None, num_targets],
                                          name='targets')
            self.inputs, self.losses, self.neurons, self.train_ops, variables = [], [], [], [], []

            # group neurons based on their inputs
            for each_num_inputs in num_inputs:
                inputs = tf.placeholder(dtype=dtype,
                                        shape=[None, each_num_inputs],
                                        name='inputs')
                self.inputs.append(inputs)

                # group neurons based on their activations
                for activation in self.activations:

                    # construct prediction of neuron
                    y_pred, weights = self._build_perceptron(
                        inputs, num_outputs, activation, self.weight_init)
                    self.neurons.append((weights, activation))

                    # construct loss function
                    loss = reg_loss = self.loss_function(self.targets, y_pred)
                    self.losses.append(loss)
                    if self.regularizer is not None:
                        reg_loss = reg_loss + self.reg_penalty * self.regularizer(
                            weights[0])

                    # construct optimizer
                    sgd = self.optimizer(**self.optimizer_args)
                    self.train_ops.append(
                        sgd.minimize(reg_loss, var_list=weights))
                    variables.extend(weights + sgd.variables())

            # group training ops and initialize variables
            self.train_ops = tf.group(self.train_ops)
            self.sess.run(tf.variables_initializer(variables))
Example #29
def initialize_interdependent_variables(session, vars_list, feed_dict):
    """Initialize a list of variables one at a time, which is useful if
    initialization of some variables depends on initialization of the others.
    """
    vars_left = vars_list
    while len(vars_left) > 0:
        new_vars_left = []
        for v in vars_left:
            try:
                session.run(tf.variables_initializer([v]), feed_dict)
            except tf.errors.FailedPreconditionError:
                new_vars_left.append(v)
        if len(new_vars_left) >= len(vars_left):
            # This can happen if the variables all depend on each other, or more likely if there's
            # another variable outside of the list, that still needs to be initialized. This could be
            # detected here, but life's finite.
            raise Exception("Cycle in variable dependencies, or external precondition unsatisfied.")
        else:
            vars_left = new_vars_left
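A toy illustration of the dependency problem this helper solves (the variables are made up): `b`'s initial value reads `a`, so initializing `b` first fails with FailedPreconditionError and is retried on the next pass.

import tensorflow as tf

sess = tf.Session()
a = tf.Variable(tf.ones([3]), name='a')
b = tf.Variable(2.0 * a, name='b')   # b's initializer reads a's value
initialize_interdependent_variables(sess, [b, a], feed_dict={})
print(sess.run([a, b]))              # [1., 1., 1.] and [2., 2., 2.]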
Example #30
def yolo_non_max_suppression(scores,
                             boxes,
                             classes,
                             max_boxes=10,
                             iou_threshold=0.5):
    max_boxes_tensor = K.variable(
        max_boxes, dtype="int32")  # used by tf.image.non_max_suppression()
    K.get_session().run(tf.variables_initializer(
        [max_boxes_tensor]))  # initialize the variable max_boxes_tensor

    # Use tf.image.non_max_suppression() to get the list of indices
    # corresponding to the boxes we keep
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes,
                                               iou_threshold)

    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)

    return scores, boxes, classes
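A hedged call with made-up boxes, assuming `K` is the TF1 Keras backend as in the function above: the second box overlaps the first with IoU above 0.5, so only two boxes survive.

import tensorflow as tf
from tensorflow.keras import backend as K   # assumption about which Keras backend is in use

scores = tf.constant([0.9, 0.8, 0.3])
boxes = tf.constant([[0., 0., 10., 10.],
                     [1., 1., 11., 11.],     # heavy overlap with the first box -> suppressed
                     [20., 20., 30., 30.]])
classes = tf.constant([0., 0., 1.])
s, b, c = yolo_non_max_suppression(scores, boxes, classes, max_boxes=2)
print(K.get_session().run([s, b, c]))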