コード例 #1
0
 def _create_lagrangian_multipliers(optimizer_dict, doo_ds):
     """Create one Lagrangian-multiplier variable per inner-problem state variable.

     :param optimizer_dict: object exposing ``state``, the iterable of inner-problem
                            variables (presumably ``tf.Variable``s — confirm at caller)
     :param doo_ds: derivatives of the outer objective w.r.t. each state variable;
                    entries may be ``None`` — ``utils.val_or_zero`` substitutes zeros
     :return: list of multiplier variables, one per state variable
     """
     lag_mul = [slot_creator.create_slot(v.initialized_value(),
                                         utils.val_or_zero(der, v), 'alpha')
                for v, der in zip(optimizer_dict.state, doo_ds)]
     # Register each multiplier in its dedicated collection (plain loop: this is
     # done for the side effect, not to build a list).
     for lm in lag_mul:
         tf.add_to_collection(utils.GraphKeys.LAGRANGIAN_MULTIPLIERS, lm)
     # this prevents the 'automatic' initialization with tf.global_variables_initializer.
     utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, *lag_mul)
     return lag_mul
コード例 #2
0
ファイル: hyper_gradients.py プロジェクト: AmirooR/FAR-HO
    def _create_lagrangian_multipliers(optimizer_dict, doo_ds):
        """Create Lagrangian multipliers plus explicit (re-)initialization ops.

        Each multiplier slot is created with a zero initial value; its real
        value is set by a separate ``assign`` op so the multipliers can be
        re-initialized at will without re-building the graph.

        :param optimizer_dict: object exposing ``state``, the iterable of
                               inner-problem variables
        :param doo_ds: derivatives of the outer objective w.r.t. each state
                       variable; entries may be ``None`` — ``utils.val_or_zero``
                       substitutes zeros
        :return: pair ``(lag_mul, init_ops)``: the multiplier variables and
                 the assign ops that initialize them
        """
        lag_mul = []
        init_ops = []
        for v, der in zip(optimizer_dict.state, doo_ds):
            # Slot starts at zero (val_or_zero with None); the assign op below
            # carries the actual initialization value.
            lm = slot_creator.create_slot(v.initialized_value(),
                                          utils.val_or_zero(None, v), 'alpha')
            lag_mul.append(lm)
            init_ops.append(lm.assign(utils.val_or_zero(der, v)))
        # Register each multiplier in its dedicated collection (plain loop: this
        # is done for the side effect, not to build a list).
        for lm in lag_mul:
            tf.add_to_collection(utils.GraphKeys.LAGRANGIAN_MULTIPLIERS, lm)
        # this prevents the 'automatic' initialization with tf.global_variables_initializer.
        utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES,
                                     *lag_mul)
        return lag_mul, init_ops
コード例 #3
0
ファイル: hyper_gradients.py プロジェクト: codealphago/FAR-HO
 def _create_lagrangian_multipliers(optimizer_dict, doo_ds):
     """Create one Lagrangian-multiplier variable per inner-problem state variable.

     NOTE(review): unlike sibling variants, this one seeds the slot from ``v``
     directly rather than ``v.initialized_value()`` — assumed intentional here.

     :param optimizer_dict: object exposing ``state``, the iterable of inner-problem
                            variables
     :param doo_ds: derivatives of the outer objective w.r.t. each state variable;
                    entries may be ``None`` — ``utils.val_or_zero`` substitutes zeros
     :return: list of multiplier variables, one per state variable
     """
     lag_mul = [slot_creator.create_slot(v, utils.val_or_zero(der, v), 'alpha')
                for v, der in zip(optimizer_dict.state, doo_ds)]
     # Register each multiplier in its dedicated collection (plain loop: this is
     # done for the side effect, not to build a list).
     for lm in lag_mul:
         tf.add_to_collection(utils.GraphKeys.LAGRANGIAN_MULTIPLIERS, lm)
     # this prevents the 'automatic' initialization with tf.global_variables_initializer.
     utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, *lag_mul)
     return lag_mul
コード例 #4
0
 def _create_hypergradient_from_dodh(hyper, doo_dhypers):
     """Create a hyper-gradient variable for a single hyperparameter.

     :param hyper: the hyperparameter this gradient belongs to
     :param doo_dhypers: initialization value — the derivative of the outer
                         objective w.r.t. this hyperparameter (``None`` maps
                         to zeros via ``utils.val_or_zero``)
     :return: the hyper-gradient slot variable
     """
     initial_value = utils.val_or_zero(doo_dhypers, hyper)
     hyper_grad = slot_creator.create_slot(hyper, initial_value, 'hypergradient')
     # Keep it out of GLOBAL_VARIABLES so tf.global_variables_initializer
     # never touches it.
     utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, hyper_grad)
     return hyper_grad
コード例 #5
0
ファイル: hyper_gradients.py プロジェクト: codealphago/FAR-HO
    def _create_hypergradient(hyper, doo_dhypers):
        """Create a hyper-gradient variable for a single hyperparameter.

        :param hyper: the relative hyperparameter
        :param doo_dhypers: initialization value — the derivative of the outer
                            objective w.r.t. this hyperparameter (``None`` maps
                            to zeros via ``utils.val_or_zero``)
        :return: the hyper-gradient slot variable
        """
        initial_value = utils.val_or_zero(doo_dhypers, hyper)
        hyper_grad = slot_creator.create_slot(hyper, initial_value, 'hypergradient')
        # Keep it out of GLOBAL_VARIABLES so tf.global_variables_initializer
        # never touches it.
        utils.remove_from_collection(utils.GraphKeys.GLOBAL_VARIABLES, hyper_grad)
        return hyper_grad