Example #1
    def _setup(self, inputs):
        logger.info("Setting up the queue for CPU prefetching ...")
        self.input_placehdrs = [v.build_placeholder_reuse() for v in inputs]
        assert len(self.input_placehdrs) > 0, \
            "BatchQueueInput has to be used with some InputDesc!"

        # prepare placeholders without the first dimension
        placehdrs_nobatch = []
        for p in self.input_placehdrs:
            placehdrs_nobatch.append(
                tf.placeholder(dtype=p.dtype,
                               shape=p.get_shape().as_list()[1:],
                               name=get_op_tensor_name(p.name)[0] +
                               '-nobatch'))

        # dequeue_many requires fully-defined shapes
        shape_err = "Use of BatchQueueInput requires inputs to have fully-defined "
        "shapes except for the batch dimension"
        shapes = []
        for p in placehdrs_nobatch:
            assert p.get_shape().is_fully_defined(), shape_err
            shapes.append(p.get_shape())

        with self.cached_name_scope():
            if self.queue is None:
                self.queue = tf.FIFOQueue(
                    3000, [x.dtype for x in self.input_placehdrs],
                    shapes=shapes,
                    name='input_queue')
            for shp in self.queue.shapes:
                assert shp.is_fully_defined(), shape_err

            self.thread = EnqueueThread(self.queue, self._inf_ds,
                                        placehdrs_nobatch)
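
The placeholders built above deliberately drop the batch dimension: single examples are enqueued one at a time, and dequeue_many later reassembles fixed-size batches, which is why every shape must be fully defined. A minimal sketch of that queue pattern outside tensorpack, assuming the TF 1.x graph API and an invented 28x28 input, would look like:

    import tensorflow as tf  # TF 1.x graph-mode API assumed

    # one example at a time, no batch dimension
    x = tf.placeholder(tf.float32, shape=[28, 28], name='x-nobatch')
    queue = tf.FIFOQueue(3000, [tf.float32], shapes=[x.get_shape()])
    enqueue_op = queue.enqueue([x])   # run repeatedly by a background thread
    batch = queue.dequeue_many(32)    # yields a [32, 28, 28] batch
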
    def __init__(self, K, gamma, select_name, reward_names):
        assert K == len(reward_names)
        self.K = K
        self.gamma = gamma
        self.w = np.ones(K, dtype=np.float64) / K
        self.sample_w = np.ones(K, dtype=np.float64) / K
        # local record of selected value
        self._select = self.K - 1
        self.select_name = select_name
        self._select_readable_name, self.select_var_name = get_op_tensor_name(
            select_name)
        self.reward_names = reward_names
        names = [get_op_tensor_name(name) for name in reward_names]
        self._r_readable_names, self.r_names = zip(*names)
        self._r_readable_names = list(self._r_readable_names)
        self.r_names = list(self.r_names)

        self.active = False
        self.is_first = True
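
The gamma parameter and the uniform w / sample_w vectors are the usual Exp3 ingredients. The standard Exp3 mixing rule (shown only as an illustration; the callback's own update step is not part of this excerpt) blends the learned weights with uniform exploration:

    import numpy as np

    def exp3_sample_weights(w, gamma):
        # exploit the learned weights, but keep a gamma fraction of uniform exploration
        K = len(w)
        return (1.0 - gamma) * w / np.sum(w) + gamma / K

    # with freshly initialized uniform weights this stays uniform:
    print(exp3_sample_weights(np.ones(4) / 4, gamma=0.1))   # [0.25 0.25 0.25 0.25]
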
    def _setup_graph(self):
        _, self.w_var_name = get_op_tensor_name(self.weight_name)

        _, l_var_names = zip(*[get_op_tensor_name(nm)
                               for nm in self.loss_names])
        self.l_var_names = list(l_var_names)

        all_vars = tf.global_variables()
        for v in all_vars:
            if v.name == self.w_var_name:
                self.weight = v
                break
        else:
            raise ValueError("{} does not exist as VAR".format(
                self.w_var_name))

        self.losses = []
        for nm in self.l_var_names:
            self.losses.append(get_op_or_tensor_by_name(nm))

        self.weight_holder = tf.placeholder(tf.float32,
                                            shape=(self.K, ),
                                            name='adaW_holder')
        self.assign_op = self.weight.assign(self.weight_holder)
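
The placeholder-plus-assign pair created at the end is the common pattern for pushing a numpy weight vector into a graph variable at run time. A self-contained sketch of the same pattern, assuming TF 1.x and an invented variable named adaW:

    import numpy as np
    import tensorflow as tf  # TF 1.x graph-mode API assumed

    K = 4
    weight = tf.get_variable('adaW', shape=(K,), dtype=tf.float32,
                             initializer=tf.ones_initializer())
    weight_holder = tf.placeholder(tf.float32, shape=(K,), name='adaW_holder')
    assign_op = weight.assign(weight_holder)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(assign_op, feed_dict={weight_holder: np.ones(K) / K})
        print(sess.run(weight))   # [0.25 0.25 0.25 0.25]
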
    def __init__(self, K, select_name, distribution=None):
        self.K = K
        if distribution is not None:
            self.w = distribution / np.sum(distribution)
        else:
            self.w = np.ones(K, dtype=np.float64)
            if K >= 8:
                self.w = self.w / 2.0 / (K - 4)
                self.w[K // 2] = 0.125
                self.w[K // 4] = 0.125
                self.w[K * 3 // 4] = 0.125
                self.w[K - 1] = 0.125
            else:
                self.w = self.w / K

        # local record of selected value
        self._select = self.K - 1
        self.select_name = select_name
        self._select_readable_name, self.select_var_name = get_op_tensor_name(
            select_name)

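When no distribution is given and K >= 8, the default puts 0.125 of the mass on each of four anchor indices (K//4, K//2, 3*K//4 and K-1) and spreads the remaining half uniformly over the other K - 4 entries, so the weights sum to 1. A quick check of that arithmetic (K = 12 is an arbitrary choice):

    import numpy as np

    K = 12
    w = np.ones(K, dtype=np.float64) / 2.0 / (K - 4)
    w[[K // 4, K // 2, K * 3 // 4, K - 1]] = 0.125
    print(w[[K // 4, K // 2, K * 3 // 4, K - 1]])   # the four anchors, 0.125 each
    print(w.sum())                                  # 1.0
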
Example #5
    parser.add_argument('--meta', help='metagraph file', required=True)
    parser.add_argument(dest='input', help='input model file, has to be a TF checkpoint')
    parser.add_argument(dest='output', help='output model file, can be npz or TF checkpoint')
    args = parser.parse_args()

    # this script does not need GPU
    os.environ['CUDA_VISIBLE_DEVICES'] = ''

    try:
        tf.train.import_meta_graph(args.meta, clear_devices=True)
    except KeyError:
        print(
            "If your graph contains non-standard ops, you need to import the relevant library first."
        )
        raise

    # loading...
    if args.input.endswith('.npz'):
        dic = np.load(args.input)
    else:
        dic = varmanip.load_chkpt_vars(args.input)
    dic = {get_op_tensor_name(k)[1]: v for k, v in six.iteritems(dic)}

    # save variables that are GLOBAL, and either TRAINABLE or MODEL
    var_to_dump = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    var_to_dump.extend(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
    if len(set(var_to_dump)) != len(var_to_dump):
        print("TRAINABLE and MODEL variables have duplication!")
    var_to_dump = list(set(var_to_dump))
    globvarname = set([k.name for k in tf.global_variables()])
    var_to_dump = set([k.name for k in var_to_dump if k.name in globvarname])

    for name in var_to_dump:
        assert name in dic, "Variable {} not found in the model!".format(name)

    dic_to_dump = {k: v for k, v in six.iteritems(dic) if k in var_to_dump}
    varmanip.save_chkpt_vars(dic_to_dump, args.output)
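
Since the help text says the output can be an npz file or a TF checkpoint, a dumped .npz can be inspected directly with numpy; a quick sanity check might be (output.npz is just an illustrative path):

    import numpy as np

    dumped = np.load('output.npz')   # hypothetical path passed as the output argument
    for name, value in dumped.items():
        print(name, value.shape, value.dtype)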