Code example #1
 def __del__(self):
     for i, path in enumerate(self._in_paths):
         if path:
             try:
                 self._cmd_queues[i].put(EndSignal())
                 self._cmd_queues[i].join()
             except:
                 pass
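All of the examples below coordinate threads through joinable queues and sentinel objects. A minimal sketch of that pattern, assuming EndSignal (like SyncSignal and StartSignal in later examples) is just a marker class; the real PaddleSlim classes may carry extra state:

    import queue
    import threading

    class EndSignal(object):
        # Hypothetical stand-in for the PaddleSlim sentinel class.
        pass

    def consumer(q):
        # Drain the queue until the EndSignal sentinel arrives, acknowledging
        # every item so that q.join() can return on the producer side.
        while True:
            item = q.get()
            q.task_done()
            if isinstance(item, EndSignal):
                break

    cmd_queue = queue.Queue()
    t = threading.Thread(target=consumer, args=(cmd_queue, ))
    t.daemon = True
    t.start()
    cmd_queue.put("work")
    cmd_queue.put(EndSignal())
    cmd_queue.join()  # returns once the consumer has acknowledged both items

The __del__ above uses the same put()/join() pair to tell each reader to stop and then waits for the acknowledgement, swallowing any error raised during interpreter shutdown.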
Code example #2
            def read_offline(in_path, cmd_queue, out_queue):
                end_recved = False

                def get_cmd():
                    cmd, end_recved = None, False
                    try:
                        if not cmd_queue.empty():
                            cmd = cmd_queue.get()
                            cmd_queue.task_done()
                            if isinstance(cmd, EndSignal):
                                end_recved = True
                    except IOError:
                        end_recved = True
                    return cmd, end_recved

                # wait for the sync in start
                while not end_recved:
                    cmd, end_recved = get_cmd()
                    if isinstance(cmd, SyncSignal):
                        out_queue.put(SyncSignal())
                        break
                # for multiple-times offline serving
                while not end_recved:
                    # wait for the sync in get_knowledge_desc()
                    while not end_recved:
                        cmd, end_recved = get_cmd()
                        if isinstance(cmd, SyncSignal):
                            out_queue.put(SyncSignal())
                            break

                    if end_recved:
                        break
                    with open(in_path, 'rb') as fin:
                        # get knowledge desc
                        desc = pickle.load(fin)
                        out_queue.put(desc)
                        # wait for the data accessing signal
                        while not end_recved:
                            cmd, end_recved = get_cmd()
                            if isinstance(cmd, StartSignal):
                                break
                        # get knowledge data
                        while not end_recved:
                            try:
                                data = pickle.load(fin)
                                out_queue.put(data)
                                _, end_recved = get_cmd()
                            except EOFError:
                                break
                    if end_recved:
                        break
                    out_queue.put(EndSignal())
                    out_queue.join()
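read_offline expects the dump file to contain one pickled knowledge description followed by a stream of pickled batch dicts, read back until EOFError. A small sketch of that file layout under that assumption (the file name and description contents are made up):

    import pickle
    import numpy as np

    desc = {"logits": {"shape": (-1, 10), "dtype": "float32", "lod_level": 0}}
    batches = [{"logits": np.zeros((32, 10), dtype="float32")} for _ in range(3)]

    with open("knowledge.dump", "wb") as fout:
        fout.write(pickle.dumps(desc))       # knowledge description goes first
        for batch in batches:
            fout.write(pickle.dumps(batch))  # then one pickled dict per batch

    with open("knowledge.dump", "rb") as fin:
        loaded_desc = pickle.load(fin)
        while True:
            try:
                batch = pickle.load(fin)     # keep loading, as read_offline does
            except EOFError:
                break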
Code example #3
 def func():
     idx = 0
     while True:
         data = self._in_queue.get()
         self._in_queue.task_done()
         if not isinstance(data, EndSignal):
             self._local_in_queues[
                 idx % self._num_postprocess_threads].put(data)
             idx += 1
         else:
             for q in self._local_in_queues:
                 q.put(EndSignal())
             break
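This dispatcher fans incoming batches out to per-thread local queues in round-robin order and broadcasts EndSignal to all of them when the stream ends. A standalone sketch of the same idea, where the worker count and the EndSignal class are assumptions:

    import queue
    import threading

    class EndSignal(object):
        pass

    num_workers = 2
    in_queue = queue.Queue()
    local_queues = [queue.Queue() for _ in range(num_workers)]

    def dispatcher():
        # Distribute items to the local queues in round-robin order, then
        # broadcast EndSignal so every worker knows the stream is over.
        idx = 0
        while True:
            data = in_queue.get()
            in_queue.task_done()
            if isinstance(data, EndSignal):
                for q in local_queues:
                    q.put(EndSignal())
                break
            local_queues[idx % num_workers].put(data)
            idx += 1

    def worker(q):
        # Each worker drains only its own local queue.
        while True:
            item = q.get()
            q.task_done()
            if isinstance(item, EndSignal):
                break

    threading.Thread(target=dispatcher, daemon=True).start()
    for q in local_queues:
        threading.Thread(target=worker, args=(q, ), daemon=True).start()
    for i in range(5):
        in_queue.put(i)
    in_queue.put(EndSignal())
    in_queue.join()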
Code example #4
 def writer(buf_queue, schema_keys):
     samples_sent, batches_sent = 0, 0
     while True:
         outputs = buf_queue.get()
         buf_queue.task_done()
         if not isinstance(outputs, EndSignal):
             batch_samples = dict(zip(schema_keys, outputs))
             if self._knowledge_queue:
                 self._knowledge_queue.put(batch_samples)
             if self._out_file:
                 self._out_file.write(pickle.dumps(batch_samples))
         else:
             if self._knowledge_queue:
                 self._knowledge_queue.put(EndSignal())
Code example #5
File: teacher.py  Project: zhougf/PaddleSlim
 def writer(buf_queue, schema_keys):
     samples_sent, batches_sent = 0, 0
     while True:
         outputs = buf_queue.get()
         buf_queue.task_done()
         if not isinstance(outputs, EndSignal):
             batch_samples = dict(zip(schema_keys, outputs))
             if self._knowledge_queue:
                 self._knowledge_queue.put(batch_samples)
             if self._out_file:
                 self._out_file.write(pickle.dumps(batch_samples))
         else:
             if self._knowledge_queue:
                 self._knowledge_queue.put(EndSignal())
             # close the file in this child thread so that all pending
             # writes have completed before the service stops
             if self._out_file:
                 self._out_file.close()
Code example #6
        def make_new_batch(in_queue, out_queue, batch_size):
            """ 
            Get knowledge data from a local queue, re-batch it to the
            student's batch size, and put it into the intermediate
            queue (out_queue).
            """
            batches, num_samples = [], 0
            while True:
                batch_samples = in_queue.get()
                in_queue.task_done()
                if not isinstance(batch_samples, EndSignal):
                    cur_num_samples = list(batch_samples.values())[0].shape[0]
                    if num_samples + cur_num_samples < batch_size:
                        batches.append(batch_samples)
                        num_samples += cur_num_samples
                    elif num_samples + cur_num_samples == batch_size:
                        batches.append(batch_samples)
                        out_queue.put(concat_batches(batches))
                        batches, num_samples = [], 0
                    else:
                        num_splited = batch_size - num_samples
                        first, second = split_batch(batch_samples, num_splited)
                        batches.append(first)
                        out_queue.put(concat_batches(batches))
                        num_left = cur_num_samples - num_splited
                        while num_left > batch_size:
                            first, second = split_batch(second, batch_size)
                            out_queue.put(first)
                            num_left -= batch_size

                        if num_left == batch_size:
                            out_queue.put(second)
                            batches, num_samples = [], 0
                        else:
                            batches, num_samples = [second], num_left
                else:
                    if len(batches) > 0:
                        out_queue.put(concat_batches(batches))
                    out_queue.put(EndSignal())
                    break
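make_new_batch leans on two helpers, concat_batches and split_batch, to re-batch dicts of per-key arrays. A hedged guess at their behavior, assuming each batch is a {name: numpy array} dict with samples along the first axis (the actual PaddleSlim utilities may differ in detail):

    import numpy as np

    def concat_batches(batches):
        # Concatenate a list of {name: ndarray} dicts along the batch axis.
        return {k: np.concatenate([b[k] for b in batches], axis=0)
                for k in batches[0]}

    def split_batch(batch, num):
        # Split one {name: ndarray} dict into its first `num` samples and the rest.
        first = {k: v[:num] for k, v in batch.items()}
        second = {k: v[num:] for k, v in batch.items()}
        return first, second

    # Example: turn two teacher batches of 3 samples into one student batch of 4.
    a = {"logits": np.arange(6).reshape(3, 2)}
    b = {"logits": np.arange(6, 12).reshape(3, 2)}
    first, second = split_batch(b, 1)
    merged = concat_batches([a, first])   # merged["logits"].shape == (4, 2)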
Code example #7
 def wrapper():
     # The batch sizes of the teacher and student models may differ,
     # so re-batch the incoming data to the student's batch size.
     batches, num_samples = [], 0
     while True:
         batch_samples = queue.get()
         queue.task_done()
         if not isinstance(batch_samples, EndSignal):
             cur_num_samples = list(
                 batch_samples.values())[0].shape[0]
             if num_samples + cur_num_samples < batch_size:
                 batches.append(batch_samples)
                 num_samples += cur_num_samples
             elif num_samples + cur_num_samples == batch_size:
                 batches.append(batch_samples)
                 yield concat_batches(batches)
                 batches, num_samples = [], 0
             else:
                 num_splited = batch_size - num_samples
                 first, second = split_batch(
                     batch_samples, num_splited)
                 batches.append(first)
                 yield concat_batches(batches)
                 num_left = cur_num_samples - num_splited
                 while num_left > batch_size:
                     first, second = split_batch(
                         second, batch_size)
                     yield first
                     num_left -= batch_size
                 batches, num_samples = [second], num_left
         else:
             if len(batches) > 0:
                 yield concat_batches(batches)
             yield EndSignal()
             break
Code example #8
        def gather_and_merge(in_queues, out_queue):
            """ 
            Gather knowledge from all intermediate queues, merge it, and 
            put the final knowledge into the knowledge queue to the 
            student (out_queue).
            """
            def data_receiver(queue):
                while True:
                    batch = queue.get()
                    queue.task_done()
                    yield batch
                    if isinstance(batch, EndSignal):
                        break

            data_receivers = [data_receiver(queue) for queue in in_queues]

            end_received = [0] * len(in_queues)
            while True:
                knowledge = OrderedDict([
                    (k, []) for k, v in list(self._knowledge_desc.items())
                ])
                for idx, receiver in enumerate(data_receivers):
                    if not end_received[idx]:
                        batch_samples = receiver.next(
                        ) if six.PY2 else receiver.__next__()
                        if not isinstance(batch_samples, EndSignal):
                            for k, v in list(batch_samples.items()):
                                knowledge[k].append(v)
                        else:
                            end_received[idx] = 1
                if sum(end_received) == len(in_queues):
                    break
                knowledge = self._merge_knowledge(knowledge)
                out_queue.put(knowledge)
            out_queue.put(EndSignal())
            out_queue.join()
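gather_and_merge collects one batch from every intermediate queue into per-key lists and hands them to self._merge_knowledge, which is not shown in this excerpt. A rough sketch of what such a reduction could look like, with the strategy name and behavior being assumptions rather than the project's actual implementation:

    from collections import OrderedDict
    import numpy as np

    def merge_knowledge(knowledge, strategy="mean"):
        # `knowledge` maps each key to a list of ndarrays, one per source queue;
        # reduce every list to a single array, e.g. by averaging or concatenation.
        merged = OrderedDict()
        for key, values in knowledge.items():
            if strategy == "concat":
                merged[key] = np.concatenate(values, axis=0)
            else:
                merged[key] = np.mean(np.stack(values, axis=0), axis=0)
        return merged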
Code example #9
        def listen(queues, out_queue):
            def data_receiver(queue, batch_size):
                def wrapper():
                    # The batch sizes of the teacher and student models may differ,
                    # so re-batch the incoming data to the student's batch size.
                    batches, num_samples = [], 0
                    while True:
                        batch_samples = queue.get()
                        queue.task_done()
                        if not isinstance(batch_samples, EndSignal):
                            cur_num_samples = list(
                                batch_samples.values())[0].shape[0]
                            if num_samples + cur_num_samples < batch_size:
                                batches.append(batch_samples)
                                num_samples += cur_num_samples
                            elif num_samples + cur_num_samples == batch_size:
                                batches.append(batch_samples)
                                yield concat_batches(batches)
                                batches, num_samples = [], 0
                            else:
                                num_splited = batch_size - num_samples
                                first, second = split_batch(
                                    batch_samples, num_splited)
                                batches.append(first)
                                yield concat_batches(batches)
                                num_left = cur_num_samples - num_splited
                                while num_left > batch_size:
                                    first, second = split_batch(
                                        second, batch_size)
                                    yield first
                                    num_left -= batch_size
                                batches, num_samples = [second], num_left
                        else:
                            if len(batches) > 0:
                                yield concat_batches(batches)
                            yield EndSignal()
                            break

                return wrapper

            data_receivers = [
                data_receiver(queue, self._batch_size)() for queue in queues
            ]

            end_received = [0] * len(queues)
            while True:
                knowledge = OrderedDict([
                    (k, []) for k, v in list(self._knowledge_desc.items())
                ])
                for idx, receiver in enumerate(data_receivers):
                    if not end_received[idx]:
                        batch_samples = receiver.next(
                        ) if six.PY2 else receiver.__next__()
                        if not isinstance(batch_samples, EndSignal):
                            for k, v in list(batch_samples.items()):
                                knowledge[k].append(v)
                        else:
                            end_received[idx] = 1
                if sum(end_received) == len(queues):
                    break
                knowledge = self._merge_knowledge(knowledge)
                out_queue.put(knowledge)
            out_queue.put(EndSignal())
            out_queue.join()
Code example #10
    def start_knowledge_service(self,
                                feed_list,
                                schema,
                                program,
                                reader_config,
                                exe,
                                buf_size=10,
                                use_fp16=False,
                                times=1):
        """
        Start the knowledge service to generate and transfer knowledge data.
        In GPU mode, the devices to execute knowledge prediction will be 
        determined by environment variable **FLAGS_selected_gpus**, or by 
        **CUDA_VISIBLE_DEVICES** if it is not set, and by **CPU_NUM** (default 
        1) in CPU mode. Only supported in static graph. 

        Args:
            feed_list (list): A list of feed Variables or their names for the 
                              input program.
            schema (dict): A dictionary to specify names and fetched 
                           Variables of knowledge.
            program (fluid.Program): Inference program for the teacher model.
            reader_config (dict): The config for data reader. Support all the 
                three types of generators used by `fluid.io.PyReader` and 
                `fluid.io.DataLoader`, and their configs contain the key-value 
                pair of the generator type and a generator object, plus
                other necessary argument pairs. See the following: 

                    1) sample generator:
                       reader_config={"sample_generator": #some_sample_generator, 
                                  "batch_size": #batch_size, "drop_last": #drop_last},
                       'drop_last' is set to True by default;
                    2) sample list generator:
                       reader_config={"sample_list_generator": 
                                       #some_sample_list_generator},
                    3) batch generator:
                       reader_config={"batch_generator": #some_batch_genrator}.

                The config will be parsed in the order 1) -> 3), and any 
                other unrelated keys in it will be ignored.
            exe (fluid.Executor): The executor to run the input program.
            buf_size (int): The size of buffers for data reader and knowledge 
                            writer on each device. 
            use_fp16 (bool): Whether to transfer/store knowledge data as float16 
                         if its data type is float32/float64. In offline mode, 
                         this reduces the size of the dumped knowledge file; in 
                         online mode, it speeds up the transfer at the cost of 
                         some precision. Default False.
            times (int): The maximum number of serving repetitions. Default 1. 
                         Each call to the public method 'get_knowledge_generator()' 
                         of the Student object increases the serving count by one; 
                         the service ends once the maximum is reached. Only valid 
                         in online mode, and ignored in offline mode.
        """
        if not self._started:
            raise ValueError("The method start() should be called first!")

        if not isinstance(program, fluid.Program):
            raise ValueError(
                "Input argument 'program' should be a fluid Program!")
        self._program = program._inference_optimize(prune_read_op=True)

        if not isinstance(feed_list, list):
            raise ValueError("Input argument 'feed_list' should be a list!")
        else:
            self._feed_list = []
            for feed in feed_list:
                if isinstance(feed, fluid.framework.Variable):
                    self._feed_list.append(feed)
                elif isinstance(feed, str) or isinstance(feed, unicode):
                    self._feed_list.append(self._program.global_block().var(
                        feed))
                else:
                    raise ValueError("Input 'feed_list' should consist of feed "
                                     "Variables or their names!")

        if not isinstance(schema, dict) and not isinstance(schema, OrderedDict):
            raise ValueError(
                "Input argument 'schema' should be a dict or OrderedDict!")
        self._schema = schema

        if not isinstance(reader_config, dict):
            raise ValueError("The reader config must be a dictionary!")

        if not isinstance(exe, fluid.Executor):
            raise ValueError("Input argument should be a fluid Executor!")
        self._exe = exe

        self._use_fp16 = use_fp16

        if not buf_size > 0:
            raise ValueError("The buffer size should be positive!")
        self._buf_size = buf_size

        if not times > 0:
            raise ValueError("Repeated serving times should be positive!")
        self._times = times
        if self._times > 1 and self._out_file:
            self._times = 1
            print("WARNING: args 'times' will be ignored in offline mode")

        desc = {}
        for name, var in list(schema.items()):
            if not isinstance(var, fluid.framework.Variable):
                raise ValueError("The member of schema must be fluid Variable.")
            desc[name] = {
                "shape": var.shape,
                "dtype": convert_dtype(var.dtype),
                "lod_level": var.lod_level
            }
        if not self._knowledge_desc:
            self._knowledge_desc = desc
        else:
            if self._out_file and not self._knowledge_desc == desc:
                raise ValueError("The knowledge description should be kept "
                                 "consistent in offline mode!")

        if isinstance(self._exe.place, fluid.CUDAPlace):
            places = fluid.cuda_places()
        else:
            places = fluid.cpu_places()
        dev_count = len(places)

        data_loader = fluid.io.DataLoader.from_generator(
            feed_list=self._feed_list,
            capacity=self._buf_size * dev_count,
            use_double_buffer=(dev_count == 1),
            iterable=True)

        places = [fluid.CPUPlace()] if dev_count > 1 else [self._exe.place]
        if "sample_generator" in reader_config:
            if "batch_size" not in reader_config:
                raise ValueError("batch size must be specified when using "
                                 "sample generator!")
            sample_generator = reader_config["sample_generator"]
            batch_size = reader_config["batch_size"]
            drop_last = reader_config[
                "drop_last"] if "drop_last" in reader_config else True

            data_loader.set_sample_generator(
                reader=sample_generator,
                batch_size=batch_size,
                drop_last=drop_last,
                places=places)
        elif "sample_list_generator" in reader_config:
            sample_list_generator = reader_config["sample_list_generator"]
            data_loader.set_sample_list_generator(
                reader=sample_list_generator, places=places)
        elif "batch_generator" in reader_config:
            batch_generator = reader_config["batch_generator"]
            data_loader.set_batch_generator(
                reader=batch_generator, places=places)
        else:
            raise ValueError(
                "The reader config doesn't contain any valid "
                "generator type, which should be one of 'sample_generator', "
                "'sample_list_generator', and 'batch_generator'.")

        def cast2fp16(know):
            for k, v in list(know.items()):
                if not isinstance(v, np.ndarray):
                    break
                if v.dtype == np.float32 or v.dtype == np.float64:
                    v = v.astype("float16")
                    know[k] = v
            return know

        feed_var_names = [var.name for var in self._feed_list]
        schema_in_feed, schema_in_fetch = {}, {}
        for k, v in list(self._schema.items()):
            if k in feed_var_names:
                schema_in_feed[k] = v
            else:
                schema_in_fetch[k] = v
        schema_in_fetch_keys, schema_in_fetch_vars = zip(
            *list(schema_in_fetch.items()))

        def know_maker(in_queue, out_queue, use_fp16):
            while True:
                data = in_queue.get()
                in_queue.task_done()
                if isinstance(data, tuple):
                    dev_batches, outputs = data
                    know = {}
                    for k in schema_in_feed.keys():
                        batch_know = [
                            np.array(batch[k]) for batch in dev_batches
                        ]
                        know[k] = np.concatenate(batch_know)
                    know.update(dict(zip(schema_in_fetch_keys, outputs)))
                    if use_fp16:
                        know = cast2fp16(know)
                    out_queue.put(know)
                else:
                    # forward other types of data directly (maybe knowledge desc or EndSignal)
                    out_queue.put(data)
                    if isinstance(data, EndSignal):
                        break

        know_make_queue = Queue.Queue(self._buf_size)
        if self._out_file:
            # For offline dump, write the knowledge description to the head of file
            self._out_file.write(pickle.dumps(self._knowledge_desc))
            print("output path: %s" % self._out_path)
            offline_write_queue = Queue.Queue(self._buf_size)

            def offline_write(queue):
                while True:
                    know = queue.get()
                    queue.task_done()
                    if not isinstance(know, EndSignal):
                        self._out_file.write(pickle.dumps(know))
                    else:
                        # close the file in this child thread so that all 
                        # pending writes have completed before the service stops
                        self._out_file.close()

            t = Thread(target=offline_write, args=(offline_write_queue, ))
            t.daemon = True
            t.start()
            make_knowledge = WorkerParallel(
                num_postprocess_threads, know_make_queue, offline_write_queue)

        if self._knowledge_queues:
            make_knowledge = WorkerParallel(num_postprocess_threads,
                                            know_make_queue,
                                            self._knowledge_queues)

        compiled_program = fluid.compiler.CompiledProgram(
            self._program).with_data_parallel()

        print("Knowledge description {}".format(self._knowledge_desc))
        print(
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) +
            "  Teacher begins to serve ...")

        data_reader = MixedDataReader(data_loader, dev_count)
        for repeated in range(self._times):
            make_knowledge(worker=know_maker, args=(self._use_fp16, ))
            if self._knowledge_queues:
                # wait for the accessing of knowledge desc and data
                while True:
                    if self._sync_required:
                        for q in self._knowledge_queues:
                            q.put(SyncSignal())
                        # For online mode, send knowledge description every sync
                        know_make_queue.put(self._knowledge_desc)
                        self._sync_required = False
                    if self._data_required:
                        self._data_required = False
                        break
                for q in self._knowledge_queues:
                    q.join()

            print("No.{} time serving ... ".format(repeated))
            num_batches_sent = 0
            for index, dev_batches in enumerate(data_reader.multi_dev_generator(
            )):
                if self._sync_required:
                    break
                outputs = self._exe.run(compiled_program,
                                        feed=dev_batches,
                                        fetch_list=schema_in_fetch_vars)
                know_make_queue.put((dev_batches, outputs))

                num_batches_sent += dev_count
                if num_batches_sent % (100 * dev_count) == 0:
                    log = "Processed {} batch samples.".format(num_batches_sent)
                    if self._knowledge_queues:
                        qsize = 0
                        for q in self._knowledge_queues:
                            qsize += q.qsize()
                        log += " Knowledge queue size {}.".format(qsize)
                    print(log)

            dev_batches, outputs = [], []
            for index, batch in enumerate(data_reader.tail_generator()):
                if self._sync_required:
                    break
                dev_batches.append(batch)
                output = self._exe.run(self._program,
                                       feed=batch,
                                       fetch_list=schema_in_fetch_vars)
                if outputs:
                    outputs = [
                        np.concatenate(
                            (outs, out), axis=0)
                        for (outs, out) in zip(outputs, output)
                    ]
                else:
                    outputs = copy.deepcopy(output)
            if dev_batches or outputs:
                know_make_queue.put((dev_batches, outputs))
                num_batches_sent += (index + 1)

            print("Processed {} batch samples in total.".format(
                num_batches_sent))
            know_make_queue.put(EndSignal())
            know_make_queue.join()

            if self._knowledge_queues:
                for q in self._knowledge_queues:
                    q.join()
            if self._out_file:
                offline_write_queue.join()
        print(
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) +
            "  Teacher ends serving.")
Code example #11
    def start_knowledge_service(self,
                                feed_list,
                                schema,
                                program,
                                reader_config,
                                exe,
                                buf_size=10,
                                times=1):
        """
        Start the knowledge service to generate and transfer knowledge data.
        In GPU mode, the devices to execute knowledge prediction will be 
        determined by environment variable **FLAGS_selected_gpus**, or by 
        **CUDA_VISIBLE_DEVICES** if it is not set, and by **CPU_NUM** (default 
        1) in CPU mode. Only supported in static graph. 

        Args:
            feed_list (list): A list of feed Variables or their names for the 
                              input program.
            schema (dict): A dictionary to specify names and fetched 
                           Variables of knowledge.
            program (fluid.Program): Inference program for the teacher model.
            reader_config (dict): The config for data reader. Support all the 
                three types of generators used by `fluid.io.PyReader` and 
                `fluid.io.DataLoader`, and their configs contain the key-value 
                pair of the generator type and a generator object, plus
                other necessary argument pairs. See the following: 

                    1) sample generator:
                       reader_config={"sample_generator": #some_sample_generator, 
                                  "batch_size": #batch_size, "drop_last": #drop_last},
                       'drop_last' is set to True by default;
                    2) sample list generator:
                       reader_config={"sample_list_generator": 
                                       #some_sample_list_generator},
                    3) batch generator:
                       reader_config={"batch_generator": #some_batch_genrator}.

                The config will be parsed in the order 1) -> 3), and any 
                other unrelated keys in it will be ignored.
            exe (fluid.Executor): The executor to run the input program.
            buf_size (int): The size of buffers for data reader and knowledge 
                            writer on each device. 
            times (int): The maximum number of serving repetitions. Default 1. 
                         Each call to the public method 'get_knowledge_generator()' 
                         of the Student object increases the serving count by one; 
                         the service ends once the maximum is reached.
        """
        if not self._started:
            raise ValueError("The method start() should be called first!")

        if not isinstance(program, fluid.Program):
            raise ValueError(
                "Input argument 'program' should be a fluid Program!")
        self._program = program._inference_optimize(prune_read_op=True)

        if not isinstance(feed_list, list):
            raise ValueError("Input argument 'feed_list' should be a list!")
        else:
            self._feed_list = []
            for feed in feed_list:
                if isinstance(feed, fluid.framework.Variable):
                    self._feed_list.append(feed)
                elif isinstance(feed, str) or isinstance(feed, unicode):
                    self._feed_list.append(
                        self._program.global_block().var(feed))
                else:
                    raise ValueError(
                        "Input 'feed_list' should consist of feed "
                        "Variables or their names!")

        if not isinstance(schema, dict) and not isinstance(
                schema, OrderedDict):
            raise ValueError(
                "Input argument 'schema' should be a dict or OrderedDict!")
        self._schema = schema

        if not isinstance(reader_config, dict):
            raise ValueError("The reader config must be a dictionary!")

        if not isinstance(exe, fluid.Executor):
            raise ValueError("Input argument should be a fluid Executor!")
        self._exe = exe

        if not buf_size > 0:
            raise ValueError("The buffer size should be positive!")
        self._buf_size = buf_size

        if not times > 0:
            raise ValueError("Repeated serving times should be positive!")
        self._times = times

        desc = {}
        for name, var in schema.items():
            if not isinstance(var, fluid.framework.Variable):
                raise ValueError(
                    "The member of schema must be fluid Variable.")
            desc[name] = {
                "shape": var.shape,
                "dtype": convert_dtype(var.dtype),
                "lod_level": var.lod_level
            }
        if not self._knowledge_desc:
            self._knowledge_desc = desc
        else:
            if self._out_file and not self._knowledge_desc == desc:
                raise ValueError("The knowledge description should be kept "
                                 "consistent in offline mode!")

        if isinstance(self._exe.place, fluid.CUDAPlace):
            places = fluid.cuda_places()
        else:
            places = fluid.cpu_places()
        dev_count = len(places)

        data_loader = fluid.io.DataLoader.from_generator(
            feed_list=self._feed_list,
            capacity=self._buf_size * dev_count,
            use_double_buffer=(dev_count == 1),
            iterable=True)

        places = [fluid.CPUPlace()] if dev_count > 1 else [self._exe.place]
        if "sample_generator" in reader_config:
            if "batch_size" not in reader_config:
                raise ValueError("batch size must be specified when using "
                                 "sample generator!")
            sample_generator = reader_config["sample_generator"]
            batch_size = reader_config["batch_size"]
            drop_last = reader_config[
                "drop_last"] if "drop_last" in reader_config else True

            data_loader.set_sample_generator(reader=sample_generator,
                                             batch_size=batch_size,
                                             drop_last=drop_last,
                                             places=places)
        elif "sample_list_generator" in reader_config:
            sample_list_generator = reader_config["sample_list_generator"]
            data_loader.set_sample_list_generator(reader=sample_list_generator,
                                                  places=places)
        elif "batch_generator" in reader_config:
            batch_generator = reader_config["batch_generator"]
            data_loader.set_batch_generator(reader=batch_generator,
                                            places=places)
        else:
            raise ValueError(
                "The reader config doesn't contain any valid "
                "generator type, which should be one of 'sample_generator', "
                "'sample_list_generator', and 'batch_generator'.")

        def writer(buf_queue, schema_keys):
            samples_sent, batches_sent = 0, 0
            while True:
                outputs = buf_queue.get()
                buf_queue.task_done()
                if not isinstance(outputs, EndSignal):
                    batch_samples = dict(zip(schema_keys, outputs))
                    if self._knowledge_queue:
                        self._knowledge_queue.put(batch_samples)
                    if self._out_file:
                        self._out_file.write(pickle.dumps(batch_samples))
                else:
                    if self._knowledge_queue:
                        self._knowledge_queue.put(EndSignal())

        # Asynchronous output
        out_buf_queue = Queue.Queue(self._buf_size)
        schema_keys, schema_vars = zip(*self._schema.items())
        out_thread = Thread(target=writer, args=(out_buf_queue, schema_keys))
        out_thread.daemon = True
        out_thread.start()

        compiled_program = fluid.compiler.CompiledProgram(
            self._program).with_data_parallel()

        print("Knowledge description {}".format(self._knowledge_desc))
        print(
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) +
            "  Teacher begins to serve ...")
        # For offline dump, write the knowledge description to the head of file
        if self._out_file:
            self._out_file.write(pickle.dumps(self._knowledge_desc))
            print("output path: %s" % self._out_path)

        data_reader = MixedDataReader(data_loader, dev_count)
        # For online mode, send knowledge description every time
        for repeated in range(self._times):
            if self._knowledge_queue:
                # wait for the accessing of knowledge desc and data
                while True:
                    if self._sync_required:
                        self._knowledge_queue.put(SyncSignal())
                        self._knowledge_queue.put(self._knowledge_desc)
                        self._sync_required = False
                    if self._data_required:
                        self._data_required = False
                        break
                self._knowledge_queue.join()

            print("No.{} time serving ... ".format(repeated))
            num_batches_sent = 0
            for dev_batches in data_reader.multi_dev_generator():
                if self._sync_required:
                    break
                outputs = self._exe.run(compiled_program,
                                        feed=dev_batches,
                                        fetch_list=schema_vars)
                out_buf_queue.put(outputs)
                num_batches_sent += dev_count
                if num_batches_sent % (100 * dev_count) == 0:
                    log = "Processed {} batch samples.".format(
                        num_batches_sent)
                    if self._knowledge_queue:
                        log += " Knowledge queue size {}.".format(
                            self._knowledge_queue.qsize())
                    print(log)

            outputs = []
            for index, batch in enumerate(data_reader.tail_generator()):
                if self._sync_required:
                    break
                output = self._exe.run(self._program,
                                       feed=batch,
                                       fetch_list=schema_vars)
                if outputs:
                    outputs = [
                        np.concatenate((outs, out), axis=0)
                        for (outs, out) in zip(outputs, output)
                    ]
                else:
                    outputs = copy.deepcopy(output)
            if outputs:
                out_buf_queue.put(outputs)
                num_batches_sent += (index + 1)

            print("Processed {} batch samples in total.".format(
                num_batches_sent))

            out_buf_queue.put(EndSignal())
            out_buf_queue.join()

        if self._knowledge_queue:
            self._knowledge_queue.join()
        print(
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())) +
            "  Teacher ends serving.")