    def setUpClass(cls):
        os.environ['CPU_NUM'] = str(4)
        reader = paddle.batch(wmt16.train(ModelHyperParams.src_vocab_size,
                                          ModelHyperParams.trg_vocab_size),
                              batch_size=transformer_model.batch_size)

        with fluid.recordio_writer.create_recordio_writer(
                os.environ.get("RECORDIO_FILENAME")) as writer:
            for batch in reader():
                for tensor in prepare_batch_input(batch,
                                                  ModelHyperParams.src_pad_idx,
                                                  ModelHyperParams.trg_pad_idx,
                                                  ModelHyperParams.n_head):
                    t = fluid.LoDTensor()
                    t.set(tensor, fluid.CPUPlace())
                    writer.append_tensor(t)
                writer.complete_append_tensor()
Example 2
        def _soft_nms(bboxes, scores):
            bboxes = np.array(bboxes)
            scores = np.array(scores)
            class_nums = scores.shape[-1]

            softnms_thres = self.score_threshold
            softnms_sigma = self.softnms_sigma
            keep_top_k = self.keep_top_k

            cls_boxes = [[] for _ in range(class_nums)]
            cls_ids = [[] for _ in range(class_nums)]

            start_idx = 1 if self.background_label == 0 else 0
            for j in range(start_idx, class_nums):
                inds = np.where(scores[:, j] >= softnms_thres)[0]
                scores_j = scores[inds, j]
                rois_j = bboxes[inds, j, :]
                dets_j = np.hstack((scores_j[:, np.newaxis], rois_j)).astype(
                    np.float32, copy=False)
                cls_rank = np.argsort(-dets_j[:, 0])
                dets_j = dets_j[cls_rank]

                cls_boxes[j] = _soft_nms_for_cls(
                    dets_j, sigma=softnms_sigma, thres=softnms_thres)
                cls_ids[j] = np.array([j] * cls_boxes[j].shape[0]).reshape(-1,
                                                                           1)

            cls_boxes = np.vstack(cls_boxes[start_idx:])
            cls_ids = np.vstack(cls_ids[start_idx:])
            pred_result = np.hstack([cls_ids, cls_boxes])

            # Limit to max_per_image detections **over all classes**
            image_scores = cls_boxes[:, 0]
            if len(image_scores) > keep_top_k:
                image_thresh = np.sort(image_scores)[-keep_top_k]
                keep = np.where(cls_boxes[:, 0] >= image_thresh)[0]
                pred_result = pred_result[keep, :]

            res = fluid.LoDTensor()
            res.set_lod([[0, pred_result.shape[0]]])
            if pred_result.shape[0] == 0:
                pred_result = np.array([[1]], dtype=np.float32)
            res.set(pred_result, fluid.CPUPlace())

            return res
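The `_soft_nms_for_cls` helper called above is not shown in this example. As a rough, non-authoritative sketch, a Gaussian Soft-NMS over score-sorted rows of `[score, x1, y1, x2, y2]` could look like the following (the name `_soft_nms_for_cls_sketch`, the decay form, and the termination details are assumptions, not the original implementation):

import numpy as np

def _soft_nms_for_cls_sketch(dets, sigma, thres):
    """Gaussian Soft-NMS over (N, 5) rows [score, x1, y1, x2, y2], sorted by score."""
    dets = dets.copy()
    keep = []
    while dets.shape[0] > 0:
        keep.append(dets[0])              # keep the current highest-scoring box
        best, rest = dets[0], dets[1:]
        if rest.shape[0] == 0:
            break
        # IoU between the kept box and every remaining box
        x1 = np.maximum(best[1], rest[:, 1])
        y1 = np.maximum(best[2], rest[:, 2])
        x2 = np.minimum(best[3], rest[:, 3])
        y2 = np.minimum(best[4], rest[:, 4])
        inter = np.maximum(0.0, x2 - x1 + 1) * np.maximum(0.0, y2 - y1 + 1)
        area_best = (best[3] - best[1] + 1) * (best[4] - best[2] + 1)
        area_rest = (rest[:, 3] - rest[:, 1] + 1) * (rest[:, 4] - rest[:, 2] + 1)
        iou = inter / (area_best + area_rest - inter)
        # decay the remaining scores instead of hard-suppressing overlaps
        rest[:, 0] *= np.exp(-(iou * iou) / sigma)
        rest = rest[rest[:, 0] >= thres]
        dets = rest[np.argsort(-rest[:, 0])]  # re-sort by the decayed scores
    return np.array(keep, dtype=np.float32).reshape(-1, 5)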
Example 3
def infer(args):
    data_shape = cityscape.test_data_shape()
    num_classes = cityscape.num_classes()
    # define network
    images = fluid.layers.data(name='image', shape=data_shape, dtype='float32')
    _, _, sub124_out = icnet(images, num_classes,
                             np.array(data_shape[1:]).astype("float32"))
    predict = fluid.layers.resize_bilinear(sub124_out,
                                           out_shape=data_shape[1:3])
    predict = fluid.layers.transpose(predict, perm=[0, 2, 3, 1])
    predict = fluid.layers.reshape(predict, shape=[-1, num_classes])
    _, predict = fluid.layers.topk(predict, k=1)
    predict = fluid.layers.reshape(predict,
                                   shape=[data_shape[1], data_shape[2],
                                          -1])  # batch_size should be 1
    inference_program = fluid.default_main_program().clone(for_test=True)
    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    assert os.path.exists(args.model_path)
    fluid.io.load_params(exe, args.model_path)
    print("loaded model from: %s" % args.model_path)
    sys.stdout.flush()

    if not os.path.isdir(args.out_path):
        os.makedirs(args.out_path)

    for line in open(args.images_list):
        image_file = args.images_path + "/" + line.strip()
        filename = os.path.basename(image_file)
        image = paddle.dataset.image.load_image(
            image_file, is_color=True).astype("float32")
        image -= IMG_MEAN
        img = paddle.dataset.image.to_chw(image)[np.newaxis, :]
        image_t = fluid.LoDTensor()
        image_t.set(img, place)
        result = exe.run(inference_program,
                         feed={"image": image_t},
                         fetch_list=[predict])
        cv2.imwrite(args.out_path + "/" + filename + "_result.png",
                    color(result[0]))
    print("Saved images into: %s" % args.out_path)
Example 4
def to_lodtensor(data, place, dtype=None):
    """Convert data to LoDTensor."""
    if place is None:
        return data
    lengths = []
    while isinstance(data[0], list):
        lengths.append(list(map(len, data)))
        data = [x for xs in data for x in xs]
    if dtype is None:
        if isinstance(data[0], float):
            dtype = "float32"
        else:
            dtype = "int64"
    data = np.array(data, dtype=dtype)
    data_tensor = fluid.LoDTensor()
    data_tensor.set(data, place)
    data_tensor.set_recursive_sequence_lengths(lengths)
    return data_tensor
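A minimal usage sketch for the `to_lodtensor` above, using made-up data (the nested list and CPU place are assumptions; `np` follows the snippet's own imports):

import paddle.fluid as fluid

place = fluid.CPUPlace()
data = [[1, 2, 3], [4, 5]]     # two sequences of lengths 3 and 2
t = to_lodtensor(data, place)  # integer data, so dtype defaults to "int64"
# t.recursive_sequence_lengths() == [[3, 2]]; the flattened tensor holds 5 values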
Example 5
def get_sub_feed(input, place):
    new_dict = {}
    res_feed = {}
    key_name = ['bbox', 'im_info', 'im_id', 'im_shape', 'bbox_flip']
    for k in key_name:
        if k in input.keys():
            new_dict[k] = input[k]
    for k in input.keys():
        if 'image' in k:
            new_dict[k] = input[k]
    for k, v in new_dict.items():
        data_t = fluid.LoDTensor()
        data_t.set(v[0], place)
        if 'bbox' in k:
            lod = length2lod(v[1][0])
            data_t.set_lod(lod)
        res_feed[k] = data_t
    return res_feed
Example 6
def to_lodtensor(data, place, type):
    """
    Convert to LoDTensor.
    """
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)
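    # 'lod' now holds cumulative offsets: [0, len(seq_0), len(seq_0) + len(seq_1), ...]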
    if type in [0, 1, 2, 5, 6]: 
        data = np.array(data).astype("int64")
    if type in [3]: 
        data = np.array(data).astype("float")
    res = fluid.LoDTensor()
    res.set(data, place)
    res.set_lod([lod])
    return res
Example 7
def to_lodtensor(data, place):
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for l in seq_lens:
        cur_len += l
        lod.append(cur_len)
    flattened_data = np.concatenate(data, axis=0).astype("float32")
    print("data : {}".format(data))
    print("flattened_data : {}".format(flattened_data))
    print("flattened_data.shape : {}".format(flattened_data.shape))
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    print("flattened_data : {}".format(flattened_data))
    print("flattened_data.shape : {}".format(flattened_data.shape))
    res = fluid.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res
Example 8
def repeat_array_or_tensor(array_or_tensor, place, times):
    """Repeate numpy array or LoD tensor."""
    if isinstance(array_or_tensor, fluid.LoDTensor):
        data = [np.array(array_or_tensor)] * times
        recursive_sequence_lengths = [
            array_or_tensor.recursive_sequence_lengths()
        ] * times
        data = np.concatenate(data, axis=0)
        recursive_sequence_lengths = [
            sum(lens, []) for lens in zip(*recursive_sequence_lengths)
        ]
        data_tensor = fluid.LoDTensor()
        data_tensor.set(data, place)
        data_tensor.set_recursive_sequence_lengths(recursive_sequence_lengths)
        assert data_tensor.has_valid_recursive_sequence_lengths()
        return data_tensor
    elif isinstance(array_or_tensor, list):
        return list(chain(*([array_or_tensor] * times)))
    else:
        return np.concatenate([array_or_tensor] * times, axis=0)
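A rough usage sketch for `repeat_array_or_tensor` with hypothetical inputs (the ndarray and list branches shown here ignore `place`; the list branch also relies on `itertools.chain` being imported in the original module):

import numpy as np

arr = np.arange(6).reshape(3, 2)
repeated = repeat_array_or_tensor(arr, None, 2)    # ndarray branch -> shape (6, 2)
tripled = repeat_array_or_tensor([1, 2], None, 3)  # list branch -> [1, 2, 1, 2, 1, 2]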
Example 9
def infer(args):
    """ Gets one batch of feature data and predicts labels for each sample.
    """

    if not os.path.exists(args.infer_model_path):
        raise IOError("Invalid inference model path!")

    place = fluid.CUDAPlace(0) if args.device == 'GPU' else fluid.CPUPlace()
    exe = fluid.Executor(place)

    # load model
    [infer_program, feed_dict,
     fetch_targets] = fluid.io.load_inference_model(args.infer_model_path, exe)

    ltrans = [
        trans_add_delta.TransAddDelta(2, 2),
        trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),
        trans_splice.TransSplice()
    ]

    infer_data_reader = reader.AsyncDataReader(args.infer_feature_lst,
                                               args.infer_label_lst)
    infer_data_reader.set_transformers(ltrans)

    feature_t = fluid.LoDTensor()
    one_batch = next(infer_data_reader.batch_iterator(args.batch_size, 1))

    (features, labels, lod) = one_batch
    feature_t.set(features, place)
    feature_t.set_lod([lod])

    results = exe.run(infer_program,
                      feed={feed_dict[0]: feature_t},
                      fetch_list=fetch_targets,
                      return_numpy=False)

    probs, lod = lodtensor_to_ndarray(results[0])
    preds = probs.argmax(axis=1)
    infer_batch = split_infer_result(preds, lod)
    for index, sample in enumerate(infer_batch):
        print("result %d: " % index, sample, '\n')
Example 10
def sample_list_to_tensor_array(sample_list):
    slot_num = None
    slots = None
    for sample in sample_list:
        if slot_num is None:
            slot_num = len(sample)
            slots = [None] * len(sample)
        else:
            assert slot_num == len(sample)

        for slot_id, slot_item in enumerate(sample):
            if slots[slot_id] is None:
                slots[slot_id] = []
            slots[slot_id].append(slot_item)

    tensor_array = fluid.LoDTensorArray()
    for slot in slots:
        t = fluid.LoDTensor()
        t.set(np.array(slot), fluid.CPUPlace())
        tensor_array.append(t)

    return tensor_array
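A small usage sketch for `sample_list_to_tensor_array`, with two made-up samples of two slots each (the slot contents are purely illustrative):

import numpy as np

samples = [
    (np.array([1, 2], dtype='int64'), np.array([0.5], dtype='float32')),
    (np.array([3, 4], dtype='int64'), np.array([0.7], dtype='float32')),
]
tensor_array = sample_list_to_tensor_array(samples)
# two LoDTensors: slot 0 stacked to shape (2, 2), slot 1 stacked to shape (2, 1)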
Example 11
def infer(args):
    data_shape = [-1, 3, 256, 256]
    input = fluid.layers.data(name='input', shape=data_shape, dtype='float32')
    if args.input_style == "A":
        model_name = 'g_a'
        fake = build_generator_resnet_blocks(input, name="g_A")
    elif args.input_style == "B":
        model_name = 'g_b'
        fake = build_generator_resnet_blocks(input, name="g_B")
    else:
        raise "Input with style [%s] is not supported." % args.input_style
    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    fluid.io.load_persistables(exe, args.init_model + "/" + model_name)

    if not os.path.exists(args.output):
        os.makedirs(args.output)
    for file in glob.glob(args.input):
        image_name = os.path.basename(file)
        image = Image.open(file)
        image = image.resize((256, 256))
        image = np.array(image) / 127.5 - 1
        if len(image.shape) != 3:
            continue
        data = image.transpose([2, 0, 1])[np.newaxis, :].astype("float32")
        tensor = fluid.LoDTensor()
        tensor.set(data, place)

        fake_temp = exe.run(fetch_list=[fake.name], feed={"input": tensor})
        fake_temp = np.squeeze(fake_temp[0]).transpose([1, 2, 0])
        input_temp = np.squeeze(data).transpose([1, 2, 0])

        imsave(args.output + "/fake_" + image_name,
               ((fake_temp + 1) * 127.5).astype(np.uint8))
Example 12
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            dict_dim, emb_dim = 128, 64
            data = fluid.data(
                name='step_data', shape=[None], dtype='int64', lod_level=1)
            emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])
            hidden_dim = 512
            x = fluid.layers.fc(input=emb, size=hidden_dim * 3, bias_attr=False)
            hidden = fluid.layers.dynamic_gru(
                input=x,
                size=hidden_dim,
                bias_attr=True,
                origin_mode=False,
                is_reverse=True)

        batch = 16
        lod_tensor = fluid.LoDTensor()
        lod_tensor.set(np.random.randint(
            0, dict_dim, size=[batch]).astype("int64"),
                       fluid.CPUPlace())
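        # offset-style LoD: all `batch` ids are treated as a single sequence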
        lod_tensor.set_lod([[0, batch]])
        self.feeds = {"step_data": lod_tensor}
        self.fetch_list = [hidden]
Example 13
def to_lodtensor(data, seq_lens, place):
    """ convert to LoDTensor """
    cur_len = 0
    lod = [cur_len]

    data_array = []
    for idx, seq in enumerate(seq_lens):
        if seq > 0:
            data_array.append(data[idx, :seq])

            cur_len += seq
            lod.append(cur_len)
        else:
            data_array.append(np.zeros([1, 1], dtype='int64'))
            cur_len += 1
            lod.append(cur_len)
    flattened_data = np.concatenate(data_array, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = fluid.LoDTensor()
    res.set(flattened_data, place)

    res.set_lod([lod])
    return res
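A minimal sketch of calling the `to_lodtensor` above on padded rows (the array, lengths, and CPU place are invented for illustration):

import numpy as np
import paddle.fluid as fluid

data = np.arange(8, dtype='int64').reshape(2, 4)   # two padded rows of width 4
seq_lens = [3, 2]                                  # valid length of each row
t = to_lodtensor(data, seq_lens, fluid.CPUPlace())
# keeps data[0, :3] and data[1, :2] -> flattened shape (5, 1), t.lod() == [[0, 3, 5]]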
Example 14
    def prepare_lod_data(self):
        def fake_data_generator():
            for n in range(1, self.total_ins_num + 1):
                d1 = (np.ones((n, 3)) * n).astype('float32')
                d2 = (np.array(n).reshape((1, 1))).astype('int32')
                yield d1, d2

        # Prepare lod data
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            with fluid.recordio_writer.create_recordio_writer(
                    filename=self.lod_data_file_name) as writer:
                eof = False
                generator = fake_data_generator()
                while (not eof):
                    data_batch = [
                        np.array([]).reshape((0, 3)),
                        np.array([]).reshape((0, 1))
                    ]
                    lod = [0]
                    for _ in range(self.batch_size):
                        try:
                            ins = next(generator)
                        except StopIteration:
                            eof = True
                            break
                        for i, d in enumerate(ins):
                            data_batch[i] = np.concatenate((data_batch[i], d),
                                                           axis=0)
                        lod.append(lod[-1] + ins[0].shape[0])
                    if data_batch[0].shape[0] > 0:
                        for i, d in enumerate(data_batch):
                            t = fluid.LoDTensor()
                            t.set(data_batch[i], fluid.CPUPlace())
                            if i == 0:
                                t.set_lod([lod])
                            writer.append_tensor(t)
                        writer.complete_append_tensor()
Example 15
    def setUp(self):
        with fluid.program_guard(self.main_program, self.startup_program):
            dict_dim, emb_dim = 128, 64
            hidden_dim = 512

            data = fluid.data(name='data',
                              shape=[1],
                              dtype='int64',
                              lod_level=1)
            emb = fluid.embedding(input=data, size=[dict_dim, emb_dim])
            x = fluid.layers.fc(input=emb,
                                size=hidden_dim * 4,
                                bias_attr=False)
            forward, cell = fluid.layers.dynamic_lstm(input=x,
                                                      size=hidden_dim * 4)

        batch = 16
        lod_tensor = fluid.LoDTensor()
        lod_tensor.set(
            np.random.randint(0, dict_dim, size=[batch]).astype("int64"),
            fluid.CPUPlace())
        lod_tensor.set_lod([[0, batch]])
        self.feeds = {"data": lod_tensor}
        self.fetch_list = [forward, cell]
Example 16
def correct_rois(rois, x_scale, y_scale, x_offset, y_offset):
    lod = rois.lod()[0]
    rois = np.array(rois)

    rois_lst = []
    for i in range(len(lod) - 1):
        rois_lst.append(rois[lod[i]:lod[i + 1]])

    x_scale = np.reshape(np.array(x_scale), [-1])
    y_scale = np.reshape(np.array(y_scale), [-1])
    x_offset = np.reshape(np.array(x_offset), [-1])
    y_offset = np.reshape(np.array(y_offset), [-1])

    rois_lst_ = []
    for r, xs, ys, xo, yo in zip(rois_lst, x_scale, y_scale, x_offset,
                                 y_offset):
        scale = np.array([xs, ys, xs, ys])
        offset = np.array([xo, yo, xo, yo])
        rois_lst_.append(r * scale + offset)
    flatten_corrected_rois = np.concatenate(rois_lst_, axis=0)
    res = fluid.LoDTensor()
    res.set(flatten_corrected_rois, fluid.CPUPlace())
    res.set_lod([lod])
    return res
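A rough usage sketch for `correct_rois`, building a tiny RoI LoDTensor by hand (all values and the two-image split are assumptions):

import numpy as np
import paddle.fluid as fluid

rois = fluid.LoDTensor()
rois.set(np.random.rand(5, 4).astype('float32'), fluid.CPUPlace())
rois.set_lod([[0, 2, 5]])  # image 0 has 2 RoIs, image 1 has 3
fixed = correct_rois(rois,
                     x_scale=[2.0, 1.5], y_scale=[2.0, 1.5],
                     x_offset=[0.0, 10.0], y_offset=[0.0, 5.0])
# each image's boxes are scaled and shifted independently; the LoD is preserved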
Example 17
        def generate_func(
                rpn_rois,
                gt_classes,
                is_crowd,
                gt_boxes,
                im_info, ):
            rpn_rois_lod = rpn_rois.lod()[0]
            gt_classes_lod = gt_classes.lod()[0]

            # convert
            rpn_rois = np.array(rpn_rois)
            gt_classes = np.array(gt_classes)
            is_crowd = np.array(is_crowd)
            gt_boxes = np.array(gt_boxes)
            im_info = np.array(im_info)

            rois = []
            labels_int32 = []
            bbox_targets = []
            bbox_inside_weights = []
            bbox_outside_weights = []
            lod = [0]

            for idx in range(len(rpn_rois_lod) - 1):
                rois_si = rpn_rois_lod[idx]
                rois_ei = rpn_rois_lod[idx + 1]

                gt_si = gt_classes_lod[idx]
                gt_ei = gt_classes_lod[idx + 1]
                frcn_blobs = _sample_rois(
                    rpn_rois[rois_si:rois_ei], gt_classes[gt_si:gt_ei],
                    is_crowd[gt_si:gt_ei], gt_boxes[gt_si:gt_ei], im_info[idx],
                    batch_size_per_im, fg_fraction, fg_thresh, bg_thresh_hi,
                    bg_thresh_lo, bbox_reg_weights, class_nums, use_random,
                    is_cls_agnostic, is_cascade_rcnn)
                lod.append(frcn_blobs['rois'].shape[0] + lod[-1])
                rois.append(frcn_blobs['rois'])
                labels_int32.append(frcn_blobs['labels_int32'].reshape(-1, 1))
                bbox_targets.append(frcn_blobs['bbox_targets'])
                bbox_inside_weights.append(frcn_blobs['bbox_inside_weights'])
                bbox_outside_weights.append(frcn_blobs['bbox_outside_weights'])

            rois = np.vstack(rois)
            labels_int32 = np.vstack(labels_int32)
            bbox_targets = np.vstack(bbox_targets)
            bbox_inside_weights = np.vstack(bbox_inside_weights)
            bbox_outside_weights = np.vstack(bbox_outside_weights)

            # create lod-tensor for return
            # notice that the func create_lod_tensor does not work well here
            ret_rois = fluid.LoDTensor()
            ret_rois.set_lod([lod])
            ret_rois.set(rois.astype("float32"), fluid.CPUPlace())

            ret_labels_int32 = fluid.LoDTensor()
            ret_labels_int32.set_lod([lod])
            ret_labels_int32.set(
                labels_int32.astype("int32"), fluid.CPUPlace())

            ret_bbox_targets = fluid.LoDTensor()
            ret_bbox_targets.set_lod([lod])
            ret_bbox_targets.set(
                bbox_targets.astype("float32"), fluid.CPUPlace())

            ret_bbox_inside_weights = fluid.LoDTensor()
            ret_bbox_inside_weights.set_lod([lod])
            ret_bbox_inside_weights.set(
                bbox_inside_weights.astype("float32"), fluid.CPUPlace())

            ret_bbox_outside_weights = fluid.LoDTensor()
            ret_bbox_outside_weights.set_lod([lod])
            ret_bbox_outside_weights.set(
                bbox_outside_weights.astype("float32"), fluid.CPUPlace())

            return ret_rois, ret_labels_int32, ret_bbox_targets, ret_bbox_inside_weights, ret_bbox_outside_weights
Example 18
def fluid_create_lod_tensor(array, lod, place):
    assert isinstance(array, np.ndarray), (type(array))
    tensor = fluid.LoDTensor()
    tensor.set(array, place)
    tensor.set_lod(lod)
    return tensor
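A minimal usage sketch for `fluid_create_lod_tensor`, with a made-up array and an offset-style LoD:

import numpy as np
import paddle.fluid as fluid

arr = np.random.rand(5, 3).astype('float32')
t = fluid_create_lod_tensor(arr, [[0, 2, 5]], fluid.CPUPlace())
# t.lod() == [[0, 2, 5]]: two sequences of 2 and 3 rows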
Example 19
def infer(args):
    data_shape = [-1, 3, args.image_size, args.image_size]
    input = fluid.layers.data(name='input', shape=data_shape, dtype='float32')
    label_org_ = fluid.layers.data(name='label_org_',
                                   shape=[args.c_dim],
                                   dtype='float32')
    label_trg_ = fluid.layers.data(name='label_trg_',
                                   shape=[args.c_dim],
                                   dtype='float32')
    image_name = fluid.layers.data(name='image_name',
                                   shape=[args.n_samples],
                                   dtype='int32')

    model_name = 'net_G'
    if args.model_net == 'CycleGAN':
        py_reader = fluid.io.PyReader(
            feed_list=[input, image_name],
            capacity=4,  ## batch_size * 4
            iterable=True,
            use_double_buffer=True)
        from network.CycleGAN_network import CycleGAN_model
        model = CycleGAN_model()
        if args.input_style == "A":
            fake = model.network_G(input, name="GA", cfg=args)
        elif args.input_style == "B":
            fake = model.network_G(input, name="GB", cfg=args)
        else:
            raise "Input with style [%s] is not supported." % args.input_style
    elif args.model_net == 'Pix2pix':
        py_reader = fluid.io.PyReader(
            feed_list=[input, image_name],
            capacity=4,  ## batch_size * 4
            iterable=True,
            use_double_buffer=True)

        from network.Pix2pix_network import Pix2pix_model
        model = Pix2pix_model()
        fake = model.network_G(input, "generator", cfg=args)
    elif args.model_net == 'StarGAN':

        py_reader = fluid.io.PyReader(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)

        from network.StarGAN_network import StarGAN_model
        model = StarGAN_model()
        fake = model.network_G(input, label_trg_, name="g_main", cfg=args)
    elif args.model_net == 'STGAN':
        from network.STGAN_network import STGAN_model

        py_reader = fluid.io.PyReader(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)

        model = STGAN_model()
        fake, _ = model.network_G(input,
                                  label_org_,
                                  label_trg_,
                                  cfg=args,
                                  name='generator',
                                  is_test=True)
    elif args.model_net == 'AttGAN':
        from network.AttGAN_network import AttGAN_model

        py_reader = fluid.io.PyReader(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)

        model = AttGAN_model()
        fake, _ = model.network_G(input,
                                  label_org_,
                                  label_trg_,
                                  cfg=args,
                                  name='generator',
                                  is_test=True)
    elif args.model_net == 'CGAN':
        noise = fluid.layers.data(name='noise',
                                  shape=[args.noise_size],
                                  dtype='float32')
        conditions = fluid.layers.data(name='conditions',
                                       shape=[1],
                                       dtype='float32')

        from network.CGAN_network import CGAN_model
        model = CGAN_model(args.n_samples)
        fake = model.network_G(noise, conditions, name="G")
    elif args.model_net == 'DCGAN':
        noise = fluid.layers.data(name='noise',
                                  shape=[args.noise_size],
                                  dtype='float32')

        from network.DCGAN_network import DCGAN_model
        model = DCGAN_model(args.n_samples)
        fake = model.network_G(noise, name="G")
    else:
        raise NotImplementedError("model_net {} is not supported".format(
            args.model_net))

    def _compute_start_end(image_name):
        image_name_start = np.array(image_name)[0].astype('int32')
        image_name_end = image_name_start + args.n_samples - 1
        image_name_save = str(np.array(image_name)[0].astype('int32')) + '.jpg'
        print("read {}.jpg ~ {}.jpg".format(image_name_start, image_name_end))
        return image_name_save

    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    for var in fluid.default_main_program().global_block().all_parameters():
        print(var.name)
    print(args.init_model + '/' + model_name)
    fluid.io.load_persistables(exe, os.path.join(args.init_model, model_name))
    print('load params done')
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    attr_names = args.selected_attrs.split(',')

    if args.model_net == 'AttGAN' or args.model_net == 'STGAN':
        test_reader = celeba_reader_creator(image_dir=args.dataset_dir,
                                            list_filename=args.test_list,
                                            args=args,
                                            mode="VAL")
        reader_test = test_reader.make_reader(return_name=True)
        py_reader.decorate_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        for data in py_reader():
            real_img, label_org, label_trg, image_name = data[0][
                'input'], data[0]['label_org_'], data[0]['label_trg_'], data[
                    0]['image_name']
            image_name_save = _compute_start_end(image_name)
            real_img_temp = save_batch_image(np.array(real_img))
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(np.array(label_trg))
                for j in range(len(label_trg_tmp)):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                label_org_tmp = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, np.array(label_org)))
                label_trg_tmp = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, label_trg_tmp))
                if args.model_net == 'AttGAN':
                    for k in range(len(label_trg_tmp)):
                        label_trg_tmp[k][i] = label_trg_tmp[k][i] * 2.0
                tensor_label_org_ = fluid.LoDTensor()
                tensor_label_trg_ = fluid.LoDTensor()
                tensor_label_org_.set(label_org_tmp, place)
                tensor_label_trg_.set(label_trg_tmp, place)
                out = exe.run(feed={
                    "input": real_img,
                    "label_org_": tensor_label_org_,
                    "label_trg_": tensor_label_trg_
                },
                              fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(np.array(label_org)) > 1:
                images_concat = np.concatenate(images_concat, 1)
            imageio.imwrite(
                os.path.join(args.output, "fake_img_" + image_name_save),
                ((images_concat + 1) * 127.5).astype(np.uint8))
    elif args.model_net == 'StarGAN':
        test_reader = celeba_reader_creator(image_dir=args.dataset_dir,
                                            list_filename=args.test_list,
                                            args=args,
                                            mode="VAL")
        reader_test = test_reader.make_reader(return_name=True)
        py_reader.decorate_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        for data in py_reader():
            real_img, label_org, label_trg, image_name = data[0][
                'input'], data[0]['label_org_'], data[0]['label_trg_'], data[
                    0]['image_name']
            image_name_save = _compute_start_end(image_name)
            real_img_temp = save_batch_image(np.array(real_img))
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(np.array(label_org))
                for j in range(len(np.array(label_org))):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                tensor_label_trg_ = fluid.LoDTensor()
                tensor_label_trg_.set(label_trg_tmp, place)
                out = exe.run(feed={
                    "input": real_img,
                    "label_trg_": tensor_label_trg_
                },
                              fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(np.array(label_org)) > 1:
                images_concat = np.concatenate(images_concat, 1)
            imageio.imwrite(
                os.path.join(args.output, "fake_img_" + image_name_save),
                ((images_concat + 1) * 127.5).astype(np.uint8))

    elif args.model_net == 'Pix2pix' or args.model_net == 'CycleGAN':
        test_reader = reader_creator(image_dir=args.dataset_dir,
                                     list_filename=args.test_list,
                                     shuffle=False,
                                     batch_size=args.n_samples,
                                     mode="VAL")
        reader_test = test_reader.make_reader(args, return_name=True)
        py_reader.decorate_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        id2name = test_reader.id2name
        for data in py_reader():
            real_img, image_name = data[0]['input'], data[0]['image_name']
            image_name = id2name[np.array(image_name).astype('int32')[0]]
            print("read: ", image_name)
            fake_temp = exe.run(fetch_list=[fake.name],
                                feed={"input": real_img})
            fake_temp = np.squeeze(fake_temp[0]).transpose([1, 2, 0])
            input_temp = np.squeeze(np.array(real_img)[0]).transpose([1, 2, 0])

            imageio.imwrite(os.path.join(args.output, "fake_" + image_name),
                            ((fake_temp + 1) * 127.5).astype(np.uint8))

    elif args.model_net == 'CGAN':
        noise_data = np.random.uniform(low=-1.0,
                                       high=1.0,
                                       size=[args.n_samples, args.noise_size
                                             ]).astype('float32')
        label = np.random.randint(0, 9, size=[args.n_samples,
                                              1]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        conditions_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        conditions_tensor.set(label, place)
        fake_temp = exe.run(fetch_list=[fake.name],
                            feed={
                                "noise": noise_tensor,
                                "conditions": conditions_tensor
                            })[0]
        fake_image = np.reshape(fake_temp, (args.n_samples, -1))

        fig = utility.plot(fake_image)
        plt.savefig(os.path.join(args.output, 'fake_cgan.png'),
                    bbox_inches='tight')
        plt.close(fig)

    elif args.model_net == 'DCGAN':
        noise_data = np.random.uniform(low=-1.0,
                                       high=1.0,
                                       size=[args.n_samples, args.noise_size
                                             ]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        fake_temp = exe.run(fetch_list=[fake.name],
                            feed={"noise": noise_tensor})[0]
        fake_image = np.reshape(fake_temp, (args.n_samples, -1))

        fig = utility.plot(fake_image)
        plt.savefig(os.path.join(args.output, 'fake_dcgan.png'),
                    bbox_inches='tight')
        plt.close(fig)
    else:
        raise NotImplementedError("model_net {} is not supported".format(
            args.model_net))

Example 20
test_program = fluid.default_main_program().clone(for_test=True)
test(exe, test_reader, test_program)

# In[109]:

# # Inference
# Prepare the inference input data here; see `a` below for the format. For now the training data is used as a stand-in.

# Create and use a scope
inference_scope = fluid.core.Scope()

with fluid.scope_guard(inference_scope):
    # Load the inference model
    path = 'model/epoch_49'
    [inference_program, feed_target_names,
     fetch_targets] = fluid.io.load_inference_model(dirname=path, executor=exe)

    for data in train_reader():
        t = fluid.LoDTensor()
        a = np.array(data[0][0], dtype='float32').reshape(64, 1)
        t.set(a, fluid.CPUPlace())
        t.set_lod([[0, 64]])
        result = exe.run(program=inference_program,
                         feed={feed_target_names[0]: t},
                         fetch_list=fetch_targets)
        print(result[0])
        result = [0 if i < 0.43 else 1 for i in result]  # float -> int; whether to do this depends on the use case
        print(result)
Example 21
def infer(args):
    data_shape = [-1, 3, args.image_size, args.image_size]
    input = fluid.layers.data(name='input', shape=data_shape, dtype='float32')
    label_org_ = fluid.layers.data(
        name='label_org_', shape=[args.c_dim], dtype='float32')
    label_trg_ = fluid.layers.data(
        name='label_trg_', shape=[args.c_dim], dtype='float32')

    model_name = 'net_G'
    if args.model_net == 'CycleGAN':
        from network.CycleGAN_network import CycleGAN_model
        model = CycleGAN_model()
        if args.input_style == "A":
            fake = model.network_G(input, name="GA", cfg=args)
        elif args.input_style == "B":
            fake = model.network_G(input, name="GB", cfg=args)
        else:
            raise "Input with style [%s] is not supported." % args.input_style
    elif args.model_net == 'Pix2pix':
        from network.Pix2pix_network import Pix2pix_model
        model = Pix2pix_model()
        fake = model.network_G(input, "generator", cfg=args)
    elif args.model_net == 'StarGAN':
        from network.StarGAN_network import StarGAN_model
        model = StarGAN_model()
        fake = model.network_G(input, label_trg_, name="g_main", cfg=args)
    elif args.model_net == 'STGAN':
        from network.STGAN_network import STGAN_model
        model = STGAN_model()
        fake, _ = model.network_G(
            input,
            label_org_,
            label_trg_,
            cfg=args,
            name='generator',
            is_test=True)
    elif args.model_net == 'AttGAN':
        from network.AttGAN_network import AttGAN_model
        model = AttGAN_model()
        fake, _ = model.network_G(
            input,
            label_org_,
            label_trg_,
            cfg=args,
            name='generator',
            is_test=True)
    elif args.model_net == 'CGAN':
        noise = fluid.layers.data(
            name='noise', shape=[args.noise_size], dtype='float32')
        conditions = fluid.layers.data(
            name='conditions', shape=[1], dtype='float32')

        from network.CGAN_network import CGAN_model
        model = CGAN_model()
        fake = model.network_G(noise, conditions, name="G")
    elif args.model_net == 'DCGAN':
        noise = fluid.layers.data(
            name='noise', shape=[args.noise_size], dtype='float32')

        from network.DCGAN_network import DCGAN_model
        model = DCGAN_model()
        fake = model.network_G(noise, name="G")
    else:
        raise NotImplementedError("model_net {} is not supported".format(
            args.model_net))

    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    for var in fluid.default_main_program().global_block().all_parameters():
        print(var.name)
    print(args.init_model + '/' + model_name)
    fluid.io.load_persistables(exe, args.init_model + "/" + model_name)
    print('load params done')
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    attr_names = args.selected_attrs.split(',')

    if args.model_net == 'AttGAN' or args.model_net == 'STGAN':
        test_reader = celeba_reader_creator(
            image_dir=args.dataset_dir,
            list_filename=args.test_list,
            batch_size=args.batch_size,
            drop_last=False,
            args=args)
        reader_test = test_reader.get_test_reader(
            args, shuffle=False, return_name=True)
        for data in zip(reader_test()):
            real_img, label_org, name = data[0]
            print("read {}".format(name))
            label_trg = copy.deepcopy(label_org)
            tensor_img = fluid.LoDTensor()
            tensor_label_org = fluid.LoDTensor()
            tensor_label_trg = fluid.LoDTensor()
            tensor_label_org_ = fluid.LoDTensor()
            tensor_label_trg_ = fluid.LoDTensor()
            tensor_img.set(real_img, place)
            tensor_label_org.set(label_org, place)
            real_img_temp = save_batch_image(real_img)
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(label_trg)
                for j in range(len(label_org)):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                label_org_ = list(map(lambda x: ((x * 2) - 1) * 0.5, label_org))
                label_trg_ = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, label_trg_tmp))
                if args.model_net == 'AttGAN':
                    for k in range(len(label_org)):
                        label_trg_[k][i] = label_trg_[k][i] * 2.0
                tensor_label_org_.set(label_org_, place)
                tensor_label_trg.set(label_trg, place)
                tensor_label_trg_.set(label_trg_, place)
                out = exe.run(feed={
                    "input": tensor_img,
                    "label_org_": tensor_label_org_,
                    "label_trg_": tensor_label_trg_
                },
                              fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(label_org) > 1:
                images_concat = np.concatenate(images_concat, 1)
            imageio.imwrite(args.output + "/fake_img_" + name[0], (
                (images_concat + 1) * 127.5).astype(np.uint8))
    elif args.model_net == 'StarGAN':
        test_reader = celeba_reader_creator(
            image_dir=args.dataset_dir,
            list_filename=args.test_list,
            batch_size=args.batch_size,
            drop_last=False,
            args=args)
        reader_test = test_reader.get_test_reader(
            args, shuffle=False, return_name=True)
        for data in zip(reader_test()):
            real_img, label_org, name = data[0]
            print("read {}".format(name))
            tensor_img = fluid.LoDTensor()
            tensor_label_org = fluid.LoDTensor()
            tensor_img.set(real_img, place)
            tensor_label_org.set(label_org, place)
            real_img_temp = save_batch_image(real_img)
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(label_org)
                for j in range(len(label_org)):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                tensor_label_trg = fluid.LoDTensor()
                tensor_label_trg.set(label_trg, place)
                out = exe.run(
                    feed={"input": tensor_img,
                          "label_trg_": tensor_label_trg},
                    fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(label_org) > 1:
                images_concat = np.concatenate(images_concat, 1)
            imageio.imwrite(args.output + "/fake_img_" + name[0], (
                (images_concat + 1) * 127.5).astype(np.uint8))

    elif args.model_net == 'Pix2pix' or args.model_net == 'CycleGAN':
        for file in glob.glob(args.dataset_dir):
            print("read {}".format(file))
            image_name = os.path.basename(file)
            image = Image.open(file).convert('RGB')
            image = image.resize((256, 256), Image.BICUBIC)
            image = np.array(image).transpose([2, 0, 1]).astype('float32')
            image = image / 255.0
            image = (image - 0.5) / 0.5
            data = image[np.newaxis, :]
            tensor = fluid.LoDTensor()
            tensor.set(data, place)

            fake_temp = exe.run(fetch_list=[fake.name], feed={"input": tensor})
            fake_temp = np.squeeze(fake_temp[0]).transpose([1, 2, 0])
            input_temp = np.squeeze(data).transpose([1, 2, 0])

            imageio.imwrite(args.output + "/fake_" + image_name, (
                (fake_temp + 1) * 127.5).astype(np.uint8))

    elif args.model_net == 'CGAN':
        noise_data = np.random.uniform(
            low=-1.0, high=1.0,
            size=[args.batch_size, args.noise_size]).astype('float32')
        label = np.random.randint(
            0, 9, size=[args.batch_size, 1]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        conditions_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        conditions_tensor.set(label, place)
        fake_temp = exe.run(
            fetch_list=[fake.name],
            feed={"noise": noise_tensor,
                  "conditions": conditions_tensor})[0]
        fake_image = np.reshape(fake_temp, (args.batch_size, -1))

        fig = utility.plot(fake_image)
        plt.savefig(args.output + '/fake_cgan.png', bbox_inches='tight')
        plt.close(fig)

    elif args.model_net == 'DCGAN':
        noise_data = np.random.uniform(
            low=-1.0, high=1.0,
            size=[args.batch_size, args.noise_size]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        fake_temp = exe.run(fetch_list=[fake.name],
                            feed={"noise": noise_tensor})[0]
        fake_image = np.reshape(fake_temp, (args.batch_size, -1))

        fig = utility.plot(fake_image)
        plt.savefig(args.output + '/fake_dcgan.png', bbox_inches='tight')
        plt.close(fig)
    else:
        raise NotImplementedError("model_net {} is not supported".format(
            args.model_net))
Example 22
def train(args):

    max_images_num = data_reader.max_images_num()
    shuffle = True
    if args.run_ce:
        np.random.seed(10)
        fluid.default_startup_program().random_seed = 90
        max_images_num = 1
        shuffle = False
    data_shape = [-1] + data_reader.image_shape()

    input_A = fluid.layers.data(name='input_A',
                                shape=data_shape,
                                dtype='float32')
    input_B = fluid.layers.data(name='input_B',
                                shape=data_shape,
                                dtype='float32')
    fake_pool_A = fluid.layers.data(name='fake_pool_A',
                                    shape=data_shape,
                                    dtype='float32')
    fake_pool_B = fluid.layers.data(name='fake_pool_B',
                                    shape=data_shape,
                                    dtype='float32')

    g_A_trainer = GATrainer(input_A, input_B)
    g_B_trainer = GBTrainer(input_A, input_B)
    d_A_trainer = DATrainer(input_A, fake_pool_A)
    d_B_trainer = DBTrainer(input_B, fake_pool_B)

    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    A_pool = ImagePool()
    B_pool = ImagePool()

    A_reader = paddle.batch(data_reader.a_reader(shuffle=shuffle),
                            args.batch_size)()
    B_reader = paddle.batch(data_reader.b_reader(shuffle=shuffle),
                            args.batch_size)()
    if not args.run_ce:
        A_test_reader = data_reader.a_test_reader()
        B_test_reader = data_reader.b_test_reader()

    def test(epoch):
        out_path = args.output + "/test"
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        i = 0
        for data_A, data_B in zip(A_test_reader(), B_test_reader()):
            A_name = data_A[1]
            B_name = data_B[1]
            tensor_A = fluid.LoDTensor()
            tensor_B = fluid.LoDTensor()
            tensor_A.set(data_A[0], place)
            tensor_B.set(data_B[0], place)
            fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = exe.run(
                g_A_trainer.infer_program,
                fetch_list=[
                    g_A_trainer.fake_A, g_A_trainer.fake_B, g_A_trainer.cyc_A,
                    g_A_trainer.cyc_B
                ],
                feed={
                    "input_A": tensor_A,
                    "input_B": tensor_B
                })
            fake_A_temp = np.squeeze(fake_A_temp[0]).transpose([1, 2, 0])
            fake_B_temp = np.squeeze(fake_B_temp[0]).transpose([1, 2, 0])
            cyc_A_temp = np.squeeze(cyc_A_temp[0]).transpose([1, 2, 0])
            cyc_B_temp = np.squeeze(cyc_B_temp[0]).transpose([1, 2, 0])
            input_A_temp = np.squeeze(data_A[0]).transpose([1, 2, 0])
            input_B_temp = np.squeeze(data_B[0]).transpose([1, 2, 0])

            imsave(out_path + "/fakeB_" + str(epoch) + "_" + A_name,
                   ((fake_B_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/fakeA_" + str(epoch) + "_" + B_name,
                   ((fake_A_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/cycA_" + str(epoch) + "_" + A_name,
                   ((cyc_A_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/cycB_" + str(epoch) + "_" + B_name,
                   ((cyc_B_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/inputA_" + str(epoch) + "_" + A_name,
                   ((input_A_temp + 1) * 127.5).astype(np.uint8))
            imsave(out_path + "/inputB_" + str(epoch) + "_" + B_name,
                   ((input_B_temp + 1) * 127.5).astype(np.uint8))
            i += 1

    def checkpoints(epoch):
        out_path = args.output + "/checkpoints/" + str(epoch)
        if not os.path.exists(out_path):
            os.makedirs(out_path)
        fluid.io.save_persistables(exe,
                                   out_path + "/g_a",
                                   main_program=g_A_trainer.program)
        fluid.io.save_persistables(exe,
                                   out_path + "/g_b",
                                   main_program=g_B_trainer.program)
        fluid.io.save_persistables(exe,
                                   out_path + "/d_a",
                                   main_program=d_A_trainer.program)
        fluid.io.save_persistables(exe,
                                   out_path + "/d_b",
                                   main_program=d_B_trainer.program)
        print("saved checkpoint to {}".format(out_path))
        sys.stdout.flush()

    def init_model():
        assert os.path.exists(
            args.init_model), "[%s] cannot be found." % args.init_model
        fluid.io.load_persistables(exe,
                                   args.init_model + "/g_a",
                                   main_program=g_A_trainer.program)
        fluid.io.load_persistables(exe,
                                   args.init_model + "/g_b",
                                   main_program=g_B_trainer.program)
        fluid.io.load_persistables(exe,
                                   args.init_model + "/d_a",
                                   main_program=d_A_trainer.program)
        fluid.io.load_persistables(exe,
                                   args.init_model + "/d_b",
                                   main_program=d_B_trainer.program)
        print("Load model from {}".format(args.init_model))

    if args.init_model:
        init_model()
    losses = [[], []]
    t_time = 0
    build_strategy = fluid.BuildStrategy()
    build_strategy.enable_inplace = False
    build_strategy.memory_optimize = False

    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = 1
    exec_strategy.use_experimental_executor = True

    g_A_trainer_program = fluid.CompiledProgram(
        g_A_trainer.program).with_data_parallel(
            loss_name=g_A_trainer.g_loss_A.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
    g_B_trainer_program = fluid.CompiledProgram(
        g_B_trainer.program).with_data_parallel(
            loss_name=g_B_trainer.g_loss_B.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
    d_B_trainer_program = fluid.CompiledProgram(
        d_B_trainer.program).with_data_parallel(
            loss_name=d_B_trainer.d_loss_B.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
    d_A_trainer_program = fluid.CompiledProgram(
        d_A_trainer.program).with_data_parallel(
            loss_name=d_A_trainer.d_loss_A.name,
            build_strategy=build_strategy,
            exec_strategy=exec_strategy)
    for epoch in range(args.epoch):
        batch_id = 0
        for i in range(max_images_num):
            data_A = next(A_reader)
            data_B = next(B_reader)
            tensor_A = fluid.LoDTensor()
            tensor_B = fluid.LoDTensor()
            tensor_A.set(data_A, place)
            tensor_B.set(data_B, place)
            s_time = time.time()
            # optimize the g_A network
            g_A_loss, fake_B_tmp = exe.run(
                g_A_trainer_program,
                fetch_list=[g_A_trainer.g_loss_A, g_A_trainer.fake_B],
                feed={
                    "input_A": tensor_A,
                    "input_B": tensor_B
                })

            fake_pool_B = B_pool.pool_image(fake_B_tmp)

            # optimize the d_B network
            d_B_loss = exe.run(d_B_trainer_program,
                               fetch_list=[d_B_trainer.d_loss_B],
                               feed={
                                   "input_B": tensor_B,
                                   "fake_pool_B": fake_pool_B
                               })[0]

            # optimize the g_B network
            g_B_loss, fake_A_tmp = exe.run(
                g_B_trainer_program,
                fetch_list=[g_B_trainer.g_loss_B, g_B_trainer.fake_A],
                feed={
                    "input_A": tensor_A,
                    "input_B": tensor_B
                })

            fake_pool_A = A_pool.pool_image(fake_A_tmp)

            # optimize the d_A network
            d_A_loss = exe.run(d_A_trainer_program,
                               fetch_list=[d_A_trainer.d_loss_A],
                               feed={
                                   "input_A": tensor_A,
                                   "fake_pool_A": fake_pool_A
                               })[0]
            batch_time = time.time() - s_time
            t_time += batch_time
            print(
                "epoch{}; batch{}; g_A_loss: {}; d_B_loss: {}; g_B_loss: {}; d_A_loss: {}; "
                "Batch_time_cost: {}".format(epoch, batch_id, g_A_loss[0],
                                             d_B_loss[0], g_B_loss[0],
                                             d_A_loss[0], batch_time))
            losses[0].append(g_A_loss[0])
            losses[1].append(d_A_loss[0])
            sys.stdout.flush()
            batch_id += 1

        if args.run_test and not args.run_ce:
            test(epoch)
        if args.save_checkpoints and not args.run_ce:
            checkpoints(epoch)
    if args.run_ce:
        print("kpis,g_train_cost,{}".format(np.mean(losses[0])))
        print("kpis,d_train_cost,{}".format(np.mean(losses[1])))
        print("kpis,duration,{}".format(t_time / args.epoch))
Example 23
def to_lodtensor(data, place, lod=None):
    data_tensor = fluid.LoDTensor()
    data_tensor.set(data, place)
    if lod is not None:
        data_tensor.set_lod(lod)
    return data_tensor
Example 24
predict = fluid.layers.reshape(predict, shape=[-1, 19])
_, predict = fluid.layers.topk(predict, k=1)
predict1 = fluid.layers.reshape(predict,
                                shape=[data_shape[1], data_shape[2],
                                       -1])  # batch_size should be 1
inference_program = fluid.default_main_program().clone(for_test=True)
place = fluid.CPUPlace()
# if args.use_gpu:
#     place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
assert os.path.exists(model_path)
fluid.io.load_params(exe, model_path)
print("loaded model from: %s" % model_path)
# sys.stdout.flush()
# image -= IMG_MEAN
img = paddle.dataset.image.to_chw(image)[np.newaxis, :]
# print(image.shape())
image_t = fluid.LoDTensor()
image_t.set(img, place)
result = exe.run(inference_program,
                 feed={"image": image_t},
                 fetch_list=[predict1])
cv2.imwrite("output" + "/" + "_result.png", result[0])
# image = Image.fromarray(result)  # convert the matrix back into an image
# image.show()            # open it with the local image viewer ("Photos" on Windows 10)
q = result[0]
# print(type(q))
print(sum(q))
# image = Image.fromarray(q)  # convert the matrix back into an image
# image.show()            # open it with the local image viewer ("Photos" on Windows 10)
Example 25
def infer_from_ckpt(args):
    """Inference by using checkpoint."""

    if not os.path.exists(args.checkpoint):
        raise IOError("Invalid checkpoint!")

    feature = fluid.data(name='feature',
                         shape=[None, 3, 11, args.frame_dim],
                         dtype='float32',
                         lod_level=1)
    label = fluid.data(name='label',
                       shape=[None, 1],
                       dtype='int64',
                       lod_level=1)

    prediction, avg_cost, accuracy = stacked_lstmp_model(
        feature=feature,
        label=label,
        hidden_dim=args.hidden_dim,
        proj_dim=args.proj_dim,
        stacked_num=args.stacked_num,
        class_num=args.class_num,
        parallel=args.parallel)

    infer_program = fluid.default_main_program().clone()

    # optimizer, placeholder
    optimizer = fluid.optimizer.Adam(
        learning_rate=fluid.layers.exponential_decay(learning_rate=0.0001,
                                                     decay_steps=1879,
                                                     decay_rate=1 / 1.2,
                                                     staircase=True))
    optimizer.minimize(avg_cost)

    place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # load checkpoint.
    fluid.io.load_persistables(exe, args.checkpoint)

    # init decoder
    decoder = Decoder(args.trans_model, args.vocabulary, args.graphs,
                      args.log_prior, args.beam_size, args.acoustic_scale)

    ltrans = [
        trans_add_delta.TransAddDelta(2, 2),
        trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),
        trans_splice.TransSplice(5, 5),
        trans_delay.TransDelay(5)
    ]

    feature_t = fluid.LoDTensor()
    label_t = fluid.LoDTensor()

    # infer data reader
    infer_data_reader = reader.AsyncDataReader(args.infer_feature_lst,
                                               drop_frame_len=-1,
                                               split_sentence_threshold=-1)
    infer_data_reader.set_transformers(ltrans)

    decoding_result_writer = DecodingResultWriter(args.decode_to_path)
    post_matrix_writer = None if args.post_matrix_path is None \
                         else PostMatrixWriter(args.post_matrix_path)

    for batch_id, batch_data in enumerate(
            infer_data_reader.batch_iterator(args.batch_size,
                                             args.minimum_batch_size)):
        # load_data
        (features, labels, lod, name_lst) = batch_data
        features = np.reshape(features, (-1, 11, 3, args.frame_dim))
        features = np.transpose(features, (0, 2, 1, 3))
        feature_t.set(features, place)
        feature_t.set_lod([lod])
        label_t.set(labels, place)
        label_t.set_lod([lod])

        results = exe.run(infer_program,
                          feed={
                              "feature": feature_t,
                              "label": label_t
                          },
                          fetch_list=[prediction, avg_cost, accuracy],
                          return_numpy=False)

        probs, lod = lodtensor_to_ndarray(results[0])
        infer_batch = split_infer_result(probs, lod)

        print("Decoding batch %d ..." % batch_id)
        decoded = decoder.decode_batch(name_lst, infer_batch, args.num_threads)

        decoding_result_writer.write(decoded)

        if args.post_matrix_path is not None:
            post_matrix_writer.write(name_lst, infer_batch)
Example n. 26
    def build_model(self):
        data_shape = [-1, 3, self.cfg.crop_size, self.cfg.crop_size]

        input_A = fluid.layers.data(name='input_A',
                                    shape=data_shape,
                                    dtype='float32')
        input_B = fluid.layers.data(name='input_B',
                                    shape=data_shape,
                                    dtype='float32')
        fake_pool_A = fluid.layers.data(name='fake_pool_A',
                                        shape=data_shape,
                                        dtype='float32')
        fake_pool_B = fluid.layers.data(name='fake_pool_B',
                                        shape=data_shape,
                                        dtype='float32')

        gen_trainer = GTrainer(input_A, input_B, self.cfg, self.batch_num)
        d_A_trainer = DATrainer(input_B, fake_pool_B, self.cfg, self.batch_num)
        d_B_trainer = DBTrainer(input_A, fake_pool_A, self.cfg, self.batch_num)

        # prepare environment
        place = fluid.CUDAPlace(0) if self.cfg.use_gpu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        A_pool = utility.ImagePool()
        B_pool = utility.ImagePool()

        if self.cfg.init_model:
            utility.init_checkpoints(self.cfg, exe, gen_trainer, "net_G")
            utility.init_checkpoints(self.cfg, exe, d_A_trainer, "net_DA")
            utility.init_checkpoints(self.cfg, exe, d_B_trainer, "net_DB")

        ### memory optim
        build_strategy = fluid.BuildStrategy()
        build_strategy.enable_inplace = False
        build_strategy.memory_optimize = False

        gen_trainer_program = fluid.CompiledProgram(
            gen_trainer.program).with_data_parallel(
                loss_name=gen_trainer.g_loss.name,
                build_strategy=build_strategy)
        d_A_trainer_program = fluid.CompiledProgram(
            d_A_trainer.program).with_data_parallel(
                loss_name=d_A_trainer.d_loss_A.name,
                build_strategy=build_strategy)
        d_B_trainer_program = fluid.CompiledProgram(
            d_B_trainer.program).with_data_parallel(
                loss_name=d_B_trainer.d_loss_B.name,
                build_strategy=build_strategy)

        losses = [[], []]
        t_time = 0

        for epoch_id in range(self.cfg.epoch):
            batch_id = 0
            for i in range(self.batch_num):
                data_A = next(self.A_reader())
                data_B = next(self.B_reader())
                tensor_A = fluid.LoDTensor()
                tensor_B = fluid.LoDTensor()
                tensor_A.set(data_A, place)
                tensor_B.set(data_B, place)
                s_time = time.time()
                # optimize the g_A network
                g_A_loss, g_A_cyc_loss, g_A_idt_loss, g_B_loss, g_B_cyc_loss,\
                g_B_idt_loss, fake_A_tmp, fake_B_tmp = exe.run(
                    gen_trainer_program,
                    fetch_list=[
                        gen_trainer.G_A, gen_trainer.cyc_A_loss,
                        gen_trainer.idt_loss_A, gen_trainer.G_B,
                        gen_trainer.cyc_B_loss, gen_trainer.idt_loss_B,
                        gen_trainer.fake_A, gen_trainer.fake_B
                    ],
                    feed={"input_A": tensor_A,
                          "input_B": tensor_B})

                fake_pool_B = B_pool.pool_image(fake_B_tmp)
                fake_pool_A = A_pool.pool_image(fake_A_tmp)

                # optimize the d_A network
                d_A_loss = exe.run(d_A_trainer_program,
                                   fetch_list=[d_A_trainer.d_loss_A],
                                   feed={
                                       "input_B": tensor_B,
                                       "fake_pool_B": fake_pool_B
                                   })[0]

                # optimize the d_B network
                d_B_loss = exe.run(d_B_trainer_program,
                                   fetch_list=[d_B_trainer.d_loss_B],
                                   feed={
                                       "input_A": tensor_A,
                                       "fake_pool_A": fake_pool_A
                                   })[0]

                batch_time = time.time() - s_time
                t_time += batch_time
                if batch_id % self.cfg.print_freq == 0:
                    print("epoch{}: batch{}: \n\
                         d_A_loss: {}; g_A_loss: {}; g_A_cyc_loss: {}; g_A_idt_loss: {}; \n\
                         d_B_loss: {}; g_B_loss: {}; g_B_cyc_loss: {}; g_B_idt_loss: {}; \n\
                         Batch_time_cost: {:.2f}".format(
                        epoch_id, batch_id, d_A_loss[0], g_A_loss[0],
                        g_A_cyc_loss[0], g_A_idt_loss[0], d_B_loss[0],
                        g_B_loss[0], g_B_cyc_loss[0], g_B_idt_loss[0],
                        batch_time))

                losses[0].append(g_A_loss[0])
                losses[1].append(d_A_loss[0])
                sys.stdout.flush()
                batch_id += 1

            if self.cfg.run_test:
                test_program = gen_trainer.infer_program
                utility.save_test_image(epoch_id, self.cfg, exe, place,
                                        test_program, gen_trainer,
                                        self.A_test_reader, self.B_test_reader)

            if self.cfg.save_checkpoints:
                utility.checkpoints(epoch_id, self.cfg, exe, gen_trainer,
                                    "net_G")
                utility.checkpoints(epoch_id, self.cfg, exe, d_A_trainer,
                                    "net_DA")
                utility.checkpoints(epoch_id, self.cfg, exe, d_B_trainer,
                                    "net_DB")
Example n. 27
def save_test_image(epoch,
                    cfg,
                    exe,
                    place,
                    test_program,
                    g_trainer,
                    A_test_reader,
                    B_test_reader=None,
                    A_id2name=None,
                    B_id2name=None):
    out_path = os.path.join(cfg.output, 'test')
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    if cfg.model_net == "Pix2pix":
        for data in A_test_reader():
            A_data, B_data, image_name = data[0]['input_A'], data[0][
                'input_B'], data[0]['image_name']
            fake_B_temp = exe.run(test_program,
                                  fetch_list=[g_trainer.fake_B],
                                  feed={
                                      "input_A": A_data,
                                      "input_B": B_data
                                  })
            fake_B_temp = np.squeeze(fake_B_temp[0]).transpose([1, 2, 0])
            input_A_temp = np.squeeze(np.array(A_data)[0]).transpose([1, 2, 0])
            input_B_temp = np.squeeze(np.array(B_data)[0]).transpose([1, 2, 0])

            fakeB_name = "fakeB_" + str(epoch) + "_" + A_id2name[np.array(
                image_name).astype('int32')[0]]
            inputA_name = "inputA_" + str(epoch) + "_" + A_id2name[np.array(
                image_name).astype('int32')[0]]
            inputB_name = "inputB_" + str(epoch) + "_" + A_id2name[np.array(
                image_name).astype('int32')[0]]
            imageio.imwrite(os.path.join(out_path, fakeB_name),
                            ((fake_B_temp + 1) * 127.5).astype(np.uint8))
            imageio.imwrite(os.path.join(out_path, inputA_name),
                            ((input_A_temp + 1) * 127.5).astype(np.uint8))
            imageio.imwrite(os.path.join(out_path, inputB_name),
                            ((input_B_temp + 1) * 127.5).astype(np.uint8))
    elif cfg.model_net == "StarGAN":
        for data in A_test_reader():
            real_img, label_org, label_trg, image_name = data[0][
                'image_real'], data[0]['label_org'], data[0][
                    'label_trg'], data[0]['image_name']
            attr_names = cfg.selected_attrs.split(',')
            real_img_temp = save_batch_image(np.array(real_img))
            images = [real_img_temp]
            for i in range(cfg.c_dim):
                label_trg_tmp = copy.deepcopy(np.array(label_org))
                for j in range(len(np.array(label_org))):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    np_label_trg = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                label_trg.set(np_label_trg, place)
                fake_temp, rec_temp = exe.run(
                    test_program,
                    feed={
                        "image_real": real_img,
                        "label_org": label_org,
                        "label_trg": label_trg
                    },
                    fetch_list=[g_trainer.fake_img, g_trainer.rec_img])
                fake_temp = save_batch_image(fake_temp)
                rec_temp = save_batch_image(rec_temp)
                images.append(fake_temp)
                images.append(rec_temp)
            images_concat = np.concatenate(images, 1)
            if len(np.array(label_org)) > 1:
                images_concat = np.concatenate(images_concat, 1)
            image_name_save = "fake_img" + str(epoch) + "_" + str(
                np.array(image_name)[0].astype('int32')) + '.jpg'
            imageio.imwrite(os.path.join(out_path, image_name_save),
                            ((images_concat + 1) * 127.5).astype(np.uint8))
    elif cfg.model_net == 'AttGAN' or cfg.model_net == 'STGAN':
        for data in A_test_reader():
            real_img, label_org, label_trg, image_name = data[0][
                'image_real'], data[0]['label_org'], data[0][
                    'label_trg'], data[0]['image_name']
            attr_names = cfg.selected_attrs.split(',')
            real_img_temp = save_batch_image(np.array(real_img))
            images = [real_img_temp]
            for i in range(cfg.c_dim):
                label_trg_tmp = copy.deepcopy(np.array(label_trg))

                for j in range(len(label_trg_tmp)):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)

                label_org_tmp = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, np.array(label_org)))
                label_trg_tmp = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, label_trg_tmp))

                if cfg.model_net == 'AttGAN':
                    for k in range(len(label_trg_tmp)):
                        label_trg_tmp[k][i] = label_trg_tmp[k][i] * 2.0
                tensor_label_org_ = fluid.LoDTensor()
                tensor_label_org_.set(label_org_tmp, place)
                tensor_label_trg_ = fluid.LoDTensor()
                tensor_label_trg_.set(label_trg_tmp, place)

                out = exe.run(test_program,
                              feed={
                                  "image_real": real_img,
                                  "label_org": label_org,
                                  "label_org_": tensor_label_org_,
                                  "label_trg": label_trg,
                                  "label_trg_": tensor_label_trg_
                              },
                              fetch_list=[g_trainer.fake_img])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(label_trg_tmp) > 1:
                images_concat = np.concatenate(images_concat, 1)
            image_name_save = 'fake_img_' + str(epoch) + '_' + str(
                np.array(image_name)[0].astype('int32')) + '.jpg'
            image_path = os.path.join(out_path, image_name_save)
            imageio.imwrite(image_path,
                            ((images_concat + 1) * 127.5).astype(np.uint8))

    else:
        for data_A, data_B in zip(A_test_reader(), B_test_reader()):
            A_data, A_name = data_A[0]['input_A'], data_A[0]['A_image_name']
            B_data, B_name = data_B[0]['input_B'], data_B[0]['B_image_name']
            fake_A_temp, fake_B_temp, cyc_A_temp, cyc_B_temp = exe.run(
                test_program,
                fetch_list=[
                    g_trainer.fake_A, g_trainer.fake_B, g_trainer.cyc_A,
                    g_trainer.cyc_B
                ],
                feed={
                    "input_A": A_data,
                    "input_B": B_data
                })
            fake_A_temp = np.squeeze(fake_A_temp[0]).transpose([1, 2, 0])
            fake_B_temp = np.squeeze(fake_B_temp[0]).transpose([1, 2, 0])
            cyc_A_temp = np.squeeze(cyc_A_temp[0]).transpose([1, 2, 0])
            cyc_B_temp = np.squeeze(cyc_B_temp[0]).transpose([1, 2, 0])
            input_A_temp = np.squeeze(np.array(A_data)).transpose([1, 2, 0])
            input_B_temp = np.squeeze(np.array(B_data)).transpose([1, 2, 0])

            fakeA_name = "fakeA_" + str(epoch) + "_" + A_id2name[np.array(
                A_name).astype('int32')[0]]
            fakeB_name = "fakeB_" + str(epoch) + "_" + B_id2name[np.array(
                B_name).astype('int32')[0]]
            inputA_name = "inputA_" + str(epoch) + "_" + A_id2name[np.array(
                A_name).astype('int32')[0]]
            inputB_name = "inputB_" + str(epoch) + "_" + B_id2name[np.array(
                B_name).astype('int32')[0]]
            cycA_name = "cycA_" + str(epoch) + "_" + A_id2name[np.array(
                A_name).astype('int32')[0]]
            cycB_name = "cycB_" + str(epoch) + "_" + B_id2name[np.array(
                B_name).astype('int32')[0]]
            imageio.imwrite(os.path.join(out_path, fakeB_name),
                            ((fake_B_temp + 1) * 127.5).astype(np.uint8))
            imageio.imwrite(os.path.join(out_path, fakeA_name),
                            ((fake_A_temp + 1) * 127.5).astype(np.uint8))
            imageio.imwrite(os.path.join(out_path, cycA_name),
                            ((cyc_A_temp + 1) * 127.5).astype(np.uint8))
            imageio.imwrite(os.path.join(out_path, cycB_name),
                            ((cyc_B_temp + 1) * 127.5).astype(np.uint8))
            imageio.imwrite(os.path.join(out_path, inputA_name),
                            ((input_A_temp + 1) * 127.5).astype(np.uint8))
            imageio.imwrite(os.path.join(out_path, inputB_name),
                            ((input_B_temp + 1) * 127.5).astype(np.uint8))
Example n. 28
    def build_model(self):
        data_shape = [-1, 3, self.cfg.image_size, self.cfg.image_size]

        image_real = fluid.layers.data(
            name='image_real', shape=data_shape, dtype='float32')
        label_org = fluid.layers.data(
            name='label_org', shape=[self.cfg.c_dim], dtype='float32')
        label_trg = fluid.layers.data(
            name='label_trg', shape=[self.cfg.c_dim], dtype='float32')
        gen_trainer = GTrainer(image_real, label_org, label_trg, self.cfg,
                               self.batch_num)
        dis_trainer = DTrainer(image_real, label_org, label_trg, self.cfg,
                               self.batch_num)

        # prepare environment
        place = fluid.CUDAPlace(0) if self.cfg.use_gpu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        if self.cfg.init_model:
            utility.init_checkpoints(self.cfg, exe, gen_trainer, "net_G")
            utility.init_checkpoints(self.cfg, exe, dis_trainer, "net_D")

        ### memory optim
        build_strategy = fluid.BuildStrategy()
        build_strategy.enable_inplace = False
        build_strategy.memory_optimize = False

        gen_trainer_program = fluid.CompiledProgram(
            gen_trainer.program).with_data_parallel(
                loss_name=gen_trainer.g_loss.name,
                build_strategy=build_strategy)
        dis_trainer_program = fluid.CompiledProgram(
            dis_trainer.program).with_data_parallel(
                loss_name=dis_trainer.d_loss.name,
                build_strategy=build_strategy)

        t_time = 0

        for epoch_id in range(self.cfg.epoch):
            batch_id = 0
            for i in range(self.batch_num):
                image, label_org = next(self.train_reader())
                label_trg = copy.deepcopy(label_org)
                np.random.shuffle(label_trg)

                tensor_img = fluid.LoDTensor()
                tensor_label_org = fluid.LoDTensor()
                tensor_label_trg = fluid.LoDTensor()
                tensor_img.set(image, place)
                tensor_label_org.set(label_org, place)
                tensor_label_trg.set(label_trg, place)
                s_time = time.time()
                # optimize the discriminator network
                d_loss_real, d_loss_fake, d_loss, d_loss_cls, d_loss_gp = exe.run(
                    dis_trainer_program,
                    fetch_list=[
                        dis_trainer.d_loss_real, dis_trainer.d_loss_fake,
                        dis_trainer.d_loss, dis_trainer.d_loss_cls,
                        dis_trainer.d_loss_gp
                    ],
                    feed={
                        "image_real": tensor_img,
                        "label_org": tensor_label_org,
                        "label_trg": tensor_label_trg
                    })
                # optimize the generator network
                if (batch_id + 1) % self.cfg.n_critic == 0:
                    g_loss_fake, g_loss_rec, g_loss_cls, fake_img, rec_img = exe.run(
                        gen_trainer_program,
                        fetch_list=[
                            gen_trainer.g_loss_fake, gen_trainer.g_loss_rec,
                            gen_trainer.g_loss_cls, gen_trainer.fake_img,
                            gen_trainer.rec_img
                        ],
                        feed={
                            "image_real": tensor_img,
                            "label_org": tensor_label_org,
                            "label_trg": tensor_label_trg
                        })
                    print("epoch{}: batch{}: \n\
                         g_loss_fake: {}; g_loss_rec: {}; g_loss_cls: {}"
                          .format(epoch_id, batch_id, g_loss_fake[0],
                                  g_loss_rec[0], g_loss_cls[0]))

                batch_time = time.time() - s_time
                t_time += batch_time
                if batch_id % self.cfg.print_freq == 0:
                    print("epoch{}: batch{}: \n\
                         d_loss_real: {}; d_loss_fake: {}; d_loss_cls: {}; d_loss_gp: {} \n\
                         Batch_time_cost: {:.2f}".format(
                        epoch_id, batch_id, d_loss_real[0], d_loss_fake[
                            0], d_loss_cls[0], d_loss_gp[0], batch_time))

                sys.stdout.flush()
                batch_id += 1

            if self.cfg.run_test:
                test_program = gen_trainer.infer_program
                utility.save_test_image(epoch_id, self.cfg, exe, place,
                                        test_program, gen_trainer,
                                        self.test_reader)

            if self.cfg.save_checkpoints:
                utility.checkpoints(epoch_id, self.cfg, exe, gen_trainer,
                                    "net_G")
                utility.checkpoints(epoch_id, self.cfg, exe, dis_trainer,
                                    "net_D")
Example n. 29
def set_init_lod(data, lod, place):
    res = fluid.LoDTensor()
    res.set(data, place)
    res.set_lod(lod)
    return res
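A minimal usage sketch (the shapes and offsets are assumed for illustration): set_init_lod is handy when a dense array, e.g. an initial decoder state with one row per sequence, has to carry the same offset-based LoD as the rest of the batch.

import numpy as np
import paddle.fluid as fluid

init_state = np.zeros((3, 512), dtype='float32')  # one row per sequence
init_lod = [[0, 1, 2, 3]]                         # offset-based LoD: three length-1 sequences
init_tensor = set_init_lod(init_state, init_lod, fluid.CPUPlace())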
Example n. 30
    def build_model(self):
        data_shape = [-1, 3, self.cfg.crop_size, self.cfg.crop_size]

        input_A = fluid.layers.data(name='input_A',
                                    shape=data_shape,
                                    dtype='float32')
        input_B = fluid.layers.data(name='input_B',
                                    shape=data_shape,
                                    dtype='float32')
        input_fake = fluid.layers.data(name='input_fake',
                                       shape=data_shape,
                                       dtype='float32')

        gen_trainer = GTrainer(input_A, input_B, self.cfg, self.batch_num)
        dis_trainer = DTrainer(input_A, input_B, input_fake, self.cfg,
                               self.batch_num)

        # prepare environment
        place = fluid.CUDAPlace(0) if self.cfg.use_gpu else fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())

        if self.cfg.init_model:
            utility.init_checkpoints(self.cfg, exe, gen_trainer, "net_G")
            utility.init_checkpoints(self.cfg, exe, dis_trainer, "net_D")

        ### memory optim
        build_strategy = fluid.BuildStrategy()
        build_strategy.enable_inplace = False
        build_strategy.memory_optimize = False

        gen_trainer_program = fluid.CompiledProgram(
            gen_trainer.program).with_data_parallel(
                loss_name=gen_trainer.g_loss.name,
                build_strategy=build_strategy)
        dis_trainer_program = fluid.CompiledProgram(
            dis_trainer.program).with_data_parallel(
                loss_name=dis_trainer.d_loss.name,
                build_strategy=build_strategy)

        t_time = 0

        for epoch_id in range(self.cfg.epoch):
            batch_id = 0
            for i in range(self.batch_num):
                data_A, data_B = next(self.train_reader())
                tensor_A = fluid.LoDTensor()
                tensor_B = fluid.LoDTensor()
                tensor_A.set(data_A, place)
                tensor_B.set(data_B, place)
                s_time = time.time()
                # optimize the generator network
                g_loss_gan, g_loss_l1, fake_B_tmp = exe.run(
                    gen_trainer_program,
                    fetch_list=[
                        gen_trainer.g_loss_gan, gen_trainer.g_loss_L1,
                        gen_trainer.fake_B
                    ],
                    feed={
                        "input_A": tensor_A,
                        "input_B": tensor_B
                    })

                # optimize the discriminator network
                d_loss_real, d_loss_fake = exe.run(dis_trainer_program,
                                                   fetch_list=[
                                                       dis_trainer.d_loss_real,
                                                       dis_trainer.d_loss_fake
                                                   ],
                                                   feed={
                                                       "input_A": tensor_A,
                                                       "input_B": tensor_B,
                                                       "input_fake": fake_B_tmp
                                                   })

                batch_time = time.time() - s_time
                t_time += batch_time
                if batch_id % self.cfg.print_freq == 0:
                    print("epoch{}: batch{}: \n\
                         g_loss_gan: {}; g_loss_l1: {}; \n\
                         d_loss_real: {}; d_loss_fake: {}; \n\
                         Batch_time_cost: {:.2f}".format(
                        epoch_id, batch_id, g_loss_gan[0], g_loss_l1[0],
                        d_loss_real[0], d_loss_fake[0], batch_time))

                sys.stdout.flush()
                batch_id += 1

            if self.cfg.run_test:
                test_program = gen_trainer.infer_program
                utility.save_test_image(epoch_id, self.cfg, exe, place,
                                        test_program, gen_trainer,
                                        self.test_reader)

            if self.cfg.save_checkpoints:
                utility.checkpoints(epoch_id, self.cfg, exe, gen_trainer,
                                    "net_G")
                utility.checkpoints(epoch_id, self.cfg, exe, dis_trainer,
                                    "net_D")