def test_image(self,
                   image,
                   save_path,
                   model_path,
                   psnr=False,
                   real_image_path=None):
        """Run the generator on one image and save the stylized output.

        Args:
            image: input array; expected NCHW shape (1, 3, H, W). A CHW
                array is expanded with a leading batch axis.
            save_path: path where the generated image is written.
            model_path: checkpoint directory restored via flow.checkpoint.get.
            psnr: when True, also print PSNR against a reference image.
            real_image_path: path of the reference image used for PSNR.

        NOTE(review): H and W are module-level globals not visible in this
        chunk — the placeholder shape is fixed at trace time; confirm they
        match the incoming ``image``.
        """
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        flow.config.gpu_device_num(self.gpu_num_per_node)

        @flow.global_function(type="predict", function_config=func_config)
        def eval_generator(input: tp.Numpy.Placeholder(
            (1, 3, H, W))) -> tp.Numpy:
            # NOTE(review): trainable=True inside a "predict" job looks
            # unintended for inference — confirm whether False was meant.
            g_out = self.Generator(input, trainable=True)
            return g_out

        # Restore generator weights before running the job.
        flow.load_variables(flow.checkpoint.get(model_path))
        if image.shape != (1, 3, H, W):
            # Add the batch dimension when a single CHW image is passed.
            image = np.expand_dims(image, axis=0)
        result = eval_generator(image)
        self.save_image(result[0], save_path)

        if psnr:
            # Reference is brought to NHWC to match the transposed
            # generator output before computing PSNR.
            pytorch_result = load_image(real_image_path)[0].transpose(1, 2, 0)
            pytorch_result = np.expand_dims(pytorch_result, axis=0)
            print(self.psnr(pytorch_result, result.transpose(0, 2, 3, 1)))

        flow.clear_default_session()
def main(args):
    """Run style-transfer inference on a single image.

    Loads the input image, builds a predict job sized to that image,
    restores model weights, runs inference once (timed), and writes the
    stylized result to ``args.output_image_path``.
    """
    input_image = load_image(args.input_image_path)
    # load_image returns an NCHW array; indices 2/3 are the spatial dims.
    height = input_image.shape[2]
    width = input_image.shape[3]

    flow.env.init()

    @flow.global_function("predict", get_predict_config())
    def PredictNet(image: tp.Numpy.Placeholder(
        (1, 3, height, width), dtype=flow.float32)) -> tp.Numpy:
        # NOTE(review): trainable=True inside a predict job — confirm intended.
        style_out = style_model.styleNet(image, trainable=True)
        return style_out

    flow.load_variables(flow.checkpoint.get(args.model_load_dir))

    import datetime
    start = datetime.datetime.now()

    style_out = PredictNet(input_image)

    elapsed = datetime.datetime.now() - start

    # BUG FIX: timedelta.microseconds only holds the sub-second remainder
    # (0..999999), so any run longer than one second was reported wrong.
    # total_seconds() covers the whole duration.
    print("time: %s ms, height: %d, width: %d" %
          (elapsed.total_seconds() * 1000, height, width))

    cv2.imwrite(args.output_image_path, recover_image(style_out))
Exemple #3
0
def _TestLoadCorrectness(test_case, model_getter, dtype, legacy_api):
    """
    Save weights with the legacy model io, reload them with either the
    legacy or the new model io, and verify both runs produce identical
    model output.
    """
    with tempfile.TemporaryDirectory() as weights_dir:
        # Phase 1: build the model under legacy io and snapshot its weights.
        refresh_session()
        flow.config.enable_legacy_model_io(True)

        saver_model = get_checkpoint_ready_model(model_getter, dtype)

        legacy_ckpt = flow.train.CheckPoint()
        legacy_ckpt.init()
        legacy_ckpt.save(weights_dir)
        expected = saver_model()

        # Phase 2: fresh multi-device session with the new io enabled.
        flow.clear_default_session()
        flow.config.gpu_device_num(4)
        flow.config.enable_legacy_model_io(False)

        loader_model = get_checkpoint_ready_model(model_getter, dtype)

        if legacy_api:
            loader_ckpt = flow.train.CheckPoint()
            loader_ckpt.load(weights_dir)
        else:
            flow.load_variables(flow.checkpoint.get(weights_dir))

        actual = loader_model()

        test_case.assertTrue(np.array_equal(expected, actual))
Exemple #4
0
def _TestMixedModel(test_case, dtype):
    """Load two separately-saved models into one mixed model under renamed keys."""
    with tempfile.TemporaryDirectory() as save_dir1, \
            tempfile.TemporaryDirectory() as save_dir2:

        def get_variable(name):
            # Every variable shares one shape/initializer; only the name varies.
            return flow.get_variable(
                name=name,
                shape=(10, 80, 40, 20),
                dtype=dtype,
                initializer=flow.random_normal_initializer(mean=10, stddev=1),
                distribute=flow.distribute.split(0),
            )

        def get_part_of_mixed_model(dtype):
            @flow.global_function()
            def model() -> tp.Numpy:
                with get_placement():
                    x = get_variable("x")
                    return x

            return model

        def get_mixed_model(dtype):
            @flow.global_function()
            def model() -> tp.Numpy:
                with get_placement():
                    x1 = get_variable("x_from_model1")
                    x2 = get_variable("x_from_model2")
                    return x1 + x2

            return model

        # Save two independent instances of the partial model.
        refresh_session()
        get_checkpoint_ready_model(get_part_of_mixed_model, dtype)
        flow.checkpoint.save(save_dir1)

        refresh_session()
        get_checkpoint_ready_model(get_part_of_mixed_model, dtype)
        flow.checkpoint.save(save_dir2)

        # Build the mixed model and feed it both snapshots, renaming each
        # saved variable to the name the mixed model expects.
        refresh_session()
        mixed_model = get_checkpoint_ready_model(get_mixed_model, dtype)
        vars1 = flow.checkpoint.get(save_dir1)
        vars2 = flow.checkpoint.get(save_dir2)
        renamed = {"{}_from_model1".format(k): v for k, v in vars1.items()}
        renamed.update(
            {"{}_from_model2".format(k): v for k, v in vars2.items()})
        flow.load_variables(renamed)
        res = mixed_model()
        test_case.assertTrue(
            np.allclose(
                res,
                vars1["x"].numpy() + vars2["x"].numpy(),
            ))
Exemple #5
0
def _TestAssignmentBetweenMemory(test_case, dtype):
    """Copy one in-memory variable onto another and check the model output."""
    refresh_session()

    model = get_checkpoint_ready_model(get_simple_model, dtype)
    variables = flow.get_all_variables()
    # Overwrite x with z's current value directly in memory
    # (no checkpoint round-trip involved).
    flow.load_variables({"x": variables["z"]})
    result = model()
    expected = variables["z"].numpy() * 2 + variables["y"].numpy()
    test_case.assertTrue(np.allclose(result, expected))
def main():
    """Evaluate the MLP on the MNIST test split and print accuracy."""
    # Restore trained MLP weights before running evaluation.
    flow.load_variables(flow.checkpoint.get("./mlp_models_1"))
    (train_images, train_labels), (test_images, test_labels) = flow.data.load_mnist(
        BATCH_SIZE
    )
    for epoch in range(1):
        for i, (images, labels) in enumerate(zip(test_images, test_labels)):
            # NOTE(review): the object returned by eval_job is called with
            # ``acc`` — presumably an async-result callback (e.g. the
            # equivalent of .async_get(acc)) that accumulates into the
            # g_correct / g_total counters; confirm against eval_job.
            eval_job(images, labels)(acc)

    # g_correct and g_total are module-level counters updated by the callback.
    print("accuracy: {0:.1f}%".format(g_correct * 100 / g_total))
Exemple #7
0
def main(filename):
    """Detect cars in the image at ``filename`` and display the result."""
    # Restore detector weights from the configured checkpoint.
    flow.load_variables(flow.checkpoint.get(cfg.TEST.WEIGHT_FILE))

    frame = cv2.imread(filename)
    boxes = predict(frame)
    annotated = utils.draw_bbox(frame, boxes, [
        'car',
    ])
    # OpenCV loads BGR; convert before handing to PIL for display.
    annotated = cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB)
    Image.fromarray(annotated).show()
def main():
    """CLI entry: classify the image given as argv[1] with the LeNet model."""
    if len(sys.argv) != 2:
        usage()
        return

    flow.load_variables(flow.checkpoint.get("./lenet_models_1"))

    img = load_image(sys.argv[1])
    logits = test_job(img)

    # argmax over the class axis gives the predicted label.
    predicted = np.argmax(logits, 1)
    print("prediction: {}".format(predicted[0]))
Exemple #9
0
def main():
    """CLI entry: run eval_job on the image given as argv[1] and print the class."""
    if len(sys.argv) != 2:
        usage()
        return

    flow.load_variables(flow.checkpoint.get("./lenet_models_1"))

    img = load_image(sys.argv[1])
    # eval_job also expects a label input; feed a dummy zero label.
    dummy_label = np.zeros((1, )).astype(np.int32)
    logits = eval_job(img, dummy_label)

    predicted = np.argmax(logits, 1)
    print("prediction: {}".format(predicted[0]))
Exemple #10
0
    def test_style_model(self):
        """End-to-end saved-model round trip for the style-transfer network.

        Builds the inference function sized to the input image, restores
        weights, saves the job as a versioned saved model (if absent),
        reloads it through an InferenceSession, runs one image, and
        optionally writes the stylized output to OUTPUT_IMAGE_FILE.
        """
        init_env()
        input_image = load_image(self.INPUT_IMAGE_FILE)
        # NCHW input: indices 2/3 are height and width.
        image_height, image_width = input_image.shape[2:]
        style_transfer = make_style_transfer(image_height, image_width)
        flow.load_variables(flow.checkpoint.get(self.CHECKPOINT_DIR))

        # save
        saved_model_path = "style_models"
        model_version = 1
        saved_model_version_dir = os.path.join(saved_model_path, str(model_version))
        if not os.path.exists(saved_model_version_dir):
            saved_model_builder = (
                flow.saved_model.ModelBuilder(saved_model_path)
                .ModelName("style_transfer")
                .Version(model_version)
            )
            saved_model_builder.AddFunction(style_transfer).Finish()
            saved_model_builder.Save()

        flow.clear_default_session()

        # load
        sess = flow.serving.InferenceSession()
        sess.load_saved_model(saved_model_path)
        sess.launch()

        job_names = sess.list_jobs()
        print("job names:", job_names)
        input_names = sess.list_inputs()
        print("input names:", input_names)
        for input_name in input_names:
            print(
                'input "{}" info: {}'.format(
                    input_name, sess.input_info(input_name, job_names[0])
                )
            )
        output_names = sess.list_outputs()
        print("output names:", output_names)
        for output_name in output_names:
            # BUG FIX: this message previously said 'input' — copy-paste
            # from the input loop above; it describes an output.
            print(
                'output "{}" info: {}'.format(
                    output_name, sess.output_info(output_name, job_names[0])
                )
            )

        input_dict = {input_names[0]: input_image}
        outputs = sess.run(style_transfer.__name__, **input_dict)
        if self.OUTPUT_IMAGE_FILE is not None:
            cv2.imwrite(self.OUTPUT_IMAGE_FILE, recover_image(outputs[0]))
            print("write styled output image to", self.OUTPUT_IMAGE_FILE)

        sess.close()
Exemple #11
0
def train(args):
    """GPT-2 training-loop driver.

    Sets up the environment, optionally restores a checkpoint (including
    the saved training-step counter), streams token batches from the
    dataset sampler, and runs up to ``args.iter_num`` iterations with
    periodic snapshots. Ctrl-C interrupts cleanly.
    """
    util.init_env(args)
    util.init_config(args)
    gpt2_trainer = make_gpt2_train_func(args)
    snapshot = None
    iteration = 0

    if args.model_save_dir is not None:
        snapshot = util.Snapshot(args.log_dir, args.model_save_dir)

    if args.model_load_dir is not None:
        print(f"Loading model from {args.model_load_dir}")
        var_dict = flow.checkpoint.get(args.model_load_dir)
        flow.load_variables(var_dict)
        # Resume the iteration counter from the system train-step variable
        # when the checkpoint contains one.
        train_step_variable_name = f"System-Train-TrainStep-{gpt2_trainer.__name__}"
        if train_step_variable_name in var_dict:
            iteration = var_dict[train_step_variable_name].numpy().item()

    print("Loading dataset...")
    enc = get_encoder(args)
    chunks = load_dataset(enc,
                          args.dataset,
                          args.combine,
                          encoding=args.encoding)
    data_sampler = Sampler(chunks, seed=1)
    print("dataset has", data_sampler.total_size, "tokens")

    metric = util.Metric(
        desc="train",
        print_steps=args.loss_print_every_n_iter,
        batch_size=args.batch_size,
        keys=["loss"],
        print_format=args.metric_print_format,
    )

    print("Training...")
    try:
        while iteration < args.iter_num:
            b = data_sampler.sample_batch(args.batch_size, args.seq_len)
            # Loss is reported asynchronously through the metric callback.
            gpt2_trainer(b).async_get(metric.metric_cb(iteration))

            # Periodic snapshot, named after the 1-based iteration count.
            if (snapshot is not None and args.model_save_every_n_iter > 0
                    and (iteration + 1) % args.model_save_every_n_iter == 0):
                snapshot.save(f"iter{iteration+1}_snapshot")

            iteration += 1

        if snapshot is not None and args.save_last_snapshot:
            snapshot.save("last_snapshot")

    except KeyboardInterrupt:
        print("interrupted")
Exemple #12
0
def _TestPartiallyLoadNumpy(test_case, dtype):
    """Loading only "x" from numpy must leave variable "y" untouched."""
    refresh_session()

    model = get_checkpoint_ready_model(get_add_and_reduce_mean_model, dtype)
    all_vars = flow.get_all_variables()
    x_var = all_vars["x"]
    y_before = all_vars["y"].numpy()

    replacement = np.random.random(x_var.shape).astype(np.float32)
    flow.load_variables({"x": replacement})

    y_after = flow.get_all_variables()["y"].numpy()
    result = model()
    expected = (y_after + replacement).mean()
    test_case.assertTrue(np.allclose(result, expected))
    # The partial load must not have modified "y".
    test_case.assertTrue(np.array_equal(y_before, y_after))
def set_moving_max_min_value():
    """Force the moving max/min quantization variables to fixed values.

    Scans all variable names for ones ending in "max" / "min" and
    overwrites the (last-seen) matches with 0.5 / -0.2 respectively.
    Does nothing when either key is missing.
    """
    max_key, min_key = None, None
    keys = flow.get_all_variables().keys()
    for key in keys:
        # BUG FIX: the early-exit used to compare against "" — since the
        # sentinels are None, `None != ""` was immediately true and the
        # loop broke on the very first key, before anything was found.
        # Stop only once BOTH keys have been located.
        if max_key is not None and min_key is not None:
            break
        if key[-3:] == "max":
            max_key = key
        if key[-3:] == "min":
            min_key = key
    if max_key is not None and min_key is not None:
        flow.load_variables({
            max_key: np.array([0.5]).astype(np.float32),
            min_key: np.array([-0.2]).astype(np.float32),
        })
Exemple #14
0
def _numpy_initializer_for_determining(tensor):
    """Materialize an undetermined tensor from its stashed numpy data.

    For consistent (distributed) tensors the data is routed through a
    temporary OneFlow variable; for local tensors it is copied into an
    eager local tensor directly. Returns the determined tensor.
    """
    assert not tensor.is_determined
    undetermined_tensor = tensor._undetermined_tensor
    numpy_data = undetermined_tensor.numpy_data
    assert numpy_data is not None

    if undetermined_tensor.is_consistent:
        # Uniquely-named variable so the payload can be assigned via
        # flow.load_variables and fetched back as a blob.
        variable_name = id_util.UniqueStr("tensor_")

        @global_function_or_identity()
        def set_numpy_data():
            with tensor._placement_scope():
                flow.get_variable(
                    name=variable_name,
                    shape=tuple(undetermined_tensor.shape),
                    dtype=undetermined_tensor.dtype,
                    initializer=undetermined_tensor.data_initializer,
                )

        set_numpy_data()
        flow.load_variables({variable_name: numpy_data})
        blob = flow.get_all_variables()[variable_name]

        determined_tensor = oneflow._oneflow_internal.ConsistentTensor(
            undetermined_tensor.shape,
            undetermined_tensor.dtype,
            undetermined_tensor.sbp,
            undetermined_tensor.placement,
            undetermined_tensor.is_lazy,
            undetermined_tensor.requires_grad,
            True,
            undetermined_tensor.retain_grad,
        )
        # Back the new consistent tensor with the variable's blob storage.
        determined_tensor._set_blob_object(blob.blob_object)
    else:
        determined_tensor = oneflow._oneflow_internal.LocalTensor(
            undetermined_tensor.shape,
            undetermined_tensor.dtype,
            undetermined_tensor.device,
            undetermined_tensor.is_lazy,
            undetermined_tensor.requires_grad,
            True,
            undetermined_tensor.retain_grad,
        )
        _copy_from_numpy_to_eager_local_tensor(determined_tensor, numpy_data)

    return determined_tensor
Exemple #15
0
def _TestResumeTraining(test_case):
    """Resuming momentum training from a checkpoint must reproduce the weights."""
    with tempfile.TemporaryDirectory() as ckpt_dir:
        # Reference run: step once, checkpoint, step again, record "w".
        refresh_session()
        model = get_checkpoint_ready_model(get_simple_momentum_training_model,
                                           flow.float32)
        model()
        flow.checkpoint.save(ckpt_dir)
        model()
        w_direct = flow.get_all_variables()["w"].numpy()

        # Resumed run: restore the checkpoint, then take the second step.
        refresh_session()
        model = get_checkpoint_ready_model(get_simple_momentum_training_model,
                                           flow.float32)
        flow.load_variables(flow.checkpoint.get(ckpt_dir))
        model()
        w_resumed = flow.get_all_variables()["w"].numpy()

        test_case.assertTrue(np.array_equal(w_direct, w_resumed))
Exemple #16
0
def _TestRoundTrip(test_case, model_getter, dtype):
    """
    Weights saved by the new model io and reloaded by the new model io
    must yield identical model output.
    """
    with tempfile.TemporaryDirectory() as ckpt_dir:
        # Writer session: save freshly-initialized weights, record output.
        refresh_session()
        writer = get_checkpoint_ready_model(model_getter, dtype)
        flow.checkpoint.save(ckpt_dir)
        expected = writer()

        # Reader session: reload those weights into a fresh model.
        refresh_session()
        reader = get_checkpoint_ready_model(model_getter, dtype)
        flow.load_variables(flow.checkpoint.get(ckpt_dir))
        actual = reader()

        test_case.assertTrue(np.array_equal(expected, actual))
Exemple #17
0
def _initialized_job(
    shape=None,
    dtype=None,
    device=None,
    requires_grad=None,
    retain_grad=None,
    is_lazy=False,
    numpy_data=None,
):
    """Create a LocalTensor initialized from ``numpy_data``.

    A zero-initialized variable with a unique name is declared inside a
    (possibly identity) global function, then overwritten with the numpy
    payload via flow.load_variables; the resulting blob backs the
    returned tensor.
    """
    assert numpy_data is not None
    variable_name = id_util.UniqueStr("tensor_")

    @global_function_or_identity()
    def set_data():
        flow.get_variable(
            name=variable_name,
            shape=tuple(shape),
            dtype=dtype,
            initializer=flow.zeros_initializer(dtype=dtype),
        )

    # Lazy mode defers variable creation; eager mode declares it now.
    if not is_lazy:
        set_data()
    flow.load_variables({variable_name: numpy_data})
    blob = flow.get_all_variables()[variable_name]
    determined_tensor = oneflow_api.LocalTensor(
        shape,
        dtype,
        device,
        is_lazy,
        requires_grad,
        True,
        retain_grad,
    )
    # Back the tensor with the variable's blob storage.
    determined_tensor._set_blob_object(blob.blob_object)
    return determined_tensor
Exemple #18
0
 def load_checkpoint(self):
     """Restore model weights from the directory in ``self.args.model_load_dir``."""
     flow.load_variables(flow.checkpoint.get(self.args.model_load_dir))
def main(args):
    """Train the fast style-transfer network.

    Builds a train job combining a content loss and Gram-matrix style
    losses against frozen VGG16 features, precomputes the style image's
    feature maps once, then iterates over the dataset; every 100
    iterations it dumps a preview image and a loss-named checkpoint.
    """
    global CONSOLE_ARGUMENTS
    CONSOLE_ARGUMENTS = args

    flow.env.init()

    @flow.global_function("train", get_config())
    def TrainNet(
        image: tp.Numpy.Placeholder((1, 3, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size), dtype = flow.float32),
        mean: tp.Numpy.Placeholder((1, 3, 1, 1), dtype = flow.float32),
        std: tp.Numpy.Placeholder((1, 3, 1, 1), dtype = flow.float32),
        style_image_relu1_2: tp.Numpy.Placeholder((1, 64, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size), dtype = flow.float32),
        style_image_relu2_2: tp.Numpy.Placeholder((1, 128, CONSOLE_ARGUMENTS.train_image_size // 2, CONSOLE_ARGUMENTS.train_image_size // 2), dtype = flow.float32),
        style_image_relu3_3: tp.Numpy.Placeholder((1, 256, CONSOLE_ARGUMENTS.train_image_size // 4, CONSOLE_ARGUMENTS.train_image_size // 4), dtype = flow.float32),
        style_image_relu4_3: tp.Numpy.Placeholder((1, 512, CONSOLE_ARGUMENTS.train_image_size // 8, CONSOLE_ARGUMENTS.train_image_size // 8), dtype = flow.float32), 
    ):
        # Forward pass through the trainable style network.
        style_out = style_model.styleNet(image, trainable = True)

        # Content features of the normalized input (VGG16 frozen).
        image_norm = (image - mean) / std
        org_content_relu2_2 = vgg16_model.vgg16bn_content_layer(image_norm, trainable = False, training = False)
        
        # Style features of the normalized generated image.
        style_out_norm = (style_out - mean) / std
        style_out_relu1_2, style_out_relu2_2, style_out_relu3_3, style_out_relu4_3 = vgg16_model.vgg16bn_style_layer(style_out_norm, trainable = False, training = False)

        # compute mean square error loss
        content_loss = style_model.mse_loss(org_content_relu2_2 - style_out_relu2_2)
        style_loss = style_model.mse_loss(style_model.gram_matrix(style_out_relu1_2) - style_model.gram_matrix(style_image_relu1_2)) \
                    + style_model.mse_loss(style_model.gram_matrix(style_out_relu2_2) - style_model.gram_matrix(style_image_relu2_2)) \
                    + style_model.mse_loss(style_model.gram_matrix(style_out_relu3_3) - style_model.gram_matrix(style_image_relu3_3)) \
                    + style_model.mse_loss(style_model.gram_matrix(style_out_relu4_3) - style_model.gram_matrix(style_image_relu4_3))

        loss = content_loss * CONSOLE_ARGUMENTS.content_weight + style_loss * CONSOLE_ARGUMENTS.style_weight

        flow.optimizer.Adam(flow.optimizer.PiecewiseConstantScheduler([], [CONSOLE_ARGUMENTS.learning_rate])).minimize(loss)

        return style_out, loss

    @flow.global_function("predict", get_config())
    def getVgg16MiddleLayers(
        style_image: tp.Numpy.Placeholder((1, 3, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size), dtype = flow.float32),
        mean: tp.Numpy.Placeholder((1, 3, 1, 1), dtype = flow.float32),
        std: tp.Numpy.Placeholder((1, 3, 1, 1), dtype = flow.float32)):
        # Style-target features also come from the frozen VGG16.
        style_image = (style_image - mean) / std
        style_out_relu1_2, style_out_relu2_2, style_out_relu3_3, style_out_relu4_3 = vgg16_model.vgg16bn_style_layer(style_image, trainable = False, training = False)
        return style_out_relu1_2, style_out_relu2_2, style_out_relu3_3, style_out_relu4_3

    # Warm-start from a pretrained checkpoint (e.g. the VGG16 weights).
    vars_in_file = flow.checkpoint.get(CONSOLE_ARGUMENTS.model_load_dir)
    flow.load_variables(vars_in_file)

    mean_nd = np.array(float_list(CONSOLE_ARGUMENTS.rgb_mean)).reshape((1, 3, 1, 1)).astype(np.float32)
    std_nd = np.array(float_list(CONSOLE_ARGUMENTS.rgb_std)).reshape((1, 3, 1, 1)).astype(np.float32)

    # prepare style image vgg16 middle layer outputs
    style_image = load_image(CONSOLE_ARGUMENTS.style_image_path)
    style_image_recover = recover_image(style_image)

    style_image_relu1_2, style_image_relu2_2, style_image_relu3_3, style_image_relu4_3 = \
        getVgg16MiddleLayers(style_image, mean_nd, std_nd).get()

    style_image_relu1_2 = style_image_relu1_2.numpy()
    style_image_relu2_2 = style_image_relu2_2.numpy()
    style_image_relu3_3 = style_image_relu3_3.numpy()
    style_image_relu4_3 = style_image_relu4_3.numpy()

    train_images = os.listdir(CONSOLE_ARGUMENTS.dataset_path)
    random.shuffle(train_images)
    images_num = len(train_images)
    print("dataset size: %d" % images_num)

    for e in range(CONSOLE_ARGUMENTS.train_epoch):
        for i in range(images_num):
            image = load_image("%s/%s" % (CONSOLE_ARGUMENTS.dataset_path, train_images[i]))
            style_out, loss = TrainNet(image, mean_nd, std_nd, style_image_relu1_2, style_image_relu2_2, style_image_relu3_3, style_image_relu4_3).get()

            if i % 100 == 0:
                # Every 100 iterations: write a side-by-side preview
                # (style | input | output) and save a checkpoint.
                image_recover = recover_image(image)
                style_out_recover = recover_image(style_out.numpy())
                result = np.concatenate((style_image_recover, image_recover), axis=1)
                result = np.concatenate((result, style_out_recover), axis=1)
                cv2.imwrite(CONSOLE_ARGUMENTS.save_tmp_image_path, result)

                cur_loss = loss.numpy().mean()
                
                flow.checkpoint.save("%s/lr_%f_cw_%f_sw_%f_epoch_%d_iter_%d_loss_%f" % \
                    (CONSOLE_ARGUMENTS.model_save_dir, CONSOLE_ARGUMENTS.learning_rate, CONSOLE_ARGUMENTS.content_weight, CONSOLE_ARGUMENTS.style_weight, e, i, cur_loss))

                print("epoch: %d, iter: %d, loss : %f" % (e, i, cur_loss))
    def test_alexnet(test_case, batch_size=DEFAULT_BATCH_SIZE, num_batchs=6):
        """Round-trip saved-model test for AlexNet inference.

        Saves the inference job as a saved model, re-runs inference
        directly at half the batch size to collect reference outputs,
        then loads the saved model through an InferenceSession and
        checks the served outputs match the direct ones.
        """
        init_env()
        alexnet_infer, input_lbns, output_lbns = make_alexnet_infer_func(
            batch_size, (DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE, 3))
        flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR))

        # save model
        saved_model_path = "alexnet_models"
        model_name = "alexnet"
        model_version = 1

        model_version_path = os.path.join(saved_model_path, str(model_version))
        # Remove any stale saved model before writing a fresh one.
        if os.path.exists(saved_model_path) and os.path.isdir(
                saved_model_path):
            print("WARNING: The model version path '{}' already exist"
                  ", old version directory will be removed".format(
                      model_version_path))
            shutil.rmtree(saved_model_path)

        # Wire the captured logical blob names into a "regress" signature.
        saved_model_builder = flow.saved_model.ModelBuilder(saved_model_path)
        signature_builder = (saved_model_builder.ModelName(model_name).Version(
            model_version).AddFunction(alexnet_infer).AddSignature("regress"))
        for input_name, lbn in input_lbns.items():
            signature_builder.Input(input_name, lbn)
        for output_name, lbn in output_lbns.items():
            signature_builder.Output(output_name, lbn)
        saved_model_builder.Save()

        # test data
        # Serve at half the original batch size to exercise re-batching.
        new_batch_size = int(batch_size / 2)
        dataset = ImageNetRecordDataset(
            batch_size=new_batch_size,
            image_resize_size=DEFAULT_IMAGE_SIZE,
            data_format="NHWC",
        )
        image_list, label_list = dataset.load_batchs(num_batchs)
        assert image_list[0].shape[0] == new_batch_size
        image_size = tuple(image_list[0].shape[1:])

        # Rebuild the inference job at the new batch size for the
        # reference (direct) run.
        flow.clear_default_session()
        alexnet_infer, _, _ = make_alexnet_infer_func(new_batch_size,
                                                      image_size)
        flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR))
        print("alexnet inference result:")
        origin_outputs = []
        for i, (image, label) in enumerate(zip(image_list, label_list)):
            output = alexnet_infer(image, label)
            # origin_outputs.append(output.item())
            # print("iter#{:<6} output:".format(i), output.item())
            origin_outputs.append(output)
            print("iter#{:<6} output:".format(i), output)

        origin_outputs = np.array(origin_outputs, dtype=np.float32)

        # load model and run
        flow.clear_default_session()
        model_meta_file_path = os.path.join(saved_model_path,
                                            str(model_version),
                                            "saved_model.prototxt")
        saved_model_proto = load_saved_model(model_meta_file_path)
        sess = flow.serving.InferenceSession()
        checkpoint_path = os.path.join(saved_model_path, str(model_version),
                                       saved_model_proto.checkpoint_dir)
        sess.set_checkpoint_path(checkpoint_path)

        graph_name = saved_model_proto.default_graph_name
        graph_def = saved_model_proto.graphs[graph_name]
        signature_def = graph_def.signatures[graph_def.default_signature_name]

        # Compile the default graph at the served batch size.
        with sess.open(graph_name, signature_def, new_batch_size):
            sess.compile(graph_def.op_list)

        # sess.print_job_set()
        sess.launch()

        job_name = sess.list_jobs()[0]
        input_names = sess.list_inputs()
        print("input names:", input_names)
        for input_name in input_names:
            print('input "{}" info: {}'.format(
                input_name, sess.input_info(input_name, job_name)))
        output_names = sess.list_outputs()
        print("output names:", output_names)
        for output_name in output_names:
            print('output "{}" info: {}'.format(
                output_name, sess.output_info(output_name, job_name)))

        print("load saved alexnet and inference result:")
        print_input_info = False
        cmp_outputs = []
        for i, (image, label) in enumerate(zip(image_list, label_list)):
            if print_input_info:
                print("image shape: {}, dtype: {}".format(
                    image.shape, image.dtype))
                print("label shape: {}, dtype: {}, data: {}".format(
                    label.shape, label.dtype, label))
                if i > 1:
                    print((image - image_list[i - 1]).mean())

            outputs = sess.run(alexnet_infer.__name__,
                               image=image,
                               label=label)
            # cmp_outputs.append(outputs[0].item())
            # print("iter#{:<6} output:".format(i), outputs[0].item())
            cmp_outputs.append(outputs[0])
            print("iter#{:<6} output:".format(i), outputs[0])

        cmp_outputs = np.array(cmp_outputs, dtype=np.float32)
        test_case.assertTrue(np.allclose(origin_outputs, cmp_outputs))
        sess.close()
Exemple #21
0
 def load_checkpoint(self, model_path):
     """Restore model weights from the checkpoint directory ``model_path``."""
     flow.load_variables(flow.checkpoint.get(model_path))
Exemple #22
0
# Build the tiny YOLOv3 model in inference mode (weights frozen).
model = Yolov3_tiny(cfg, trainable=False)


@flow.global_function(type="predict", function_config=func_config)
def test_job(images: test_images, anchors_s: anchors_s, anchors_l: anchors_l) \
        -> tp.Numpy:
    pred = model.predict(images, anchors_s, anchors_l)

    return pred


# Scale the anchors into feature-map units using the per-scale stride.
anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
anchors[0, ...] /= cfg.YOLO.STRIDES[0]
anchors[1, ...] /= cfg.YOLO.STRIDES[1]

# Restore detector weights before any prediction.
flow.load_variables(flow.checkpoint.get(cfg.TEST.WEIGHT_FILE))


def predict(original_image):
    '''

    :param original_image: [H, W, 3]
    :return: (xmin, ymin, xmax, ymax, score, class)
    '''

    # Resize/pad to the configured input size, add a batch dim, and
    # convert HWC -> CHW (C-contiguous) before running the predict job.
    image = utils.image_preporcess(np.copy(original_image),
                                   [cfg.TEST.INPUT_SIZE, cfg.TEST.INPUT_SIZE])
    image = image[np.newaxis, ...]
    image_ = np.transpose(image, [0, 3, 1, 2])
    image_ = np.copy(image_, order='C')
    # NOTE(review): the snippet ends here without a return or box
    # post-processing — it appears truncated; confirm the remainder.
    pred = test_job(image_, anchors[0], anchors[1])[0, ...]
Exemple #23
0
    def test_insightface(self):
        """Round-trip saved-model test for the insightface resnet100 model.

        Runs the inference function directly to collect reference
        features, saves the model, reloads it through an
        InferenceSession, and checks the served features match.
        """
        init_env()
        # test data
        print("Get data from FaceEmoreRecordDataset")
        dataset = FaceEmoreRecordDataset(
            data_dir=self.DATA_DIR,
            num_data_parts=self.NUM_DATA_PARTS,
            batch_size=self.BATCH_SIZE,
            image_width=self.IMAGE_SIZE,
            image_height=self.IMAGE_SIZE,
            data_format="NCHW",
        )
        image_list, issame_list = dataset.load_batchs(self.NUM_ITER)

        # define inference function
        print("Define inference function for insightface")
        infer_fn = make_insightface_resnet100_func(self.BATCH_SIZE,
                                                   self.IMAGE_SIZE,
                                                   self.IMAGE_SIZE)
        print("Load variables for insightface model")
        flow.load_variables(flow.checkpoint.get(self.MODEL_DIR))

        # call inference function to generate compare result
        print("Call inference function directly")
        features = []
        for i, image in enumerate(image_list):
            feature = infer_fn(image)
            features.append(feature)

        # save model
        print("Save model for insightface")
        saved_model_path = "insightface_models"
        model_version = 1

        model_version_path = os.path.join(saved_model_path, str(model_version))
        # Remove a stale version directory before saving a fresh one.
        if os.path.exists(model_version_path) and os.path.isdir(
                model_version_path):
            print("WARNING: The model version path '{}' already exist"
                  ", old version directory will be removed".format(
                      model_version_path))
            shutil.rmtree(model_version_path)

        saved_model_builder = (flow.saved_model.ModelBuilder(
            saved_model_path).ModelName("insightface").Version(model_version))
        saved_model_builder.AddFunction(infer_fn).Finish()
        saved_model_builder.Save()
        flow.clear_default_session()

        # load model and run
        print("InferenceSession load model")
        flow.clear_default_session()
        sess = flow.serving.InferenceSession()
        sess.load_saved_model(saved_model_path)
        sess.launch()

        job_name = sess.list_jobs()[0]
        input_names = sess.list_inputs()
        print("input names:", input_names)
        for input_name in input_names:
            print('input "{}" info: {}'.format(
                input_name, sess.input_info(input_name, job_name)))

        # Served features must agree with the direct-inference references.
        print("Run model and compare ")
        for i, (image, feature) in enumerate(zip(image_list, features)):
            input_dict = {input_names[0]: image}
            infer_result = sess.run(job_name, **input_dict)
            self.assertTrue(np.allclose(infer_result, feature))

        sess.close()
Exemple #24
0
def main(args):
    """Entry point for insightface training.

    Derives step/learning-rate schedule values from the config, sets up
    logging and the (optionally multi-node) OneFlow environment,
    optionally resumes from a checkpoint, then runs the epoch/step
    training loop with periodic verification callbacks and a per-epoch
    snapshot.
    """
    cfg = get_config(args.config)

    # Derived schedule quantities.
    cfg.device_num_per_node = args.device_num_per_node
    cfg.total_batch_size = cfg.batch_size * cfg.device_num_per_node * cfg.num_nodes
    cfg.steps_per_epoch = math.ceil(cfg.num_image / cfg.total_batch_size)
    cfg.total_step = cfg.num_epoch * cfg.steps_per_epoch
    cfg.lr_steps = (np.array(cfg.decay_epoch) * cfg.steps_per_epoch).tolist()
    lr_scales = [0.1, 0.01, 0.001, 0.0001]
    cfg.lr_scales = lr_scales[:len(cfg.lr_steps)]
    cfg.output = os.path.join("work_dir", cfg.output, cfg.loss)

    world_size = cfg.num_nodes
    os.makedirs(cfg.output, exist_ok=True)

    log_root = logging.getLogger()
    init_logging(log_root, cfg.output)
    flow.config.gpu_device_num(cfg.device_num_per_node)
    logging.info("gpu num: %d" % cfg.device_num_per_node)
    if cfg.num_nodes > 1:
        # Multi-node setup: register every node's address with the runtime.
        assert cfg.num_nodes <= len(
            cfg.node_ips
        ), "The number of nodes should not be greater than length of node_ips list."
        flow.env.ctrl_port(12138)
        nodes = []
        for ip in cfg.node_ips:
            addr_dict = {}
            addr_dict["addr"] = ip
            nodes.append(addr_dict)
        flow.env.machine(nodes)
    flow.env.log_dir(cfg.output)

    # Dump the effective config, padded for aligned log output.
    for key, value in cfg.items():
        num_space = 35 - len(key)
        logging.info(": " + key + " " * num_space + str(value))

    train_func = make_train_func(cfg)
    val_infer = Validator(cfg)

    # Validate every 3000 steps; log metrics every 50 steps.
    callback_verification = CallBackVerification(3000, cfg.val_targets,
                                                 cfg.eval_ofrecord_path)
    callback_logging = CallBackLogging(50, cfg.total_step,
                                       cfg.total_batch_size, world_size, None)

    if cfg.resume and os.path.exists(cfg.model_load_dir):
        logging.info("Loading model from {}".format(cfg.model_load_dir))
        variables = flow.checkpoint.get(cfg.model_load_dir)
        flow.load_variables(variables)

    start_epoch = 0
    global_step = 0
    lr = cfg.lr
    for epoch in range(start_epoch, cfg.num_epoch):
        for steps in range(cfg.steps_per_epoch):
            # Metrics are reported asynchronously via the logging callback.
            train_func().async_get(
                callback_logging.metric_cb(global_step, epoch, lr))
            callback_verification(global_step, val_infer.get_symbol_val_fn)
            global_step += 1
        if epoch in cfg.decay_epoch:
            # Step the (reported) learning rate down at decay epochs.
            lr *= 0.1
            logging.info("lr_steps: %d" % global_step)
            logging.info("lr change to %f" % lr)

        # snapshot
        path = os.path.join(cfg.output, "snapshot_" + str(epoch))
        flow.checkpoint.save(path)
        logging.info("oneflow Model Saved in '{}'".format(path))
Exemple #25
0
def make_repvgg_infer_func():
    """Build a OneFlow predict job for the LeNet model.

    Returns:
        tuple: ``(job, input_lbns, output_lbns)`` where ``job`` is the
        decorated inference function and the two dicts map logical names
        ("image" / "output") to the logical blob names needed when
        declaring a saved-model signature.
    """
    in_lbns, out_lbns = {}, {}

    @flow.global_function("predict", function_config=func_config)
    def repvgg_inference(
            image: tp.Numpy.Placeholder(shape=(1, 1, 28, 28))) -> tp.Numpy:
        # Record blob names as a side effect of job tracing so the caller
        # can wire Input/Output entries of a saved-model signature.
        in_lbns["image"] = image.logical_blob_name
        logits = Lenet(image)
        out_lbns["output"] = logits.logical_blob_name
        return logits

    return repvgg_inference, in_lbns, out_lbns


if __name__ == "__main__":
    # Build the inference job plus the logical-blob-name maps required to
    # declare the saved-model signature below.
    repvgg_infer, input_lbns, output_lbns = make_repvgg_infer_func()
    # Restore trained weights before tracing/executing the job.
    flow.load_variables(flow.checkpoint.get("./lenet_models"))

    # Sanity-run the job once on a dummy all-ones batch.
    x = np.ones(shape=(1, 1, 28, 28))
    original_out = repvgg_infer(x)
    # Describe the model for serving: name, version, function, signature.
    model_builder = flow.saved_model.ModelBuilder("./output")
    signature_builder = (model_builder.ModelName("Lenet").Version(
        1).AddFunction(repvgg_infer).AddSignature("regress"))
    for input_name, lbn in input_lbns.items():
        signature_builder.Input(input_name, lbn)
    for output_name, lbn in output_lbns.items():
        signature_builder.Output(output_name, lbn)
    # NOTE(review): the False argument presumably controls overwrite/version
    # behavior of ModelBuilder.Save -- confirm against the OneFlow API.
    model_builder.Save(False)
    def train(self, epochs):
        """Run the full SRGAN training loop for ``epochs`` epochs.

        Loads pre-generated ``.npy`` image arrays from ``self.data_dir``,
        alternates one discriminator update and one generator update per
        batch, validates once per epoch (L2 error, SSIM, PSNR), keeps only
        the checkpoint with the best validation PSNR (tracked after epoch
        50), and finally dumps all recorded loss/metric curves as ``.npy``
        files into ``self.loss_path``.

        Args:
            epochs (int): number of training epochs.
        """
        # download data npy
        # NOTE(review): these paths assume the npy arrays were produced by a
        # separate data-prep step (e.g. "train_88hr_imgs.npy") -- confirm.
        train_hr_data_path = os.path.join(
            self.data_dir, "{}_{}hr_imgs.npy".format("train", self.hr_size))
        train_lr_data_path = os.path.join(
            self.data_dir, "{}_{}lr_imgs.npy".format("train", self.lr_size))
        val_hr_data_path = os.path.join(
            self.data_dir, "{}_{}hr_imgs.npy".format("val", self.hr_size))
        val_lr_data_path = os.path.join(
            self.data_dir, "{}_{}lr_imgs.npy".format("val", self.lr_size))

        train_hr_data = np.load(train_hr_data_path)
        train_lr_data = np.load(train_lr_data_path)
        val_hr_data = np.load(val_hr_data_path)
        val_lr_data = np.load(val_lr_data_path)

        # Hard-coded dataset sizes: 16700 training / 425 validation images,
        # NCHW layout.
        assert train_hr_data.shape == (
            16700, 3, self.hr_size,
            self.hr_size), "The shape of train_hr_data is {}".format(
                train_hr_data.shape)
        assert val_lr_data.shape == (
            425, 3, self.lr_size,
            self.lr_size), "The shape of val_lr_data is {}".format(
                val_lr_data.shape)

        # save loss
        # Per-print-interval loss history; written to disk after training.
        G_l2_loss = []
        G_gan_loss = []
        G_perceptual_loss = []
        G_tv_loss = []
        G_total_loss = []
        Val_l2_error = []
        Val_ssim = []
        Val_psnr = []

        # config
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        flow.config.gpu_device_num(self.gpu_num_per_node)
        # train config
        # Constant learning rate (no decay milestones).
        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [self.lr])

        # Validation-only forward pass of the generator (no gradients).
        @flow.global_function(type="predict", function_config=func_config)
        def eval_generator(input: tp.Numpy.Placeholder(
            (self.batch_size, 3, self.lr_size, self.lr_size))) -> tp.Numpy:
            g_out = self.Generator(input, trainable=False)
            return g_out

        # Generator step: discriminator is frozen (trainable=False);
        # the total loss combines L2, adversarial, perceptual and TV terms.
        @flow.global_function(type="train", function_config=func_config)
        def train_generator(input: tp.Numpy.Placeholder(
            (self.batch_size, 3, self.lr_size,
             self.lr_size)), target: tp.Numpy.Placeholder(
                 (self.batch_size, 3, self.hr_size,
                  self.hr_size))) -> Tuple[tp.Numpy, tp.Numpy, tp.Numpy,
                                           tp.Numpy, tp.Numpy, tp.Numpy]:
            g_out = self.Generator(input, trainable=True)
            g_logits = self.Discriminator(g_out, trainable=False)
            # Adversarial Loss (weighted by 0.001)
            g_gan_loss = 0.001 * flow.math.reduce_mean(1 - g_logits)
            # Image Loss
            g_l2_loss = self.mseloss(g_out, target)
            # TV Loss
            g_tv_loss = self.total_variance_loss(g_out, weight=2e-8)

            # Perceptual loss
            def perceptual_loss(fake, real, weight=1.0):
                # Frozen VGG16-BN feature extractor; the second call reuses
                # the same weights (reuse=True).
                fake_feature = self.vgg16bn(fake, trainable=False)
                real_feature = self.vgg16bn(real, trainable=False, reuse=True)

                return self.mseloss(fake_feature, real_feature, weight=weight)

            g_perceptual_loss = perceptual_loss(g_out, target, weight=0.006)

            g_total_loss = g_l2_loss + g_gan_loss + g_perceptual_loss + g_tv_loss

            flow.optimizer.Adam(lr_scheduler, beta1=0.5,
                                beta2=0.999).minimize(g_total_loss)

            return g_l2_loss, g_gan_loss, g_perceptual_loss, g_tv_loss, g_total_loss, g_out

        # Discriminator step: generator is frozen; the discriminator is
        # applied to both fake and real batches (reuse=True shares weights).
        @flow.global_function(type="train", function_config=func_config)
        def train_discriminator(input: tp.Numpy.Placeholder(
            (self.batch_size, 3, self.lr_size, self.lr_size)),
                                target: tp.Numpy.Placeholder(
                                    (self.batch_size, 3, self.hr_size,
                                     self.hr_size))) -> tp.Numpy:
            g_out = self.Generator(input, trainable=False)
            g_logits = self.Discriminator(g_out, trainable=True)
            d_logits = self.Discriminator(target, trainable=True, reuse=True)

            d_loss = 1 - flow.math.reduce_mean(d_logits - g_logits)

            flow.optimizer.Adam(lr_scheduler, beta1=0.5,
                                beta2=0.999).minimize(d_loss)

            return d_loss

        # load trained weight of vgg16bn and initialize automatically GAN model
        flow.load_variables(flow.checkpoint.get(self.vgg_path))

        # trained weights of vgg need to be changed, because vgg is used twice like Discriminator. Please use weights in of_vgg16bn_reuse path to load vgg for perceptual loss.
        # flow.checkpoint.save("vgg_checkpoint")

        # Whole batches only; trailing remainder images are dropped.
        batch_num = len(train_hr_data) // self.batch_size
        # pre_best: epoch number of the best checkpoint saved so far (-1 = none).
        pre_best, best_psnr = -1, 0
        print("****************** start training *****************")
        for epoch_idx in range(epochs):
            start = time.time()
            print("****************** train  *****************")
            for batch_idx in range(batch_num):
                inputs = train_lr_data[batch_idx *
                                       self.batch_size:(batch_idx + 1) *
                                       self.batch_size].astype(np.float32,
                                                               order="C")
                target = train_hr_data[batch_idx *
                                       self.batch_size:(batch_idx + 1) *
                                       self.batch_size].astype(np.float32,
                                                               order="C")
                # Alternate: one discriminator step, then one generator step.
                d_loss = train_discriminator(inputs, target)
                g_l2_loss, g_gan_loss, g_perceptual_loss, g_tv_loss, g_total_loss, g_out = train_generator(
                    inputs, target)

                d_loss = d_loss.mean()
                g_l2_loss = g_l2_loss.mean()
                g_gan_loss = g_gan_loss.mean()
                g_perceptual_loss = g_perceptual_loss.mean()
                g_tv_loss = g_tv_loss.mean()
                g_total_loss = g_total_loss.mean()

                if (batch_idx + 1) % self.print_interval == 0:
                    print(
                        "{}th epoch, {}th batch, g_l2_loss:{}, g_gan_loss:{}, g_perceptual_loss:{}, g_tv_loss:{}, gloss:{}, dloss:{} "
                        .format(epoch_idx + 1, batch_idx + 1, g_l2_loss,
                                g_gan_loss, g_perceptual_loss, g_tv_loss,
                                g_total_loss, d_loss))

                    G_l2_loss.append(g_l2_loss)
                    G_gan_loss.append(g_gan_loss)
                    G_perceptual_loss.append(g_perceptual_loss)
                    G_tv_loss.append(g_tv_loss)
                    G_total_loss.append(g_total_loss)
                    D_total_loss.append(d_loss)

            print("Time for epoch {} is {} sec.".format(
                epoch_idx + 1,
                time.time() - start))

            # Interval of 1 means "validate every epoch"; kept as a knob.
            if (epoch_idx + 1) % 1 == 0:
                # save train images
                # self.save_images(g_out, inputs, target, epoch_idx, name="train")

                # save val images, trainable = False
                # and calculate MSE, SSIMs, SSIM, PSNR
                val_l2_error, val_ssim, val_psnr = 0, 0, 0
                val_batch_num = len(val_hr_data) // self.batch_size
                for val_batch_idx in range(val_batch_num):
                    val_inputs = val_lr_data[val_batch_idx *
                                             self.batch_size:(val_batch_idx +
                                                              1) *
                                             self.batch_size].astype(
                                                 np.float32, order="C")
                    val_target = val_hr_data[val_batch_idx *
                                             self.batch_size:(val_batch_idx +
                                                              1) *
                                             self.batch_size].astype(
                                                 np.float32, order="C")
                    val_g_out = eval_generator(val_inputs)

                    # Metrics computed in NHWC layout (transpose from NCHW).
                    val_l2_error += (np.square(val_g_out - val_target).mean())
                    val_ssim += self.ssim(val_target.transpose(0, 2, 3, 1),
                                          val_g_out.transpose(0, 2, 3, 1))
                    # val_ssims += (pytorch_ssim.ssim(val_g_out, val_target, oneflow=True).item())
                    val_psnr += self.psnr(val_target.transpose(0, 2, 3, 1),
                                          val_g_out.transpose(0, 2, 3, 1))

                # save val images
                # Uses the tensors from the last validation batch only.
                self.save_images(val_g_out,
                                 val_inputs,
                                 val_target,
                                 epoch_idx,
                                 name="val")

                val_l2_error = val_l2_error / val_batch_num
                val_ssim = val_ssim / val_batch_num
                val_psnr = val_psnr / val_batch_num
                # val_psnr = 10 * np.log10(1 / val_l2_error)

                Val_l2_error.append(val_l2_error)
                Val_ssim.append(val_ssim)
                Val_psnr.append(val_psnr)
                print("****************** evalute  *****************")
                print(
                    "{}th epoch, {}th batch, val_l2_error:{}, val_ssim:{}, val_psnr:{}."
                    .format(epoch_idx + 1, batch_idx + 1, val_l2_error,
                            val_ssim, val_psnr))
                # Best-checkpoint tracking starts after 50 warm-up epochs;
                # only one "best" snapshot is kept on disk at a time.
                if epoch_idx + 1 > 50 and val_psnr > best_psnr:
                    best_psnr = val_psnr
                    if pre_best != -1:
                        # delete the previous best checkpoint
                        print(
                            "delete the previous best {}th epoch model".format(
                                pre_best))
                        shutil.rmtree(
                            os.path.join(self.checkpoint_path,
                                         "{}th_epoch".format(pre_best)))

                    # save parameters
                    flow.checkpoint.save(
                        os.path.join(self.checkpoint_path,
                                     "{}th_epoch".format(epoch_idx + 1)))
                    pre_best = epoch_idx + 1
                    print("save the best {}th epoch model at {}.".format(
                        epoch_idx + 1,
                        str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))))

        # save train loss and val error to plot
        np.save(
            os.path.join(self.loss_path, 'G_l2_loss_{}.npy'.format(epochs)),
            G_l2_loss)
        np.save(
            os.path.join(self.loss_path, 'G_gan_loss_{}.npy'.format(epochs)),
            G_gan_loss)
        np.save(
            os.path.join(self.loss_path,
                         'G_perceptual_loss_{}.npy'.format(epochs)),
            G_perceptual_loss)
        np.save(
            os.path.join(self.loss_path, 'G_tv_loss_{}.npy'.format(epochs)),
            G_tv_loss)
        np.save(
            os.path.join(self.loss_path, 'G_total_loss_{}.npy'.format(epochs)),
            G_total_loss)
        np.save(
            os.path.join(self.loss_path, 'D_total_loss_{}.npy'.format(epochs)),
            D_total_loss)

        np.save(
            os.path.join(self.loss_path, 'Val_l2_error_{}.npy'.format(epochs)),
            Val_l2_error)
        np.save(os.path.join(self.loss_path, 'Val_ssim_{}.npy'.format(epochs)),
                Val_ssim)
        np.save(os.path.join(self.loss_path, 'Val_psnr_{}.npy'.format(epochs)),
                Val_psnr)
        print("*************** Train {} done ***************** ".format(
            self.path))
Exemple #27
0
def main(args):
    """Drive face-recognition model training.

    Sets up single/multi-node OneFlow execution, prepares a fresh snapshot
    directory under ``args.models_root``, optionally resumes from
    ``args.model_load_dir``, then runs ``args.total_iter_num`` training
    steps with periodic validation and checkpointing.

    Args:
        args: parsed command-line namespace (device counts, paths,
            hyper-parameters); also mirrored into the module-level
            ``default`` config for node settings.
    """
    flow.config.gpu_device_num(args.device_num_per_node)
    print("gpu num: ", args.device_num_per_node)
    if not os.path.exists(args.models_root):
        os.makedirs(args.models_root)

    def IsFileOrNonEmptyDir(path):
        # True when `path` is a file, or a directory that already has
        # contents -- either would risk clobbering previous models.
        if os.path.isfile(path):
            return True
        if os.path.isdir(path) and len(os.listdir(path)) != 0:
            return True
        return False

    assert not IsFileOrNonEmptyDir(
        args.models_root), "Non-empty directory {} already exists!".format(
            args.models_root)
    # Snapshots go under <models_root>/<network>-<loss>-<dataset>/.
    prefix = os.path.join(args.models_root,
                          "%s-%s-%s" % (args.network, args.loss, args.dataset),
                          "model")
    prefix_dir = os.path.dirname(prefix)
    print("prefix: ", prefix)
    if not os.path.exists(prefix_dir):
        os.makedirs(prefix_dir)

    default.num_nodes = args.num_nodes
    default.node_ips = args.node_ips
    if args.num_nodes > 1:
        assert args.num_nodes <= len(
            args.node_ips
        ), "The number of nodes should not be greater than length of node_ips list."
        # NOTE(review): control port is hard-coded; make configurable if two
        # jobs ever share a host.
        flow.env.ctrl_port(12138)
        nodes = []
        for ip in args.node_ips:
            addr_dict = {}
            addr_dict["addr"] = ip
            nodes.append(addr_dict)

        flow.env.machine(nodes)
    if config.data_format.upper() != "NCHW" and config.data_format.upper(
    ) != "NHWC":
        raise ValueError("Invalid data format")
    flow.env.log_dir(args.log_dir)
    train_func = make_train_func(args)
    if args.do_validation_while_train:
        validator = Validator(args)

    if os.path.exists(args.model_load_dir):
        # Refuse to load from the directory we are about to write into.
        assert os.path.abspath(
            os.path.dirname(os.path.split(
                args.model_load_dir)[0])) != os.path.abspath(
                    os.path.join(
                        args.models_root,
                        args.network + "-" + args.loss + "-" + args.dataset)
                ), "You should specify a new path to save new models."
        print("Loading model from {}".format(args.model_load_dir))
        variables = flow.checkpoint.get(args.model_load_dir)
        flow.load_variables(variables)

    print("num_classes ", config.num_classes)
    print("Called with argument: ", args, config)
    train_metric = TrainMetric(desc="train",
                               calculate_batches=args.loss_print_frequency,
                               batch_size=args.train_batch_size)
    # NOTE(review): `lr` is tracked/logged here but never fed back into the
    # optimizer; the schedule is presumably baked into make_train_func.
    lr = args.lr

    for step in range(args.total_iter_num):
        # train
        train_func().async_get(train_metric.metric_cb(step))

        # validation
        # Fix: gate on args (same flag that guards the Validator
        # construction above); testing default.do_validation_while_train
        # raised NameError on `validator` whenever the two flags disagreed.
        if args.do_validation_while_train and (
                step + 1) % args.validation_interval == 0:
            for ds in config.val_targets:
                issame_list, embeddings_list = validator.do_validation(
                    dataset=ds)
                validation_util.cal_validation_metrics(
                    embeddings_list,
                    issame_list,
                    nrof_folds=args.nrof_folds,
                )
        if step in args.lr_steps:
            lr *= 0.1
            print("lr_steps: ", step)
            print("lr change to ", lr)

        # snapshot
        if (step + 1) % args.iter_num_in_snapshot == 0:
            path = os.path.join(
                prefix_dir,
                "snapshot_" + str(step // args.iter_num_in_snapshot))
            flow.checkpoint.save(path)

    if args.save_last_snapshot:
        flow.checkpoint.save(os.path.join(prefix_dir, "snapshot_last"))
Exemple #28
0
from model import get_job_function

flow.config.enable_legacy_model_io(False)


def parse_args():
    """Parse command-line options for the QAT LeNet predict/export script.

    Returns:
        argparse.Namespace: batch_size (int, default 100),
        save_name (str, default "lenet"), disable_qat (bool flag).
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("--batch-size", type=int, default=100)
    ap.add_argument("--save-name", type=str, default="lenet")
    ap.add_argument("--disable-qat", action="store_true")
    return ap.parse_args()


if __name__ == "__main__":
    args = parse_args()
    # predict: sanity-run the job once on an all-ones dummy batch
    data = np.ones((args.batch_size, 1, 28, 28))
    predict_job = get_job_function("predict", not args.disable_qat,
                                   args.batch_size)
    # Restore the trained weights saved under ./<save_name>_models.
    flow.load_variables(flow.checkpoint.get("./" + args.save_name + "_models"))
    print(predict_job(data))
    # export
    # Convert the traced predict job to ONNX (opset 10, weights embedded).
    flow.onnx.export(
        predict_job,
        args.save_name + "_models",
        args.save_name + ".onnx",
        opset=10,
        external_data=False,
    )
    print("onnx saved.")
Exemple #29
0
            name="dense1",
        )
        dense2 = flow.layers.dense(hidden,
                                   10,
                                   kernel_initializer=initializer,
                                   name="dense2")

        dense3 = flow.layers.dense(dense2,
                                   10,
                                   kernel_initializer=initializer,
                                   name="dense3")
        loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels, dense3)

    lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
    flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(loss)

    return loss


if __name__ == "__main__":
    # Warm-start from the previously trained MLP checkpoint.
    flow.load_variables(flow.checkpoint.get("./mlp_models_1"))

    (train_images, train_labels), (test_images,
                                   test_labels) = flow.data.load_mnist(
                                       BATCH_SIZE, BATCH_SIZE)
    # Fine-tune on MNIST, logging the mean loss every 20 batches.
    for i, (images, labels) in enumerate(zip(train_images, train_labels)):
        loss = train_job(images, labels)
        if i % 20 == 0:
            print(loss.mean())
    # Persist the extended model separately from the warm-start checkpoint.
    flow.checkpoint.save("./mlp_ext_models_1")
Exemple #30
0
    def test_resnet(test_case, batch_size=DEFAULT_BATCH_SIZE, num_batchs=6):
        """End-to-end saved-model round-trip test for ResNet50.

        Runs inference with the in-process job, exports the model via
        flow.saved_model, reloads it through a serving InferenceSession,
        re-runs the same batches, and asserts the argmax predictions match.
        """
        init_env()
        # input image format NCHW
        image_size = (3, DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE)
        resnet_infer, input_lbns, output_lbns = make_resnet_infer_func(
            batch_size, image_size
        )

        # resnet inference model parameters
        flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR))

        # test data
        dataset = ImageNetRecordDataset(
            batch_size=batch_size,
            image_resize_size=DEFAULT_IMAGE_SIZE,
            data_format="NCHW",
        )
        image_list, label_list = dataset.load_batchs(num_batchs)

        # Baseline: predictions from the in-process inference job.
        print("resnet inference result:")
        origin_outputs = []
        for i, (image, label) in enumerate(zip(image_list, label_list)):
            output = resnet_infer(image)
            arg_max = np.argmax(output, axis=1)
            origin_outputs.append(arg_max)
            print("iter#{:<6} predict: ".format(i), arg_max, "label: ", label)

        origin_outputs = np.array(origin_outputs, dtype=np.float32)

        # save model
        saved_model_path = "resnet50_models"
        model_version = 1

        # Remove a stale export of the same version before saving.
        model_version_path = os.path.join(saved_model_path, str(model_version))
        if os.path.exists(model_version_path) and os.path.isdir(model_version_path):
            print(
                "WARNING: The model version path '{}' already exist"
                ", old version directory will be removed".format(model_version_path)
            )
            shutil.rmtree(model_version_path)

        # Declare the serving signature from the recorded logical blob names.
        saved_model_builder = flow.saved_model.ModelBuilder(saved_model_path)
        signature_builder = (
            saved_model_builder.ModelName("resnet50")
            .Version(model_version)
            .AddFunction(resnet_infer)
            .AddSignature("regress")
        )
        for input_name, lbn in input_lbns.items():
            signature_builder.Input(input_name, lbn)
        for output_name, lbn in output_lbns.items():
            signature_builder.Output(output_name, lbn)
        saved_model_builder.Save()

        # load model and run
        # Tear down the training-style session before serving the export.
        flow.clear_default_session()
        sess = flow.serving.InferenceSession()
        sess.load_saved_model(saved_model_path)
        # sess.print_job_set()
        sess.launch()

        input_names = sess.list_inputs()
        print("input names:", input_names)
        for input_name in input_names:
            print('input "{}" info: {}'.format(input_name, sess.input_info(input_name)))

        # Re-run the same batches through the reloaded model.
        print("load saved resnet and inference result:")
        cmp_outputs = []
        for i, (image, label) in enumerate(zip(image_list, label_list)):
            outputs = sess.run(resnet_infer.__name__, image=image)
            arg_max = np.argmax(outputs[0], axis=1)
            cmp_outputs.append(arg_max)
            print("iter#{:<6} output:".format(i), arg_max, "label: ", label)

        cmp_outputs = np.array(cmp_outputs, dtype=np.float32)

        # Round-trip check: exported model must reproduce the baseline argmax.
        test_case.assertTrue(np.allclose(origin_outputs, cmp_outputs))