Example No. 1
def _TestLoadCorrectness(test_case, model_getter, dtype, legacy_api):
    """
    Save weights by legacy model io, load weights by new model io,
    and check the equality.
    """
    with tempfile.TemporaryDirectory() as save_dir:
        refresh_session()
        flow.config.enable_legacy_model_io(True)
        large1 = get_checkpoint_ready_model(model_getter, dtype)
        check_point = flow.train.CheckPoint()
        check_point.init()
        check_point.save(save_dir)
        res1 = large1()
        flow.clear_default_session()
        flow.config.gpu_device_num(4)
        flow.config.enable_legacy_model_io(False)
        large2 = get_checkpoint_ready_model(model_getter, dtype)
        if legacy_api:
            check_point = flow.train.CheckPoint()
            check_point.load(save_dir)
        else:
            vars_in_file = flow.checkpoint.get(save_dir)
            flow.load_variables(vars_in_file)
        res2 = large2()
        test_case.assertTrue(np.array_equal(res1, res2))
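The test above boils down to the two checkpoint APIs it compares. The sketch below distills them; it assumes a job function has already been defined and compiled in the current default session, and it uses only calls that appear in these examples (flow.train.CheckPoint, flow.checkpoint.save/get, flow.load_variables).

# Minimal sketch, assuming a compiled @flow.global_function job and a writable save_dir.

# Legacy model IO:
check_point = flow.train.CheckPoint()
check_point.init()            # initialize all variables
check_point.save(save_dir)    # write a snapshot
check_point.load(save_dir)    # restore from a snapshot

# New model IO:
flow.checkpoint.save(save_dir)                  # write a snapshot
vars_in_file = flow.checkpoint.get(save_dir)    # read it back as a name -> variable dict
flow.load_variables(vars_in_file)               # assign the values into the live session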
Example No. 2
def main():
    flow.env.log_dir(args.log_dir)
    assert os.path.isdir(args.model_load_dir)
    flow.load_variables(flow.checkpoint.get(args.model_load_dir))
    image = load_image(args.image_path)
    predictions = InferenceNet(image)
    clsidx = predictions.argmax()
    print(predictions.max(), clsidx_2_labels[clsidx])
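The helpers load_image, InferenceNet and clsidx_2_labels are defined elsewhere in that project and are not shown here. As an illustration only, a hypothetical load_image for an ImageNet-style classifier expecting a (1, 3, 224, 224) float32 batch could look like this:

# Hypothetical helper, not part of the original example.
import cv2
import numpy as np

def load_image(image_path, size=224):
    im = cv2.imread(image_path)              # BGR, HWC, uint8
    im = cv2.resize(im, (size, size))
    im = im.astype(np.float32) / 255.0       # scale to [0, 1]; real code may also subtract mean/std
    im = np.transpose(im, (2, 0, 1))         # HWC -> CHW
    return np.expand_dims(im, axis=0)        # add the batch dimension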
Example No. 3
def _TestAssignmentBetweenMemory(test_case, dtype):
    refresh_session()
    model = get_checkpoint_ready_model(get_simple_model, dtype)
    all_vars = flow.get_all_variables()
    flow.load_variables({"x": all_vars["z"]})
    flow_res = model()
    np_res = all_vars["z"].numpy() * 2 + all_vars["y"].numpy()
    test_case.assertTrue(np.allclose(flow_res, np_res))
Example No. 4
 def __init__(self, model_save_dir, model_load_dir, model_save_init=False):
     self._model_save_dir = model_save_dir
     if model_load_dir:
         assert os.path.isdir(model_load_dir)
         print("Restoring model from {}.".format(model_load_dir))
         flow.load_variables(flow.checkpoint.get(model_load_dir))
     elif model_save_init:
         flow.checkpoint.save("initial_model")
         print("Init model on demand.")
Example No. 5
 def test_resnet(test_case, batch_size=DEFAULT_BATCH_SIZE, num_batchs=6):
     init_env()
     image_size = (3, DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE)
     (resnet_infer, input_lbns,
      output_lbns) = make_resnet_infer_func(batch_size, image_size)
     flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR))
     dataset = ImageNetRecordDataset(
         batch_size=batch_size,
         image_resize_size=DEFAULT_IMAGE_SIZE,
         data_format="NCHW",
     )
     (image_list, label_list) = dataset.load_batchs(num_batchs)
     print("resnet inference result:")
     origin_outputs = []
     for (i, (image, label)) in enumerate(zip(image_list, label_list)):
         output = resnet_infer(image)
         arg_max = np.argmax(output, axis=1)
         origin_outputs.append(arg_max)
         print("iter#{:<6} predict: ".format(i), arg_max, "label: ", label)
     origin_outputs = np.array(origin_outputs, dtype=np.float32)
     saved_model_path = "resnet50_models"
     model_version = 1
     model_version_path = os.path.join(saved_model_path, str(model_version))
     if os.path.exists(model_version_path) and os.path.isdir(
             model_version_path):
         print(
             "WARNING: The model version path '{}' already exist, old version directory will be removed"
             .format(model_version_path))
         shutil.rmtree(model_version_path)
     saved_model_builder = flow.saved_model.ModelBuilder(saved_model_path)
     signature_builder = (saved_model_builder.ModelName("resnet50").Version(
         model_version).AddFunction(resnet_infer).AddSignature("regress"))
     for (input_name, lbn) in input_lbns.items():
         signature_builder.Input(input_name, lbn)
     for (output_name, lbn) in output_lbns.items():
         signature_builder.Output(output_name, lbn)
     saved_model_builder.Save()
     flow.clear_default_session()
     sess = flow.serving.InferenceSession()
     sess.load_saved_model(saved_model_path)
     sess.launch()
     job_name = sess.list_jobs()[0]
     input_names = sess.list_inputs()
     print("input names:", input_names)
     for input_name in input_names:
         print('input "{}" info: {}'.format(
             input_name, sess.input_info(input_name, job_name)))
     print("load saved resnet and inference result:")
     cmp_outputs = []
     for (i, (image, label)) in enumerate(zip(image_list, label_list)):
         outputs = sess.run(resnet_infer.__name__, image=image)
         arg_max = np.argmax(outputs[0], axis=1)
         cmp_outputs.append(arg_max)
         print("iter#{:<6} output:".format(i), arg_max, "label: ", label)
     cmp_outputs = np.array(cmp_outputs, dtype=np.float32)
     test_case.assertTrue(np.allclose(origin_outputs, cmp_outputs))
     sess.close()
Example No. 6
 def test_insightface(self):
     init_env()
     print("Get data from FaceEmoreRecordDataset")
     dataset = FaceEmoreRecordDataset(
         data_dir=self.DATA_DIR,
         num_data_parts=self.NUM_DATA_PARTS,
         batch_size=self.BATCH_SIZE,
         image_width=self.IMAGE_SIZE,
         image_height=self.IMAGE_SIZE,
         data_format="NCHW",
     )
     (image_list, issame_list) = dataset.load_batchs(self.NUM_ITER)
     print("Define inference function for insightface")
     infer_fn = make_insightface_resnet100_func(self.BATCH_SIZE,
                                                self.IMAGE_SIZE,
                                                self.IMAGE_SIZE)
     print("Load variables for insightface model")
     flow.load_variables(flow.checkpoint.get(self.MODEL_DIR))
     print("Call inference function directly")
     features = []
     for (i, image) in enumerate(image_list):
         feature = infer_fn(image)
         features.append(feature)
     print("Save model for insightface")
     saved_model_path = "insightface_models"
     model_version = 1
     model_version_path = os.path.join(saved_model_path, str(model_version))
     if os.path.exists(model_version_path) and os.path.isdir(
             model_version_path):
         print(
             "WARNING: The model version path '{}' already exist, old version directory will be removed"
             .format(model_version_path))
         shutil.rmtree(model_version_path)
     saved_model_builder = (flow.saved_model.ModelBuilder(
         saved_model_path).ModelName("insightface").Version(model_version))
     saved_model_builder.AddFunction(infer_fn).Finish()
     saved_model_builder.Save()
     flow.clear_default_session()
     print("InferenceSession load model")
     flow.clear_default_session()
     sess = flow.serving.InferenceSession()
     sess.load_saved_model(saved_model_path)
     sess.launch()
     job_name = sess.list_jobs()[0]
     input_names = sess.list_inputs()
     print("input names:", input_names)
     for input_name in input_names:
         print('input "{}" info: {}'.format(
             input_name, sess.input_info(input_name, job_name)))
     print("Run model and compare ")
     for (i, (image, feature)) in enumerate(zip(image_list, features)):
         input_dict = {input_names[0]: image}
         infer_result = sess.run(job_name, **input_dict)
         self.assertTrue(np.allclose(infer_result, feature))
     sess.close()
Example No. 7
def _TestMixedModel(test_case, dtype):
    with tempfile.TemporaryDirectory() as save_dir1, tempfile.TemporaryDirectory() as save_dir2:

        def get_variable(name):
            return flow.get_variable(
                name=name,
                shape=(10, 80, 40, 20),
                dtype=dtype,
                initializer=flow.random_normal_initializer(mean=10, stddev=1),
                distribute=flow.distribute.split(0),
            )

        def get_part_of_mixed_model(dtype):
            @flow.global_function()
            def model() -> tp.Numpy:
                with get_placement():
                    x = get_variable("x")
                    return x

            return model

        def get_mixed_model(dtype):
            @flow.global_function()
            def model() -> tp.Numpy:
                with get_placement():
                    x1 = get_variable("x_from_model1")
                    x2 = get_variable("x_from_model2")
                    return x1 + x2

            return model

        refresh_session()
        model1 = get_checkpoint_ready_model(get_part_of_mixed_model, dtype)
        flow.checkpoint.save(save_dir1)
        refresh_session()
        model2 = get_checkpoint_ready_model(get_part_of_mixed_model, dtype)
        flow.checkpoint.save(save_dir2)
        refresh_session()
        mixed_model = get_checkpoint_ready_model(get_mixed_model, dtype)
        var_dict_from_model1 = flow.checkpoint.get(save_dir1)
        var_dict_from_model2 = flow.checkpoint.get(save_dir2)
        new_var_dict = {}
        for (key, val) in var_dict_from_model1.items():
            new_var_dict["{}_from_model1".format(key)] = val
        for (key, val) in var_dict_from_model2.items():
            new_var_dict["{}_from_model2".format(key)] = val
        flow.load_variables(new_var_dict)
        res = mixed_model()
        test_case.assertTrue(
            np.allclose(
                res,
                var_dict_from_model1["x"].numpy() + var_dict_from_model2["x"].numpy(),
            )
        )
Example No. 8
def main():
    InitNodes(args)
    assert args.model_load_dir, "Must have model load dir!"

    flow.env.log_dir(args.log_dir)
    # snapshot = Snapshot(args.model_save_dir, args.model_load_dir)
    print("Restoring model from {}.".format(args.model_load_dir))
    flow.load_variables(flow.checkpoint.get(args.model_load_dir))
    metric = Metric(desc="validation",
                    calculate_batches=num_val_steps,
                    batch_size=val_batch_size)

    for i in range(args.num_epochs):
        for j in range(num_val_steps):
            InferenceNet().async_get(metric.metric_cb(0, j))
Example No. 9
def _TestRoundTrip(test_case, model_getter, dtype):
    """
    Save weights by new model io, load weights by new model io,
    and check the equality.
    """
    with tempfile.TemporaryDirectory() as save_dir:
        refresh_session()
        large1 = get_checkpoint_ready_model(model_getter, dtype)
        flow.checkpoint.save(save_dir)
        res1 = large1()
        refresh_session()
        large2 = get_checkpoint_ready_model(model_getter, dtype)
        vars_in_file = flow.checkpoint.get(save_dir)
        flow.load_variables(vars_in_file)
        res2 = large2()
        test_case.assertTrue(np.array_equal(res1, res2))
Example No. 10
def _TestPartiallyLoadNumpy(test_case, dtype):
    refresh_session()
    model = get_checkpoint_ready_model(get_add_and_reduce_mean_model, dtype)
    var_x = flow.get_all_variables()["x"]
    var_y_value_before_loading = flow.get_all_variables()["y"].numpy()
    new_val_np = np.random.random(var_x.shape).astype(np.float32)
    flow.load_variables({"x": new_val_np})
    var_y_value_after_loading = flow.get_all_variables()["y"].numpy()
    flow_res = model()
    np_res = (var_y_value_after_loading + new_val_np).mean()
    test_case.assertTrue(
        np.allclose(flow_res, np_res),
        {"flow_res": flow_res, "np_res": np_res, "diff": flow_res - np_res},
    )
    test_case.assertTrue(
        np.array_equal(var_y_value_before_loading, var_y_value_after_loading)
    )
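The point of this test is that flow.load_variables also accepts plain numpy arrays and only touches the variables named in the dict; every other variable keeps its current value. A minimal sketch of the pattern, reusing the variable names from the test above:

# Assumes variables "x" and "y" already exist in the current session.
all_vars = flow.get_all_variables()
new_x = np.random.random(all_vars["x"].shape).astype(np.float32)
flow.load_variables({"x": new_x})   # "x" is overwritten, "y" is left untouched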
Example No. 11
def _TestResumeTraining(test_case):
    with tempfile.TemporaryDirectory() as save_dir:
        refresh_session()
        model = get_checkpoint_ready_model(
            get_simple_momentum_training_model, flow.float32
        )
        model()
        flow.checkpoint.save(save_dir)
        model()
        w1 = flow.get_all_variables()["w"].numpy()
        refresh_session()
        model = get_checkpoint_ready_model(
            get_simple_momentum_training_model, flow.float32
        )
        flow.load_variables(flow.checkpoint.get(save_dir))
        model()
        w2 = flow.get_all_variables()["w"].numpy()
        test_case.assertTrue(np.array_equal(w1, w2))
Example No. 12
 def test_style_model(self):
     init_env()
     input_image = load_image(self.INPUT_IMAGE_FILE)
     (image_height, image_width) = input_image.shape[2:]
     style_transfer = make_style_transfer(image_height, image_width)
     flow.load_variables(flow.checkpoint.get(self.CHECKPOINT_DIR))
     saved_model_path = "style_models"
     model_version = 1
     saved_model_version_dir = os.path.join(saved_model_path,
                                            str(model_version))
     if not os.path.exists(saved_model_version_dir):
         saved_model_builder = (
             flow.saved_model.ModelBuilder(saved_model_path).ModelName(
                 "style_transfer").Version(model_version))
         saved_model_builder.AddFunction(style_transfer).Finish()
         saved_model_builder.Save()
     flow.clear_default_session()
     sess = flow.serving.InferenceSession()
     sess.load_saved_model(saved_model_path)
     sess.launch()
     job_names = sess.list_jobs()
     print("job names:", job_names)
     input_names = sess.list_inputs()
     print("input names:", input_names)
     for input_name in input_names:
         print('input "{}" info: {}'.format(
             input_name, sess.input_info(input_name, job_names[0])))
     output_names = sess.list_outputs()
     print("output names:", output_names)
     for output_name in output_names:
         print('output "{}" info: {}'.format(
             output_name, sess.output_info(output_name, job_names[0])))
     input_dict = {input_names[0]: input_image}
     outputs = sess.run(style_transfer.__name__, **input_dict)
     if self.OUTPUT_IMAGE_FILE is not None:
         cv2.imwrite(self.OUTPUT_IMAGE_FILE, recover_image(outputs[0]))
         print("write styled output image to", self.OUTPUT_IMAGE_FILE)
     sess.close()
Example No. 13

def check_equality(job_func: Callable, onnx_model: onnx.ModelProto,
                   image_path: Text) -> (bool, np.ndarray):
    image = load_image(image_path)
    onnx_res = onnx_inference(image, onnx_model)
    oneflow_res = job_func(image)
    is_equal = np.allclose(onnx_res, oneflow_res, rtol=1e-4, atol=1e-5)
    return is_equal, onnx_res


if __name__ == "__main__":
    image_path = "data/tiger.jpg"
    # set up your model path
    flow_weights_path = "resnet_v15_of_best_model_val_top1_77318"
    onnx_model_dir = "onnx/model"

    flow.load_variables(flow.checkpoint.get(flow_weights_path))

    # convert oneflow to onnx
    onnx_model = oneflow_to_onnx(InferenceNet,
                                 flow_weights_path,
                                 onnx_model_dir,
                                 external_data=False)

    # check equality
    are_equal, onnx_res = check_equality(InferenceNet, onnx_model, image_path)
    clsidx_onnx = onnx_res.argmax()
    print("Are the results equal? {}".format("Yes" if are_equal else "No"))
    print("Class: {}; score: {}".format(clsidx_2_labels[clsidx_onnx],
                                        onnx_res.max()))
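The helpers oneflow_to_onnx and onnx_inference used above come from the surrounding project and are not shown. For onnx_inference, a minimal sketch with onnxruntime might look like the following; the single-input/single-output assumption is mine, not part of the original example.

# Hypothetical onnx_inference helper, assuming one input and one output tensor.
import onnxruntime as ort

def onnx_inference(image, onnx_model):
    sess = ort.InferenceSession(onnx_model.SerializeToString())
    input_name = sess.get_inputs()[0].name
    (output,) = sess.run(None, {input_name: image})
    return output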
Example No. 14
 def test_alexnet(test_case, batch_size=DEFAULT_BATCH_SIZE, num_batchs=6):
     init_env()
     (alexnet_infer, input_lbns, output_lbns) = make_alexnet_infer_func(
         batch_size, (DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE, 3)
     )
     flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR))
     saved_model_path = "alexnet_models"
     model_name = "alexnet"
     model_version = 1
     model_version_path = os.path.join(saved_model_path, str(model_version))
     if os.path.exists(saved_model_path) and os.path.isdir(saved_model_path):
         print(
             "WARNING: The model version path '{}' already exist, old version directory will be removed".format(
                 model_version_path
             )
         )
         shutil.rmtree(saved_model_path)
     saved_model_builder = flow.saved_model.ModelBuilder(saved_model_path)
     signature_builder = (
         saved_model_builder.ModelName(model_name)
         .Version(model_version)
         .AddFunction(alexnet_infer)
         .AddSignature("regress")
     )
     for (input_name, lbn) in input_lbns.items():
         signature_builder.Input(input_name, lbn)
     for (output_name, lbn) in output_lbns.items():
         signature_builder.Output(output_name, lbn)
     saved_model_builder.Save()
     new_batch_size = int(batch_size / 2)
     dataset = ImageNetRecordDataset(
         batch_size=new_batch_size,
         image_resize_size=DEFAULT_IMAGE_SIZE,
         data_format="NHWC",
     )
     (image_list, label_list) = dataset.load_batchs(num_batchs)
     assert image_list[0].shape[0] == new_batch_size
     image_size = tuple(image_list[0].shape[1:])
     flow.clear_default_session()
     (alexnet_infer, _, _) = make_alexnet_infer_func(new_batch_size, image_size)
     flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR))
     print("alexnet inference result:")
     origin_outputs = []
     for (i, (image, label)) in enumerate(zip(image_list, label_list)):
         output = alexnet_infer(image, label)
         origin_outputs.append(output)
         print("iter#{:<6} output:".format(i), output)
     origin_outputs = np.array(origin_outputs, dtype=np.float32)
     flow.clear_default_session()
     model_meta_file_path = os.path.join(
         saved_model_path, str(model_version), "saved_model.prototxt"
     )
     saved_model_proto = load_saved_model(model_meta_file_path)
     sess = flow.serving.InferenceSession()
     checkpoint_path = os.path.join(
         saved_model_path, str(model_version), saved_model_proto.checkpoint_dir
     )
     sess.set_checkpoint_path(checkpoint_path)
     graph_name = saved_model_proto.default_graph_name
     graph_def = saved_model_proto.graphs[graph_name]
     signature_def = graph_def.signatures[graph_def.default_signature_name]
     with sess.open(graph_name, signature_def, new_batch_size):
         sess.compile(graph_def.op_list)
     sess.launch()
     job_name = sess.list_jobs()[0]
     input_names = sess.list_inputs()
     print("input names:", input_names)
     for input_name in input_names:
         print(
             'input "{}" info: {}'.format(
                 input_name, sess.input_info(input_name, job_name)
             )
         )
     output_names = sess.list_outputs()
     print("output names:", output_names)
     for output_name in output_names:
         print(
             'output "{}" info: {}'.format(
                 output_name, sess.output_info(output_name, job_name)
             )
         )
     print("load saved alexnet and inference result:")
     print_input_info = False
     cmp_outputs = []
     for (i, (image, label)) in enumerate(zip(image_list, label_list)):
         if print_input_info:
             print("image shape: {}, dtype: {}".format(image.shape, image.dtype))
             print(
                 "label shape: {}, dtype: {}, data: {}".format(
                     label.shape, label.dtype, label
                 )
             )
             if i > 1:
                 print((image - image_list[i - 1]).mean())
         outputs = sess.run(alexnet_infer.__name__, image=image, label=label)
         cmp_outputs.append(outputs[0])
         print("iter#{:<6} output:".format(i), outputs[0])
     cmp_outputs = np.array(cmp_outputs, dtype=np.float32)
     test_case.assertTrue(np.allclose(origin_outputs, cmp_outputs))
     sess.close()
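This example also relies on a load_saved_model helper that parses saved_model.prototxt into a proto message before the session is compiled by hand. A sketch of such a helper is below; the proto import path (oneflow.core.serving.saved_model_pb2) is an assumption and may differ between OneFlow versions.

# Hypothetical load_saved_model helper; the proto module path is an assumption.
from google.protobuf import text_format
from oneflow.core.serving import saved_model_pb2

def load_saved_model(model_meta_file_path):
    saved_model_proto = saved_model_pb2.SavedModel()
    with open(model_meta_file_path, "r") as f:
        text_format.Merge(f.read(), saved_model_proto)
    return saved_model_proto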