Example #1
def GetFunctionConfig(args):
    config = flow.function_config()
    # mixed-precision (fp16) training on/off according to the command-line flag
    config.enable_auto_mixed_precision(args.use_fp16)
    # accumulate gradients over several micro-batches before each optimizer step
    config.train.num_gradient_accumulation_steps(args.num_accumulation_steps)
    if args.use_xla:
        config.use_xla_jit(True)
    # fuse elementwise-add and model-update ops to cut kernel launches
    config.enable_fuse_add_to_output(True)
    config.enable_fuse_model_update_ops(True)
    return config
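For context, a config built this way is what gets handed to the flow.global_function decorator when a job is defined (Examples #4 and #5 below do exactly that). The following is a minimal, self-contained sketch under the single-client compatibility API; identity_job and its shape are made up for illustration:

import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as tp

func_config = flow.function_config()
func_config.enable_auto_mixed_precision(False)  # keep the toy job in fp32

@flow.global_function("predict", func_config)
def identity_job(x: tp.Numpy.Placeholder((2, 3), dtype=flow.float)) -> tp.Numpy:
    # the job simply echoes its input; function_config controls how it is compiled
    return x

out = identity_job(np.ones((2, 3), dtype=np.float32))  # numpy in, numpy out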
Example #2
def _default_config(args):
    config = flow.function_config()
    config.default_logical_view(flow.scope.consistent_view())
    config.default_data_type(flow.float)
    if args.use_fp16:
        config.enable_auto_mixed_precision(True)
    if args.use_xla:
        config.use_xla_jit(True)
    config.enable_fuse_add_to_output(True)
    return config
Example #3
def _make_func_config(args):
    func_cfg = flow.function_config()
    if args.fp16:
        func_cfg.enable_auto_mixed_precision(True)
    func_cfg.prune_parallel_cast_ops(True)
    func_cfg.enable_fuse_add_to_output(True)
    func_cfg.enable_fuse_model_update_ops(True)
    func_cfg.enable_fuse_cast_scale(True)
    # turn on this flag to match ZeRO & DeepSpeed behavior
    func_cfg.enable_non_distributed_optimizer(False)
    if args.num_accumulation_steps > 1:
        if hasattr(func_cfg.train, "num_gradient_accumulation_steps"):
            func_cfg.train.num_gradient_accumulation_steps(
                args.num_accumulation_steps)
        else:
            args.num_accumulation_steps = 1
            print(
                "WARNING: This version of OneFlow does not support gradient accumulation,"
                " please try a newer version.")

    return func_cfg
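A quick note on what num_gradient_accumulation_steps buys you: the optimizer only steps once every N micro-batches, so the effective batch size is the per-step micro-batch size times the number of accumulation steps. The numbers below are purely illustrative:

micro_batch_size = 8           # samples per forward/backward pass
num_accumulation_steps = 4     # micro-batches accumulated before one update
effective_batch_size = micro_batch_size * num_accumulation_steps
print(effective_batch_size)    # 32: gradients from 4 micro-batches feed one optimizer step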
Example #4
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

import unittest

import numpy as np

import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow

config = flow.function_config()


def make_job(input_shape, permute, dtype=flow.float32):
    config.use_xla_jit(False)
    config.use_tensorrt(False)

    @flow.global_function(config)
    def transpose_job(x=flow.FixedTensorDef(input_shape, dtype=dtype)):
        return flow.transpose(x, perm=permute)

    return transpose_job


def make_xla_job(input_shape, permute, dtype=flow.float32):
    config.use_xla_jit(True)
    config.use_tensorrt(False)

    @flow.global_function(config)
    def xla_transpose_job(x=flow.FixedTensorDef(input_shape, dtype=dtype)):
        return flow.transpose(x, perm=permute)

    return xla_transpose_job
Example #5
}


def load_image(image_path="test_img/ILSVRC2012_val_00020287.JPEG"):
    print(image_path)
    im = Image.open(image_path)
    im = im.resize((224, 224))
    im = im.convert("RGB")  # some images are single-channel; without this conversion an error is raised
    im = np.array(im).astype("float32")
    im = (im - args.rgb_mean) / args.rgb_std
    im = np.transpose(im, (2, 0, 1))
    im = np.expand_dims(im, axis=0)
    return np.ascontiguousarray(im, "float32")


@flow.global_function("predict", flow.function_config())
def InferenceNet(
    images: tp.Numpy.Placeholder((1, 3, 224, 224), dtype=flow.float)
) -> tp.Numpy:
    logits = model_dict[args.model](images, args)
    predictions = flow.nn.softmax(logits)
    return predictions


def main():
    flow.env.log_dir(args.log_dir)
    assert os.path.isdir(args.model_load_dir)
    flow.load_variables(flow.checkpoint.get(args.model_load_dir))
    image = load_image(args.image_path)
    predictions = InferenceNet(image)
    clsidx = predictions.argmax()