SubOpr,
    TanHOpr,
    TransposeOpr,
    TrueDivOpr,
    TypeCvtOpr,
    VolatileSharedDeviceTensorOpr,
)
from ...converter_ir.ir_tensor import IRTensor
from ...frontend.mge_to_ir.mge_utils import get_symvar_value

# Choose the caffe protobuf bindings: the copy bundled with this package by
# default, or the system-installed caffe.proto when the USE_CAFFE_PROTO
# environment variable is set.
if "USE_CAFFE_PROTO" not in os.environ:
    from .caffe_pb import caffe_pb2 as cp
else:
    from caffe.proto import caffe_pb2 as cp

logger = get_logger(__name__)

# Registry mapping operator types to caffe conversion handlers.
# NOTE(review): presumably populated by the _register_op decorator below —
# its body is outside this view, confirm against the rest of the file.
MGE2CAFFE = {}


class BackEnd(IntEnum):
    """Target runtimes a converted caffe model may be deployed to."""

    CAFFE = 1
    SNPE = 2
    TRT = 3
    NNIE = 4


def isconst(x):
    # A tensor counts as constant iff it carries concrete numpy data.
    return x.np_data is not None


def _register_op(*oprs):
import argparse  # was missing: main() calls argparse.ArgumentParser()
import os
import time

import numpy as np

import megengine as mge
import megengine.distributed as dist
import megengine.functional as F
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler, dataset
from megengine.data import transform as T
from megengine.optimizer import SGD

from official.vision.segmentation.tools.utils import (
    AverageMeter,
    get_config_info,
    import_from_file,
)

logger = mge.get_logger(__name__)
logger.setLevel("INFO")


def main():
    """Entry point: parse CLI options for (distributed) segmentation training.

    Visible options:
        -f/--file:    path to the network description file.
        -n/--devices: number of devices to train on.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-f", "--file", default="net.py", type=str, help="net description file"
    )
    # Help text fixed: it previously read "batch size for training", an evident
    # copy-paste error for this integer device-count option.
    parser.add_argument(
        "-n", "--devices", type=int, default=8, help="number of devices for training"
    )
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # # This file has been modified by Megvii ("Megvii Modifications"). # All Megvii Modifications are Copyright (C) 2014-2021 Megvii Inc. All rights reserved. # ---------------------------------------------------------------------- """Tokenization classes.""" from __future__ import absolute_import, division, print_function, unicode_literals import collections import os import unicodedata from io import open import megengine logger = megengine.get_logger(__name__) VOCAB_NAME = "vocab.txt" def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with open(vocab_file, "r", encoding="utf-8") as reader: while True: token = reader.readline() if not token: break token = token.strip() vocab[token] = index