Example #1
0
def get_config():
    time_prefix = time.strftime("-%Y%m%d-%H%M%S", time.localtime())
    prefix = "AVA-cifar10-resnet18"
    config = ed({
        # base setting
        "description": "Your description for training",
        "prefix": prefix,
        "time_prefix": time_prefix,
        "net_work": "resnet18",
        "low_dims": 128,
        "use_MLP": False,

        # save
        "save_checkpoint": True,
        "save_checkpoint_epochs": 5,
        "keep_checkpoint_max": 2,

        # optimizer
        "base_lr": 0.03,
        "type": "SGD",
        "momentum": 0.9,
        "weight_decay": 5e-4,
        "loss_scale": 1,
        "sigma": 0.1,

        # trainer
        "batch_size": 128,
        "epochs": 1000,
        "epoch_stage": [600, 400],
        "lr_schedule": "cosine_lr",
        "lr_mode": "epoch",
        "warmup_epoch": 0,
    })
    return config
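
This snippet assumes `import time` and `from easydict import EasyDict as ed` at module level. A minimal, hypothetical usage sketch of the returned config:

import time
from easydict import EasyDict as ed

config = get_config()
# EasyDict exposes keys as attributes, so both access styles below are equivalent.
print(config.prefix + config.time_prefix)
print(config["base_lr"], config.lr_schedule)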
Example #2
0
def config_quant(device_target):
    if device_target not in ["Ascend", "GPU"]:
        raise ValueError(
            "Unsupported device target: {}.".format(device_target))
    configs = ed({"Ascend": config_ascend_quant, "GPU": config_gpu_quant})
    config = configs.Ascend if device_target == "Ascend" else configs.GPU
    config["device_target"] = device_target
    return config
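
A hypothetical call, assuming `config_ascend_quant` and `config_gpu_quant` are EasyDict configs as defined in Example #4:

cfg = config_quant("Ascend")
print(cfg.device_target, cfg.batch_size)   # "Ascend" plus the Ascend-specific settings
try:
    config_quant("CPU")                    # anything other than Ascend/GPU is rejected
except ValueError as err:
    print(err)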
Example #3
0
 def norm(self):
     norm = ed()
     norm.focus = 200.
     norm.speedX = 300.0
     norm.speedY = 300.0
     norm.speedZ = 300.0
     norm.angle = 3.1416
     norm.damage = None
     norm.opponents = 200.
     norm.rpm = 10000.
     norm.track = 200.
     norm.trackPos = None
     norm.wheelSpinVel = 100.
     norm.radius = None
     norm.toleft = None
     norm.toright = None
     return norm
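
The attribute-style assignments above work because EasyDict keeps attributes and dict keys in sync; a small illustration (not part of the original):

from easydict import EasyDict as ed

norm = ed()
norm.focus = 200.
assert "focus" in norm and norm["focus"] == 200.   # attribute writes also create dict entries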
Example #4
0
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed
config_ascend_quant = ed({
    "num_classes": 1000,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 192,
    "data_load_mode": "mindata",
    "epoch_size": 60,
    "start_epoch": 200,
    "warmup_epochs": 1,
    "lr": 0.3,
    "momentum": 0.9,
    "weight_decay": 4e-5,
    "label_smooth": 0.1,
    "loss_scale": 1024,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 300,
    "save_checkpoint_path": "./checkpoint",
    "quantization_aware": True,
})

config_gpu_quant = ed({
    "num_classes": 1000,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 134,
Example #5
0
config = ed({
    "INFER_LONG_SIZE": 1920,
    "KERNEL_NUM": 7,
    "INFERENCE": True,  # INFER MODE\TRAIN MODE

    # backbone
    "BACKBONE_LAYER_NUMS": [3, 4, 6, 3],
    "BACKBONE_IN_CHANNELS": [64, 256, 512, 1024],
    "BACKBONE_OUT_CHANNELS": [256, 512, 1024, 2048],

    # neck
    "NECK_OUT_CHANNEL": 256,

    # lr
    "BASE_LR": 2e-3,
    "TRAIN_TOTAL_ITER": 58000,
    "WARMUP_STEP": 620,
    "WARMUP_RATIO": 1 / 3,

    # dataset for train
    "TRAIN_ROOT_DIR": "psenet/ic15/",
    "TRAIN_LONG_SIZE": 640,
    "TRAIN_MIN_SCALE": 0.4,
    "TRAIN_BATCH_SIZE": 4,
    "TRAIN_REPEAT_NUM": 1800,
    "TRAIN_DROP_REMAINDER": True,
    "TRAIN_MODEL_SAVE_PATH": "./checkpoints/",

    # dataset for test
    "TEST_ROOT_DIR": "psenet/ic15/",
    "TEST_BUFFER_SIZE": 4,
    "TEST_DROP_REMAINDER": False,

    # air config
    "air_filename": "psenet_bs_1",
})
Example #6
0
config = ed({
    "img_shape": [300, 300],
    "num_ssd_boxes": 1917,
    "neg_pre_positive": 3,
    "match_thershold": 0.5,
    "nms_thershold": 0.6,
    "min_score": 0.1,
    "max_boxes": 100,

    # learning rate settings
    "global_step": 0,
    "lr_init": 0.001,
    "lr_end_rate": 0.001,
    "warmup_epochs": 2,
    "momentum": 0.9,
    "weight_decay": 1.5e-4,

    # network
    "num_default": [3, 6, 6, 6, 6, 6],
    "extras_in_channels": [256, 576, 1280, 512, 256, 256],
    "extras_out_channels": [576, 1280, 512, 256, 256, 128],
    "extras_srides": [1, 1, 2, 2, 2, 2],
    "extras_ratio": [0.2, 0.2, 0.2, 0.25, 0.5, 0.25],
    "feature_size": [19, 10, 5, 3, 2, 1],
    "min_scale": 0.2,
    "max_scale": 0.95,
    "aspect_ratios": [(2,), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3)],
    "steps": (16, 32, 64, 100, 150, 300),
    "prior_scaling": (0.1, 0.2),
    "gamma": 2.0,
    "alpha": 0.75,

    # `mindrecord_dir` and `coco_root` should preferably be absolute paths.
    "mindrecord_dir": "/data/MindRecord_COCO",
    "coco_root": "/data/coco2017",
    "train_data_type": "train2017",
    "val_data_type": "val2017",
    "instances_set": "annotations/instances_{}.json",
    "coco_classes":
    ('background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
     'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
     'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
     'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
     'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
     'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
     'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
     'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
     'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
     'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
     'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
     'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
     'hair drier', 'toothbrush'),
    "num_classes":
    81,

    # if coco used, `image_dir` and `anno_path` are useless.
    "image_dir":
    "",
    "anno_path":
    "",
})
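
A quick consistency check (not part of the original config): the 1917 default boxes follow directly from `num_default` and `feature_size`:

num_default = [3, 6, 6, 6, 6, 6]
feature_size = [19, 10, 5, 3, 2, 1]
assert sum(n * s * s for n, s in zip(num_default, feature_size)) == 1917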
Example #7
0
from easydict import EasyDict as ed

access_config = ed({
    # AK/SK credentials required for login
    'access_key': '',
    'secret_access_key': '',
    # OBS service endpoint. It may include the protocol, domain name and port.
    # (For security reasons, https is recommended.)
    # For a compute center, contact the operations team to obtain it.
    'server': '',
    # project_id/region_name:
    # project ID / region ID; see the following link for how to obtain them:
    # https://support.huaweicloud.com/api-iam/iam_17_0002.html
    # For a compute center, consult the relevant maintenance team.
    'region_name': '',
    'project_id': '',

    # The settings below are only for dedicated clouds such as compute centers; they are not
    # needed for the public cloud and can be left empty. Consult the relevant maintenance team.
    # After setting them, the corresponding DNS resolution addresses must also be configured.
    'iam_endpoint': '',
    'obs_endpoint': '',
    'modelarts_endpoint': '',
})

session_config = ed({
    'hyperparameters': [
        {
            'label': 'config_path',
            'value': 'resnet50_imagenet2012_Boost_config.yaml'
        },
        {
            'label': 'enable_modelarts',
Example #8
0
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed

config = ed({
    "class_num": 1000,
    "batch_size": 32,
    "loss_scale": 128,
    "momentum": 0.9,
    "weight_decay": 5e-4,
    "epoch_size": 45,
    "buffer_size": 1000,
    "image_height": 224,
    "image_width": 224,
    "save_checkpoint": True,
    "save_checkpoint_steps": 5004,
    "keep_checkpoint_max": 20,
    "save_checkpoint_path": "./",
    "label_smooth": 1,
    "label_smooth_factor": 0.1,
    "frequency": 834,
    "eval_interval": 1,
    "eval_batch_size": 32
})
Example #9
0
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed

config = ed({
    "class_num": 10572,
    "batch_size": 128,
    "learning_rate": 0.01,
    "lr_decay_epochs": [40, 80, 100],
    "lr_decay_factor": 0.1,
    "lr_warmup_epochs": 20,
    "p": 16,
    "k": 8,
    "loss_scale": 1024,
    "momentum": 0.9,
    "weight_decay": 1e-4,
    "epoch_size": 120,
    "buffer_size": 10000,
    "image_height": 128,
    "image_width": 128,
    "save_checkpoint": True,
    "save_checkpoint_steps": 195,
    "keep_checkpoint_max": 2,
    "save_checkpoint_path": "./"
})
Example #10
0
 def make_obs_origin(self):
     obs = ed()
     t = self.tool.get29Data
     for k, v in self.norm_factory.items():
         setattr(obs, k, t[k])
     return obs
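
A generic sketch of the pattern above, with the environment-specific `self.tool` and `self.norm_factory` replaced by a plain dict for illustration:

from easydict import EasyDict as ed

raw = {"speedX": 1.0, "rpm": 4200.0}      # stand-in for the sensor reading
obs = ed()
for k in raw:
    setattr(obs, k, raw[k])               # equivalent to obs[k] = raw[k] on an EasyDict
print(obs.speedX, obs["rpm"])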
Example #11
0
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed

# config on GPU for Xception, imagenet2012.
config_gpu = ed({
    "class_num": 1000,
    "batch_size": 64,
    "loss_scale": 1024,
    "momentum": 0.9,
    "weight_decay": 1e-4,
    "epoch_size": 250,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 5,
    "save_checkpoint_path": "./gpu-ckpt",
    "warmup_epochs": 1,
    "lr_decay_mode": "linear",
    "use_label_smooth": True,
    "finish_epoch": 0,
    "label_smooth_factor": 0.1,
    "lr_init": 0.00004,
    "lr_max": 0.4,
    "lr_end": 0.00004
})

# config on Ascend for Xception, imagenet2012.
config_ascend = ed({
    "class_num": 1000,
    "batch_size": 128,
    "loss_scale": 1024,
Example #12
0
config = ed({

    # dataset-related
    "mindrecord_dir": "",
    "data_root": "",
    "annotation_file": "",
    "val_data_root": "",
    "val_annotation_file": "",
    "data_json": "",
    "characters_dictionary": {
        "pad_id": 0,
        "go_id": 1,
        "eos_id": 2,
        "unk_id": 3
    },
    "labels_not_use":
    [u'%#�?%', u'%#背景#%', u'%#不识�?%', u'#%不识�?#', u'%#模糊#%', u'%#模糊#%'],
    "vocab_path":
    "./general_chars.txt",

    #model-related
    "img_width":
    512,
    "img_height":
    128,
    "channel_size":
    3,
    "conv_out_dim":
    384,
    "encoder_hidden_size":
    128,
    "decoder_hidden_size":
    128,
    "decoder_output_size":
    10000,  # vocab_size is the decoder_output_size, characters_class+1, last 9999 is the space
    "dropout_p":
    0.1,
    "max_length":
    64,
    "attn_num_layers":
    1,
    "teacher_force_ratio":
    0.5,

    #optimizer-related
    "lr":
    0.0008,
    "adam_beta1":
    0.5,
    "adam_beta2":
    0.999,
    "loss_scale":
    1024,

    #train-related
    "batch_size":
    32,
    "num_epochs":
    20,
    "keep_checkpoint_max":
    20,

    #eval-related
    "eval_batch_size":
    32
})
Example #13
0
def parser_args(block, config_path=None):
    # params
    parser = argparse.ArgumentParser()
    # parser.add_argument('-config', default='./config/shrec/shrec_gpt_28_SSL.yaml')
    # parser.add_argument('-config', default='./work_dir/shrec28/gptSSL_SSLTrue_ReOnly/shrec_gpt_28_SSL_val.yaml')
    # parser.add_argument('-config', default='./work_dir/shrec28/gptSSL_SSLTrue_PTOnly/shrec_gpt_28_SSL_eval.yaml')
    # parser.add_argument('-config', default='./work_dir/shrec28/gptSSL_SSLTrue_PTContra/shrec_gpt_28_SSL_PTContra_eval.yaml')
    # parser.add_argument('-config', default='./work_dir/shrec28/gptSSL_SSLTrue_PSOnly/shrec_gpt_28_SSL_PSOnly_eval.yaml')
    parser.add_argument('-config',
                        default='./config/shrec/shrec_gpt_lay_28_SSL.yaml')
    # parser.add_argument('-config', default='./config/val/ntu60_dstanet_bi_val.yaml')
    parser.add_argument('-model', default='resnet3d_50')
    parser.add_argument('-model_param', default={}, help=None)
    # classify_multi_crop classify classify_pose
    parser.add_argument('-train', default='classify')
    parser.add_argument('-val_first', default=False)
    parser.add_argument('-data', default='jmdbgulp')
    parser.add_argument('-data_param', default={}, help='')
    # train_val test train_test
    parser.add_argument('-mode', default='train_val')
    # cross_entropy mse_ce
    parser.add_argument('-loss', default='cross_entropy')
    parser.add_argument('-ls_param', default={})
    # reduce_by_acc reduce_by_loss reduce_by_epoch cosine_annealing_lr
    parser.add_argument('-lr_scheduler', default='reduce_by_acc')
    parser.add_argument('-lr_param', default={})
    parser.add_argument('-warm_up_epoch', default=0)
    parser.add_argument('-step', default=[
        80,
    ])
    parser.add_argument('-lr', default=0.01)  # 0.001
    parser.add_argument('-wd', default=1e-4)  # 5e-4
    parser.add_argument('-lr_decay_ratio', default=0.1)
    parser.add_argument('-lr_multi_keys',
                        default=[
                            ['fc', 1, 1, 0],
                            ['bn', 1, 1, 0],
                        ],
                        help='key, lr ratio, wd ratio, epoch')
    parser.add_argument('-optimizer', default='sgd_nev')
    parser.add_argument('-freeze_keys',
                        default=[
                            ['PA', 5],
                        ],
                        help='key, epoch')

    parser.add_argument('-class_num', default=12)
    parser.add_argument('-batch_size', default=32)
    parser.add_argument('-worker', default=16)
    parser.add_argument('-pin_memory', default=False)
    parser.add_argument('-max_epoch', default=50)

    parser.add_argument('-num_epoch_per_save', default=2)
    parser.add_argument('-model_saved_name', default='')
    parser.add_argument('-last_model', default=None, help='')
    parser.add_argument('-ignore_weights', default=['fc'])
    parser.add_argument('-pre_trained_model', default='')
    parser.add_argument('--label_smoothing_num',
                        default=0,
                        help='0-1: 0 denotes no smoothing')
    parser.add_argument(
        '--mix_up_num',
        default=0,
        help='0-1: 1 denotes uniform distribution, smaller, more concave')
    parser.add_argument('-device_id', default=[0, 1, 2, 3])
    parser.add_argument('-debug', default=False)
    parser.add_argument('-cuda_visible_device',
                        default='0, 1, 2, 3, 4, 5, 6, 7')
    parser.add_argument('-grad_clip', default=0)
    p = parser.parse_args()
    if config_path is not None:
        p.config = config_path
    if p.config is not None:
        with open(p.config, 'r') as f:
            default_arg = yaml.load(f, Loader=yaml.FullLoader)
        key = vars(p).keys()
        for k in default_arg.keys():
            if k not in key:
                print('WRONG ARG: {}'.format(k))
                assert (k in key)
        parser.set_defaults(**default_arg)

    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda_visible_device

    if args.debug:
        args.device_id = [0]
        args.batch_size = 1
        args.worker = 0
        os.environ['DISPLAY'] = 'localhost:10.0'

    block.addr = os.path.join(args.model_saved_name, 'log.txt')

    if os.path.isdir(
            args.model_saved_name) and not args.last_model and not args.debug:
        print('log_dir: ' + args.model_saved_name + ' already exist')
        answer = input('delete it? y/n:')
        if answer == 'y':
            shutil.rmtree(args.model_saved_name)
            print('Dir removed: ' + args.model_saved_name)
            input('refresh it')
        else:
            print('Dir not removed: ' + args.model_saved_name)

    if not os.path.exists(args.model_saved_name):
        os.makedirs(args.model_saved_name)
    # Get argument defaults (has tag #this is a hack)
    parser.add_argument('--IGNORE', action='store_true')
    # returns the parser defaults as a dict
    defaults = vars(parser.parse_args(['--IGNORE']))
    # Print all arguments, color the non-defaults
    for argument, value in sorted(vars(args).items()):
        reset = colorama.Style.RESET_ALL
        color = reset if value == defaults[argument] else colorama.Fore.MAGENTA
        block.log('{}{}: {}{}'.format(color, argument, value, reset))

    shutil.copy2(__file__, args.model_saved_name)
    shutil.copy2(args.config, args.model_saved_name)

    args = ed(vars(args))
    return args
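
The closing `ed(vars(args))` is the key conversion: the argparse Namespace becomes an EasyDict so downstream code can use attribute or key access interchangeably. A minimal illustration:

import argparse
from easydict import EasyDict as ed

ns = argparse.Namespace(lr=0.01, batch_size=32)
args = ed(vars(ns))            # vars() turns the Namespace into a plain dict
assert args.lr == args["lr"] == 0.01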
Example #14
0
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed

config = ed({
    "class_num": 3755,
    "batch_size": 32,
    "loss_scale": 1024,
    "momentum": 0.9,
    "weight_decay": 1e-4,
    "epoch_size": 20,
    "buffer_size": 100,
    "image_height": 112,
    "image_width": 112,
    "save_checkpoint": True,
    "save_checkpoint_steps": 1759,  #1759,14072
    "keep_checkpoint_max": 10,
    "save_checkpoint_path": "./",
    "lr_init": 0.05,
    "lr_end": 0.00001,
    "lr_max": 0.1,
    "warmup_epochs": 5,
    "lr_decay_mode": "steps"
})
Example #15
0
config = ed({
    "image_size": '224,224',
    "num_classes": 1000,

    "lr": 0.4,
    "lr_scheduler": 'cosine_annealing',
    "lr_epochs": '30,60,90,120',
    "lr_gamma": 0.1,
    "eta_min": 0,
    "T_max": 150,
    "max_epoch": 150,
    "warmup_epochs": 1,

    "weight_decay": 0.0001,
    "momentum": 0.9,
    "is_dynamic_loss_scale": 0,
    "loss_scale": 1024,
    "label_smooth": 1,
    "label_smooth_factor": 0.1,

    "ckpt_interval": 5,
    "ckpt_save_max": 5,
    "ckpt_path": 'outputs/',
    "is_save_on_master": 1,

    # these two parameters are used for the MindSpore distributed configuration
    "rank": 0,
    "group_size": 1
})
Example #16
0
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed

# config for squeezenet, cifar10
config1 = ed({
    "class_num": 10,
    "batch_size": 32,
    "loss_scale": 1024,
    "momentum": 0.9,
    "weight_decay": 1e-4,
    "epoch_size": 120,
    "pretrain_epoch_size": 0,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 10,
    "save_checkpoint_path": "./",
    "warmup_epochs": 5,
    "lr_decay_mode": "poly",
    "lr_init": 0,
    "lr_end": 0,
    "lr_max": 0.01
})

# config for squeezenet, imagenet
config2 = ed({
    "class_num": 1000,
    "batch_size": 32,
    "loss_scale": 1024,
    "momentum": 0.9,
Example #17
0
import numpy as np 
from easydict import EasyDict as ed 

config = ed()

config.num_classes = 101

config.train_img = ed()
config.train_img.batch_size = 60
config.train_img.epoch = 1
config.train_img.drop_out = 1.0

config.train_flow = ed()
config.train_flow.batch_size = 80
config.train_flow.epoch = 1
config.train_flow.drop_out = 1.0

ucf_img = ed()

ucf_img.data_root = 'data/'
ucf_img.data_list_path = ucf_img.data_root + 'ucfTrainTestlist/'
ucf_img.train_list = ucf_img.data_list_path + 'trainlist01.txt'
ucf_img.test_list = ucf_img.data_list_path + 'testlist01.txt'
ucf_img.label_list = ucf_img.data_list_path + 'classInd.txt'
ucf_img.data_dir = ucf_img.data_root + 'jpegs_256/'
ucf_img.train_lst = ucf_img.data_list_path + 'train.lst'
ucf_img.valid_lst = ucf_img.data_list_path + 'valid.lst'
ucf_img.test_lst = ucf_img.data_list_path + 'test.lst'

ucf_flow = ed()
Example #18
0
# Copyright(C) 2021. Huawei Technologies Co.,Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py
"""

from easydict import EasyDict as ed

# config
lenet_cfg = ed({
    "batch_size": 32,
    "lr": 0.01,
    "momentum": 0.9,
    "epoch_size": 10,
    "repeat_size": 1,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 10
})
Example #19
0
from easydict import EasyDict as ed
import os

cfg = ed()
root = os.path.dirname(__file__)

# data path
cfg.dataloader = ed()
cfg.dataloader.data_path = os.path.join(root, 'data')
cfg.dataloader.feature_raw = os.path.join(cfg.dataloader.data_path,
                                          'feature.xlsx')
cfg.dataloader.feature_cell = ['C3', 'DB985']
cfg.dataloader.feature_saved = None
cfg.dataloader.train_label = os.path.join(cfg.dataloader.data_path,
                                          'train_label.csv')
cfg.dataloader.num_train_data = 683
cfg.dataloader.num_validation_portion = 0.1

# probability of sampling important data for training
cfg.dataloader.im_sampling_rate = 0.3

# data preprocess config
cfg.dataloader.pca = ed()
cfg.dataloader.pca.enable_pca = False
cfg.dataloader.pca.max_to_keep = 120

# remove empty column
cfg.dataloader.remove_empty_col = True

# normalize each column
cfg.dataloader.columnwise_normalize = False
Example #20
0
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed

config_gpu = ed({
    "num_classes": 1000,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 150,
    "epoch_size": 370,
    "warmup_epochs": 4,
    "lr": 1.54,
    "momentum": 0.9,
    "weight_decay": 4e-5,
    "label_smooth": 0.1,
    "loss_scale": 1024,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 500,
    "save_checkpoint_path": "./checkpoint",
    "export_format": "MINDIR",
    "export_file": "mobilenetv3"
})

config_cpu = ed({
    "num_classes": 10,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 32,
Example #21
0
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed

config = ed({
    "class_num": 1001,
    "batch_size": 32,
    "loss_scale": 1024,
    "momentum": 0.9,
    "weight_decay": 1e-4,
    "epoch_size": 120,
    "buffer_size": 1000,
    "image_height": 224,
    "image_width": 224,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 10,
    "save_checkpoint_path": "./",
    "warmup_epochs": 0,
    "lr_decay_mode": "cosine",
    "label_smooth": 1,
    "label_smooth_factor": 0.1,
    "lr": 0.1
})
Example #22
0
def set_config(args):
    config_cpu = ed({
        "num_classes": 26,
        "image_height": 224,
        "image_width": 224,
        "batch_size": 150,
        "epoch_size": 15,
        "warmup_epochs": 0,
        "lr_init": .0,
        "lr_end": 0.03,
        "lr_max": 0.03,
        "momentum": 0.9,
        "weight_decay": 4e-5,
        "label_smooth": 0.1,
        "loss_scale": 1024,
        "save_checkpoint": True,
        "save_checkpoint_epochs": 1,
        "keep_checkpoint_max": 20,
        "save_checkpoint_path": "./checkpoint",
        "platform": args.platform
    })
    config_gpu = ed({
        "num_classes": 1000,
        "image_height": 224,
        "image_width": 224,
        "batch_size": 150,
        "epoch_size": 200,
        "warmup_epochs": 0,
        "lr_init": .0,
        "lr_end": .0,
        "lr_max": 0.8,
        "momentum": 0.9,
        "weight_decay": 4e-5,
        "label_smooth": 0.1,
        "loss_scale": 1024,
        "save_checkpoint": True,
        "save_checkpoint_epochs": 1,
        "keep_checkpoint_max": 200,
        "save_checkpoint_path": "./checkpoint",
        "platform": args.platform,
        "ccl": "nccl",
        "run_distribute": args.run_distribute
    })
    config_ascend = ed({
        "num_classes": 1000,
        "image_height": 224,
        "image_width": 224,
        "batch_size": 256,
        "epoch_size": 200,
        "warmup_epochs": 4,
        "lr_init": 0.00,
        "lr_end": 0.00,
        "lr_max": 0.4,
        "momentum": 0.9,
        "weight_decay": 4e-5,
        "label_smooth": 0.1,
        "loss_scale": 1024,
        "save_checkpoint": True,
        "save_checkpoint_epochs": 1,
        "keep_checkpoint_max": 200,
        "save_checkpoint_path": "./checkpoint",
        "platform": args.platform,
        "ccl": "hccl",
        "device_id": int(os.getenv('DEVICE_ID', '0')),
        "rank_id": int(os.getenv('RANK_ID', '0')),
        "rank_size": int(os.getenv('RANK_SIZE', '1')),
        "run_distribute": int(os.getenv('RANK_SIZE', '1')) > 1.
    })
    config = ed({
        "CPU": config_cpu,
        "GPU": config_gpu,
        "Ascend": config_ascend
    })

    if args.platform not in config.keys():
        raise ValueError("Unsupport platform.")

    return config[args.platform]
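
A hypothetical call, assuming the module also imports `os` and `EasyDict as ed`; any object with `platform` and `run_distribute` attributes will do:

from types import SimpleNamespace

args = SimpleNamespace(platform="GPU", run_distribute=False)
cfg = set_config(args)
print(cfg.ccl, cfg.batch_size)   # 'nccl' 150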
Example #23
0
config = ed({
    "img_width": 960,
    "img_height": 576,
    "keep_ratio": False,
    "flip_ratio": 0.0,
    "photo_ratio": 0.0,
    "expand_ratio": 0.3,

    # anchor
    "feature_shapes": [(36, 60)],
    "anchor_scales": [2, 4, 6, 8, 12],
    "anchor_ratios": [0.2, 0.5, 0.8, 1.0, 1.2, 1.5],
    "anchor_strides": [16],
    "num_anchors": 5 * 6,

    # rpn
    "rpn_in_channels": 512,
    "rpn_feat_channels": 640,
    "rpn_loss_cls_weight": 1.0,
    "rpn_loss_reg_weight": 3.0,
    "rpn_cls_out_channels": 1,
    "rpn_target_means": [0., 0., 0., 0.],
    "rpn_target_stds": [1.0, 1.0, 1.0, 1.0],

    # bbox_assign_sampler
    "neg_iou_thr": 0.3,
    "pos_iou_thr": 0.5,
    "min_pos_iou": 0.3,
    "num_bboxes": 5 * 6 * 36 * 60,
    "num_gts": 128,
    "num_expected_neg": 256,
    "num_expected_pos": 128,

    # proposal
    "activate_num_classes": 2,
    "use_sigmoid_cls": True,

    # roi_align
    "roi_layer": dict(type='RoIAlign', out_size=7, sample_num=2),

    # bbox_assign_sampler_stage2
    "neg_iou_thr_stage2": 0.2,
    "pos_iou_thr_stage2": 0.5,
    "min_pos_iou_stage2": 0.5,
    "num_bboxes_stage2": 2000,
    "use_ambigous_sample": True,
    "num_expected_pos_stage2": 128,
    "num_expected_amb_stage2": 128,
    "num_expected_neg_stage2": 640,
    "num_expected_total_stage2": 640,

    # rcnn
    "rcnn_in_channels": 512,
    "rcnn_fc_out_channels": 4096,
    "rcnn_loss_cls_weight": 1,
    "rcnn_loss_reg_weight": 1,
    "rcnn_target_means": [0., 0., 0., 0.],
    "rcnn_target_stds": [0.1, 0.1, 0.2, 0.2],

    # train proposal
    "rpn_proposal_nms_across_levels": False,
    "rpn_proposal_nms_pre": 2000,
    "rpn_proposal_nms_post": 2000,
    "rpn_proposal_max_num": 2000,
    "rpn_proposal_nms_thr": 0.7,
    "rpn_proposal_min_bbox_size": 0,

    # test proposal
    "rpn_nms_across_levels": False,
    "rpn_nms_pre": 1000,
    "rpn_nms_post": 1000,
    "rpn_max_num": 1000,
    "rpn_nms_thr": 0.7,
    "rpn_min_bbox_min_size": 0,
    "test_score_thr": 0.80,
    "test_iou_thr": 0.5,
    "test_max_per_img": 100,
    "test_batch_size": 2,
    "rpn_head_loss_type": "CrossEntropyLoss",
    "rpn_head_use_sigmoid": True,
    "rpn_head_weight": 1.0,

    # LR
    "base_lr": 0.02,
    "base_step": 982 * 8,
    "total_epoch": 70,
    "warmup_step": 50,
    "warmup_mode": "linear",
    "warmup_ratio": 1 / 3.0,
    "sgd_step": [8, 11],
    "sgd_momentum": 0.9,

    # train
    "batch_size": 2,
    "loss_scale": 1,
    "momentum": 0.91,
    "weight_decay": 1e-4,
    "epoch_size": 70,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 10,
    "keep_checkpoint_max": 5,
    "save_checkpoint_path": "./",
    "mindrecord_dir": "/home/deeptext_sustech/data/mindrecord/full_ori",
    "use_coco": True,
    "coco_root": "/d0/dataset/coco2017",
    "cocotext_json": "/home/deeptext_sustech/data/cocotext.v2.json",
    "coco_train_data_type": "train2017",
    "num_classes": 3
})
Example #24
0
  orig = np.load('drive/orig_images.npy')
  crop_train = [image64[i] for i in range(4)]
  hole_train = [hole64[i] for i in range(4)]
  orig_train = [orig[i] for i in range(4)]
  
  crop_train = process_oneimg(crop_train)
  hole_train = process_oneimg(hole_train)
  orig_train = process_oneimg(orig_train)
  g1_feed = np.concatenate([crop_train,hole_train],axis=3)
  return g1_feed,crop_train,hole_train,orig_train

g1_feed, crop_train, hole_train, orig_train = next_scale_batch()

from easydict import EasyDict as ed

cfg = ed()

cfg.IMAGE_SHAPE = [64, 64, 3]
cfg.G1_INPUT_DATA_SHAPE = cfg.IMAGE_SHAPE[:2] + [6]
cfg.BATCH_SIZE = 4
cfg.BATCH_SIZE_G2D = 4
cfg.N = 6  # number of residual blocks
cfg.WEIGHT_DECAY = 0.005
cfg.LAMBDA = 50
cfg.MAXITERATION = 5000
cfg.LOGDIR = 'drive/logs'
cfg.MODE = 'train'
cfg.RESULT_DIR = 'drive/result'

WEIGHT_DECAY = cfg.WEIGHT_DECAY
Example #25
0
config = ed({
    "img_width": 1280,
    "img_height": 768,
    "keep_ratio": True,
    "flip_ratio": 0.5,
    "photo_ratio": 0.5,
    "expand_ratio": 1.0,
    "max_instance_count": 128,
    "mask_shape": (28, 28),

    # anchor
    "feature_shapes": [(192, 320), (96, 160), (48, 80), (24, 40), (12, 20)],
    "anchor_scales": [8],
    "anchor_ratios": [0.5, 1.0, 2.0],
    "anchor_strides": [4, 8, 16, 32, 64],
    "num_anchors": 3,

    # resnet
    "resnet_block": [3, 4, 6, 3],
    "resnet_in_channels": [64, 256, 512, 1024],
    "resnet_out_channels": [256, 512, 1024, 2048],

    # fpn
    "fpn_in_channels": [256, 512, 1024, 2048],
    "fpn_out_channels": 256,
    "fpn_num_outs": 5,

    # rpn
    "rpn_in_channels": 256,
    "rpn_feat_channels": 256,
    "rpn_loss_cls_weight": 1.0,
    "rpn_loss_reg_weight": 1.0,
    "rpn_cls_out_channels": 1,
    "rpn_target_means": [0., 0., 0., 0.],
    "rpn_target_stds": [1.0, 1.0, 1.0, 1.0],

    # bbox_assign_sampler
    "neg_iou_thr": 0.3,
    "pos_iou_thr": 0.7,
    "min_pos_iou": 0.3,
    "num_bboxes": 245520,
    "num_gts": 128,
    "num_expected_neg": 256,
    "num_expected_pos": 128,

    # proposal
    "activate_num_classes": 2,
    "use_sigmoid_cls": True,

    # roi_align
    "roi_layer": dict(type='RoIAlign', out_size=7, mask_out_size=14, sample_num=2),
    "roi_align_out_channels": 256,
    "roi_align_featmap_strides": [4, 8, 16, 32],
    "roi_align_finest_scale": 56,
    "roi_sample_num": 640,

    # bbox_assign_sampler_stage2
    "neg_iou_thr_stage2": 0.5,
    "pos_iou_thr_stage2": 0.5,
    "min_pos_iou_stage2": 0.5,
    "num_bboxes_stage2": 2000,
    "num_expected_pos_stage2": 128,
    "num_expected_neg_stage2": 512,
    "num_expected_total_stage2": 512,

    # rcnn
    "rcnn_num_layers": 2,
    "rcnn_in_channels": 256,
    "rcnn_fc_out_channels": 1024,
    "rcnn_mask_out_channels": 256,
    "rcnn_loss_cls_weight": 1,
    "rcnn_loss_reg_weight": 1,
    "rcnn_loss_mask_fb_weight": 1,
    "rcnn_target_means": [0., 0., 0., 0.],
    "rcnn_target_stds": [0.1, 0.1, 0.2, 0.2],

    # train proposal
    "rpn_proposal_nms_across_levels": False,
    "rpn_proposal_nms_pre": 2000,
    "rpn_proposal_nms_post": 2000,
    "rpn_proposal_max_num": 2000,
    "rpn_proposal_nms_thr": 0.7,
    "rpn_proposal_min_bbox_size": 0,

    # test proposal
    "rpn_nms_across_levels": False,
    "rpn_nms_pre": 1000,
    "rpn_nms_post": 1000,
    "rpn_max_num": 1000,
    "rpn_nms_thr": 0.7,
    "rpn_min_bbox_min_size": 0,
    "test_score_thr": 0.05,
    "test_iou_thr": 0.5,
    "test_max_per_img": 100,
    "test_batch_size": 2,
    "rpn_head_loss_type": "CrossEntropyLoss",
    "rpn_head_use_sigmoid": True,
    "rpn_head_weight": 1.0,
    "mask_thr_binary": 0.5,

    # LR
    "base_lr": 0.02,
    "base_step": 58633,
    "total_epoch": 13,
    "warmup_step": 500,
    "warmup_mode": "linear",
    "warmup_ratio": 1 / 3.0,
    "sgd_step": [8, 11],
    "sgd_momentum": 0.9,

    # train
    "batch_size": 2,
    "loss_scale": 1,
    "momentum": 0.91,
    "weight_decay": 1e-4,
    "pretrain_epoch_size": 0,
    "epoch_size": 12,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 12,
    "save_checkpoint_path": "./",
    "mindrecord_dir": "/home/mask_rcnn/MindRecord_COCO2017_Train",
    "coco_root": "/home/mask_rcnn/coco2017/",
    "train_data_type": "train2017",
    "val_data_type": "val2017",
    "instance_set": "annotations/instances_{}.json",
    "coco_classes":
    ('background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
     'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
     'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
     'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
     'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
     'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
     'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
     'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
     'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
     'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
     'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
     'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
     'hair drier', 'toothbrush'),
    "num_classes":
    81
})
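
A quick consistency check (not part of the original): `num_bboxes` equals `num_anchors` times the total number of feature-map cells:

feature_shapes = [(192, 320), (96, 160), (48, 80), (24, 40), (12, 20)]
assert 3 * sum(h * w for h, w in feature_shapes) == 245520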
Example #26
0
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

from easydict import EasyDict as ed

from tinyms import model

MODEL_HUB = ed({
    "alexnet_v1": model.alexnet,
    "lenet5_v1": model.lenet5,
    "resnet50_v1": model.resnet50,
    "mobilenet_v2": model.mobilenetv2,
    "ssd300_v1": model.ssd300_mobilenetv2,
    "vgg16_v1": model.vgg16,
})
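
A hedged usage sketch: the table maps a model name to a TinyMS network builder, and EasyDict allows either lookup style (the builders themselves are called elsewhere with model-specific arguments):

builder = MODEL_HUB["resnet50_v1"]      # dict-style lookup
same_builder = MODEL_HUB.resnet50_v1    # attribute-style lookup
assert builder is same_builder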
Example #27
0
from dataset import create_dataset
from lr_generator import get_lr
from utils import Monitor, CrossEntropy

config_quant = ed({
    "class_num": 10,
    "batch_size": 128,
    "step_threshold": 20,
    "loss_scale": 1024,
    "momentum": 0.9,
    "weight_decay": 1e-4,
    "epoch_size": 1,
    "pretrained_epoch_size": 90,
    "buffer_size": 1000,
    "image_height": 224,
    "image_width": 224,
    "data_load_mode": "mindata",
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 50,
    "save_checkpoint_path": "./",
    "warmup_epochs": 0,
    "lr_decay_mode": "cosine",
    "use_label_smooth": True,
    "label_smooth_factor": 0.1,
    "lr_init": 0,
    "lr_max": 0.005,
})

dataset_path = "/home/workspace/mindspore_dataset/cifar-10-batches-bin/"

Example #28
0
from dataset import create_dataset
from lr_generator import get_lr
from utils import Monitor, CrossEntropyWithLabelSmooth
from mobilenetV2 import mobilenetV2

config_ascend_quant = ed({
    "num_classes": 10,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 300,
    "step_threshold": 10,
    "data_load_mode": "mindata",
    "epoch_size": 1,
    "start_epoch": 200,
    "warmup_epochs": 1,
    "lr": 0.05,
    "momentum": 0.997,
    "weight_decay": 4e-5,
    "label_smooth": 0.1,
    "loss_scale": 1024,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 300,
    "save_checkpoint_path": "./checkpoint",
})

dataset_path = "/home/workspace/mindspore_dataset/cifar-10-batches-bin/"


@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
Example #29
0
# limitations under the License.
# ============================================================================
"""
network config setting, will be used in train.py and eval.py
"""
from easydict import EasyDict as ed

config_ascend = ed({
    "num_classes": 1000,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 256,
    "epoch_size": 200,
    "warmup_epochs": 4,
    "lr": 0.4,
    "momentum": 0.9,
    "weight_decay": 4e-5,
    "label_smooth": 0.1,
    "loss_scale": 1024,
    "save_checkpoint": True,
    "save_checkpoint_epochs": 1,
    "keep_checkpoint_max": 200,
    "save_checkpoint_path": "./checkpoint",
})

config_gpu = ed({
    "num_classes": 1000,
    "image_height": 224,
    "image_width": 224,
    "batch_size": 64,
    "epoch_size": 300,
Example #30
0
config = ed({
    "model": "ssd_mobilenet_v1_fpn",
    "img_shape": [640, 640],
    "num_ssd_boxes": 51150,
    "neg_pre_positive": 3,
    "match_threshold": 0.5,
    "nms_threshold": 0.6,
    "min_score": 0.1,
    "max_boxes": 100,

    # learning rate settings
    "global_step": 0,
    "lr_init": 0.01333,
    "lr_end_rate": 0.0,
    "warmup_epochs": 2,
    "momentum": 0.9,
    "weight_decay": 1.5e-4,

    # network
    "num_default": [6, 6, 6, 6, 6],
    "extras_in_channels": [256, 512, 1024, 256, 256],
    "extras_out_channels": [256, 256, 256, 256, 256],
    "extras_strides": [1, 1, 2, 2, 2, 2],
    "extras_ratio": [0.2, 0.2, 0.2, 0.25, 0.5, 0.25],
    "feature_size": [80, 40, 20, 10, 5],
    "min_scale": 0.2,
    "max_scale": 0.95,
    "aspect_ratios": [(2, 3), (2, 3), (2, 3), (2, 3), (2, 3), (2, 3)],
    "steps": (8, 16, 32, 64, 128),
    "prior_scaling": (0.1, 0.2),
    "gamma": 2.0,
    "alpha": 0.75,
    "num_addition_layers": 4,
    "use_anchor_generator": True,
    "use_global_norm": True,

    # `mindrecord_dir` and `coco_root` are better to use absolute path.
    "feature_extractor_base_param": "/ckpt/mobilenet_v1.ckpt",
    "mindrecord_dir": "/data/MindRecord_COCO",
    "coco_root": "/data/coco2017",
    "train_data_type": "train2017",
    "val_data_type": "val2017",
    "instances_set": "annotations/instances_{}.json",
    "classes": ('background', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
                'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
                'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',
                'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
                'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
                'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                'kite', 'baseball bat', 'baseball glove', 'skateboard',
                'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
                'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
                'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
                'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
                'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
                'refrigerator', 'book', 'clock', 'vase', 'scissors',
                'teddy bear', 'hair drier', 'toothbrush'),
    "num_classes": 81,
    # The annotation.json position of voc validation dataset.
    "voc_json": "annotations/voc_instances_val.json",
    # voc original dataset.
    "voc_root": "/data/voc_dataset",
    # if coco or voc used, `image_dir` and `anno_path` are useless.
    "image_dir": "",
    "anno_path": "",
    "export_format": "MINDIR",
    "export_file": "ssd.mindir"
})
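
As above, a quick consistency check (not part of the original): the 51150 default boxes follow from `num_default` and `feature_size`:

num_default = [6, 6, 6, 6, 6]
feature_size = [80, 40, 20, 10, 5]
assert sum(n * s * s for n, s in zip(num_default, feature_size)) == 51150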