# Configuration for the text-image synthesis pipeline.
# NOTE(review): collapsed one-line paste restored to one statement per line.
from deepvac import AttrDict, new

config = new()
config.synthesis = AttrDict()  # synthesis

# Corpus / asset locations (placeholders — fill in before running).
config.synthesis.total_num = 10
config.synthesis.txt_file = 'your lexicon txt'
config.synthesis.images_dir = 'your image path'
config.synthesis.video_file = 'your video path'
config.synthesis.sample_rate = 1
config.synthesis.fonts_dir = 'your font ttf path'
config.synthesis.chars = 'your char set path'

# Layout probabilities/ratios (presumably each in [0, 1] — confirm with the
# synthesis implementation).
config.synthesis.dense_ratio = 0.2
config.synthesis.same_font_ratio = 0.5
config.synthesis.one_way_ratio = 0.3
config.synthesis.border_ratio = 0
config.synthesis.vertical_ratio = 0
config.synthesis.random_space_ratio = 0
config.synthesis.random_space_min = -0.1
config.synthesis.random_space_max = 0.1

# Font size range in points.
config.synthesis.min_font = 8
config.synthesis.max_font = 60
# Training configuration for the Yolov5 detector.
# NOTE(review): collapsed one-line paste restored to one statement per line.

# import second: third party libs
import torch
from torchvision import transforms

# import third: libs in your program
from deepvac import AttrDict, new
from deepvac.aug.yolo_aug import *
from aug.aug import Yolov5TrainComposer, Yolov5ValComposer
from data.datasets import Yolov5MosaicDataset, Yolov5Dataset
from modules import Yolov5S, Yolov5L, Yolov5Loss

################################################################################
### TRAIN
################################################################################
config = new("Yolov5Train")

### ---------------------------------- common ----------------------------------
config.core.Yolov5Train.class_num = 80  # COCO-style 80-class setup
config.core.Yolov5Train.device = torch.device(
    'cuda' if torch.cuda.is_available() else 'cpu')
config.core.Yolov5Train.output_dir = "output"
config.core.Yolov5Train.log_every = 10
config.core.Yolov5Train.disable_git = True
config.core.Yolov5Train.model_reinterpret_cast = True
config.core.Yolov5Train.cast_state_dict_strict = True

### ---------------------------------- training --------------------------------
config.core.Yolov5Train.ema = True
# define ema_decay with other func
# config.ema_decay = lambda x: 0.9999 * (1 - math.exp(-x / 2000))
config.core.Yolov5Train.amp = False
# Training configuration for the PSENet text detector.
# NOTE(review): collapsed one-line paste restored to one statement per line.
import torch
import torch.optim as optim
from deepvac import AttrDict, new
from data.dataloader import PseTrainDataset, PseTestDataset
from modules.model_mv3fpn import FpnMobileNetv3
from modules.loss import PSELoss

config = new('PSENetTrain')

## ------------------ common ------------------
config.core.PSENetTrain.device = torch.device(
    'cuda' if torch.cuda.is_available() else 'cpu')
config.core.PSENetTrain.output_dir = 'output'
config.core.PSENetTrain.log_every = 10
config.core.PSENetTrain.disable_git = True
config.core.PSENetTrain.model_reinterpret_cast = True
config.core.PSENetTrain.cast_state_dict_strict = False
#config.core.PSENetTrain.jit_model_path = "./output/script.pt"

## -------------------- training ------------------
## train runtime
config.core.PSENetTrain.epoch_num = 200
config.core.PSENetTrain.save_num = 1

## -------------------- tensorboard ------------------
# config.core.PSENetTrain.tensorboard_port = "6007"
# config.core.PSENetTrain.tensorboard_ip = None

## -------------------- script and quantize ------------------
config.cast.script_model_dir = "./output/script.pt"
# Training configuration for the DBNet text detector.
# NOTE(review): collapsed one-line paste restored to one statement per line.
import torch
import torch.optim as optim
from deepvac import AttrDict, new
from data.dataloader import DBTrainDataset, DBTrainCocoDataset, DBTestDataset
from modules.model_db import Resnet18DB, Mobilenetv3LargeDB
from modules.loss import DBLoss

config = new('DBNetTrain')

## ------------------ common ------------------
config.core.DBNetTrain.device = torch.device(
    'cuda' if torch.cuda.is_available() else 'cpu')
config.core.DBNetTrain.output_dir = 'output'
config.core.DBNetTrain.log_every = 100
config.core.DBNetTrain.disable_git = True
config.core.DBNetTrain.model_reinterpret_cast = True
config.core.DBNetTrain.cast_state_dict_strict = True
# config.core.DBNetTrain.jit_model_path = "./output/script.pt"

## -------------------- training ------------------
## train runtime
config.core.DBNetTrain.epoch_num = 200
config.core.DBNetTrain.save_num = 1

## -------------------- tensorboard ------------------
#config.core.DBNetTrain.tensorboard_port = "6007"
#config.core.DBNetTrain.tensorboard_ip = None

## -------------------- script and quantize ------------------
config.cast.ScriptCast = AttrDict()
# Test/synthesis configuration for portrait segmentation with HRNet.
# NOTE(review): collapsed one-line paste restored to one statement per line.
import torch
from torchvision import transforms as trans
from deepvac import AttrDict, new
from network import HRNet

config = new('PortraitSegTest')
config.core.PortraitSegTest.disable_git = True
config.core.PortraitSegTest.device = torch.device(
    'cuda' if torch.cuda.is_available() else 'cpu')

# Binary segmentation: foreground (portrait) vs background.
num_classes = 2
config.core.PortraitSegTest.net = HRNet(num_classes=num_classes)
config.core.PortraitSegTest.portrait_mask_output_dir = '<your-portrait-mask-dir>'
config.core.PortraitSegTest.model_path = 'weights/portrait.pth'
config.core.PortraitSegTest.test_loader = ''
# Preprocessing: resize to the network input size, then ImageNet normalization.
config.core.PortraitSegTest.compose = trans.Compose([
    trans.Resize([448, 448]),
    trans.ToTensor(),
    trans.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Synthesis stage reuses the test config and adds its own inputs/outputs.
config.core.Synthesis = config.core.PortraitSegTest.clone()
config.core.Synthesis.gen_portrait_mask = True
config.core.Synthesis.is_clothes_task = True  # True: clothes , False: human
config.core.Synthesis.input_image_dir = '<your-input-image-dir>'
config.core.Synthesis.input_label_dir = '<your-input-label-dir>'
# Test/synthesis configuration for RetinaFace-based hat chartlet synthesis.
# NOTE(review): collapsed one-line paste restored to one statement per line.
import torch
from deepvac import AttrDict, new
from modules.model import RetinaFaceMobileNet, RetinaFaceResNet
from aug import SynthesisHatComposer

config = new('RetinaTest')
config.core.RetinaTest.disable_git = True
config.core.RetinaTest.device = torch.device(
    'cuda' if torch.cuda.is_available() else 'cpu')
config.core.RetinaTest.model_path = "/ your face det model path /"

# Detection post-processing thresholds.
config.core.RetinaTest.confidence_threshold = 0.02
config.core.RetinaTest.nms_threshold = 0.4
config.core.RetinaTest.top_k = 5000
config.core.RetinaTest.keep_top_k = 1
config.core.RetinaTest.max_edge = 2000
config.core.RetinaTest.rgb_means = (104, 117, 123)
config.core.RetinaTest.net = RetinaFaceResNet()
config.core.RetinaTest.test_loader = ''

# Synthesis stage reuses the detector config and adds chartlet I/O paths.
config.core.Synthesis2D = config.core.RetinaTest.clone()
config.core.Synthesis2D.input_image_dir = '/ your input image path /'
config.core.Synthesis2D.input_hat_mask_dir = '/ your hat mask path /'
config.core.Synthesis2D.input_hat_image_dir = '/ your hat(chartlet) image path /'
config.core.Synthesis2D.output_image_dir = '/ your output image path /'
config.core.Synthesis2D.output_anno_dir = '/ your output annotation path /'
# Configuration for exporting contour labels from COCO-format annotations.
# NOTE(review): collapsed one-line paste restored to one statement per line;
# the parallel-index loop was rewritten with zip() for clarity.
import torch
from deepvac import new, AttrDict
from deepvac.datasets import CocoCVContoursDataset

config = new(None)
config.datasets.CocoCVContoursDataset = AttrDict()
config.datasets.CocoCVContoursDataset.auto_detect_subdir_with_basenum = 0

# One dataset per (sample prefix, COCO json) pair (placeholders — fill in).
sample_path_prefix_list = ['your sample path prefix list']
target_path_list = ['your json file path list']

config.output_label_dir = 'your output label dir'
config.output_image_dir = 'your output image dir'
config.show = True

config.test_loader_list = []
for sample_prefix, target_path in zip(sample_path_prefix_list, target_path_list):
    test_dataset = CocoCVContoursDataset(config, sample_prefix, target_path)
    # batch_size=1: contours are exported per image.
    config.test_loader_list.append(
        torch.utils.data.DataLoader(test_dataset, batch_size=1, pin_memory=False))