def __init__(self):
    """Set up COCO train/val data loaders, anchors, trainer and logging.

    Reads all hyper-parameters from the module-level ``opt`` object.
    """
    # Build each dataset exactly once and reuse it; the original constructed
    # COCODataset four times (once per loader plus once per len() call),
    # re-parsing the annotation file each time.
    train_dataset = COCODataset(is_train=True)
    test_dataset = COCODataset(is_train=False)
    self.VocTrainDataLoader = DataLoader(
        train_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=opt.num_workers,
        drop_last=True,
        pin_memory=True)
    # batch_size=1 / num_workers=0: deterministic, low-overhead evaluation
    self.VocTestDataLoader = DataLoader(
        test_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=0,
        drop_last=True,
        pin_memory=True)
    self.img_size = opt.img_size
    self.anchors = parse_anchors(opt.anchors_path)
    self.train_data_length = len(train_dataset)
    self.val_data_length = len(test_dataset)
    self.trainer = Yolov3COCOTrainer()
    self.testDataset = test_dataset
    # pick 10 random validation indices for periodic visual inspection
    self.test_imgs = np.random.randint(0, self.val_data_length, 10)
    # self.test_imgs = np.random.randint(low=0, high=len(self.testDataset), size=35)
    # NOTE(review): attribute name 'normailze' is a typo but kept — callers
    # elsewhere may reference it.
    self.normailze = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(opt.mean, opt.std)])
    self.logger = opt.logger
    self.writer = SummaryWriter(log_dir=os.path.join(
        opt.base_path, 'logs', f'summary_{opt.optimizer}'))
def main():
    """Smoke-test the COCO-style sperm dataset: load sample 0 and inspect it."""
    print("ann file:", annotations_file_path)
    print("root:", sperm_images_path)
    # build the training-time augmentation pipeline
    train_transforms = get_albumentations_transforms(mode='train')
    dataset = COCODataset(root_dir=sperm_images_path,
                          coco_path=annotations_file_path,
                          transforms=train_transforms)
    image, target = dataset[0]
    # report what the dataset actually yields
    for obj in (image, target):
        print(type(obj))
    print(target.keys())
def test(args):
    """Sanity-check COCO train/validation dataset loading.

    Loads the YAML config from ``args.config_file``, builds both splits,
    and fetches one sample from each to verify shapes.

    Raises:
        yaml.YAMLError: if the config file is not valid YAML.
        NotImplementedError: for any ``args.dataset_type`` other than 0.
    """
    dataset_dir = args.dataset_dir
    with open(args.config_file, 'r') as f:
        try:
            config = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            logging.error("Invalid YAML")
            logging.error(exc)
            # BUGFIX: the original swallowed the error and continued, which
            # crashed later with an unrelated NameError ('config' unbound).
            # Fail fast with the real cause instead.
            raise
    if args.dataset_type == 0:
        coco_dataset_dir = path.join(dataset_dir, 'coco')
        train_dataset = COCODataset(coco_dataset_dir, config, mode='train')
        val_dataset = COCODataset(coco_dataset_dir, config, mode='validation')
    else:
        raise NotImplementedError
    images, corrs = train_dataset[0]
    print(images.shape, corrs.shape)
    print('training image loading working properly')
    images, corrs = val_dataset[0]
    print(images.shape, corrs.shape)
    print('validation image loading working properly')
def test_COCODataset(self):
    """Verify image/target tensors produced by COCODataset for one sample."""
    from transform import get_transforms
    from dataset import COCODataset
    from utils import read_yaml

    # read config file for transforms
    config_path = "configs/default_config.yml"
    config = read_yaml(config_path)
    # form basic albumentation transform
    transforms = get_transforms(config=config, mode="val")
    # init COCODataset
    DATA_ROOT = "tests/data/"
    COCO_PATH = "tests/data/coco_true_1.json"
    dataset = COCODataset(DATA_ROOT, COCO_PATH, transforms)

    # BUGFIX: fetch the first sample ONCE. The original called
    # next(iter(dataset)) twice, loading and transforming the image a
    # second time just to get the target dict.
    image_tensor, target_tensor = next(iter(dataset))

    # apply checks for image tensor
    self.assertEqual(image_tensor.type(), "torch.FloatTensor")
    self.assertEqual(list(image_tensor.size()), [3, 1920, 1080])
    self.assertAlmostEqual(float(image_tensor.max()), 1.0, places=2)
    self.assertAlmostEqual(float(image_tensor.mean()), 0.39, places=2)

    # apply checks for each field of annotation 0 in the target dict
    boxes_tensor_0 = target_tensor["boxes"][0]
    self.assertEqual(boxes_tensor_0.type(), "torch.FloatTensor")
    self.assertEqual(boxes_tensor_0.cpu().numpy().tolist(),
                     [97.0, 643.0, 931.0, 1185.0])

    labels_tensor_0 = target_tensor["labels"][0]
    self.assertEqual(labels_tensor_0.type(), "torch.LongTensor")
    self.assertEqual(labels_tensor_0.cpu().numpy().item(), 1)

    masks_tensor_0 = target_tensor["masks"][0]
    self.assertEqual(masks_tensor_0.type(), "torch.ByteTensor")
    self.assertEqual(list(masks_tensor_0.size()), [1920, 1080])
    self.assertAlmostEqual(float(masks_tensor_0.max()), 1.0, places=1)

    image_id_tensor_0 = target_tensor["image_id"][0]
    self.assertEqual(image_id_tensor_0.type(), "torch.LongTensor")
    self.assertEqual(image_id_tensor_0.cpu().numpy().item(), 0)

    area_tensor_0 = target_tensor["area"][0]
    self.assertEqual(area_tensor_0.type(), "torch.FloatTensor")
    self.assertEqual(area_tensor_0.cpu().numpy().item(), 452028.0)

    iscrowd_tensor_0 = target_tensor["iscrowd"][0]
    self.assertEqual(iscrowd_tensor_0.type(), "torch.LongTensor")
    self.assertEqual(iscrowd_tensor_0.cpu().numpy().item(), 0)

    # apply checks for annotation 1
    boxes_tensor_1 = target_tensor["boxes"][1]
    self.assertEqual(boxes_tensor_1.type(), "torch.FloatTensor")
    self.assertEqual(boxes_tensor_1.cpu().numpy().tolist(),
                     [97.0, 500.0, 931.0, 1185.0])

    labels_tensor_1 = target_tensor["labels"][1]
    self.assertEqual(labels_tensor_1.type(), "torch.LongTensor")
    self.assertEqual(labels_tensor_1.cpu().numpy().item(), 2)

    masks_tensor_1 = target_tensor["masks"][1]
    self.assertEqual(masks_tensor_1.type(), "torch.ByteTensor")
    self.assertEqual(list(masks_tensor_1.size()), [1920, 1080])
    self.assertAlmostEqual(float(masks_tensor_1.max()), 1.0, places=1)

    area_tensor_1 = target_tensor["area"][1]
    self.assertEqual(area_tensor_1.type(), "torch.FloatTensor")
    self.assertEqual(area_tensor_1.cpu().numpy().item(), 571290.0)
# NOTE(review): fragment of an FCOS training script. The start of the
# enclosing scope and the end of the optim.SGD(...) call lie outside this
# chunk, so it is documented in place rather than rewritten.
os.makedirs(working_dir)  # raises if the dir exists — presumably fresh runs only; confirm
logger = SummaryWriter(working_dir)
# torch.distributed launchers set WORLD_SIZE; default to single process
n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
args.n_gpu = n_gpu
args.distributed = n_gpu > 1
if args.distributed:
    torch.cuda.set_device(args.local_rank)
    # 'gloo' backend here; a sibling script in this file uses 'nccl' — verify intent
    torch.distributed.init_process_group(backend='gloo', init_method='env://')
    synchronize()
device = 'cuda'
train_set = COCODataset(args.path, 'train', preset_transform(args, train=True))
valid_set = COCODataset(args.path, 'val', preset_transform(args, train=False))
# alternative backbones kept for experimentation:
# backbone = vovnet39(pretrained=True)
# backbone = vovnet57(pretrained=True)
# backbone = resnet18(pretrained=True)
backbone = resnet50(pretrained=True)
#backbone = resnet101(pretrained=True)
model = FCOS(args, backbone)
model = model.to(device)
# SGD with momentum; remaining arguments are cut off past this chunk
optimizer = optim.SGD(
    model.parameters(),
    lr=args.lr,
    momentum=0.9,
if __name__ == '__main__':
    # Entry point of a domain-adaptation FCOS training script: builds
    # source- and target-domain train/val datasets. The g_params list
    # comprehension at the end is cut off past this chunk.
    args = get_args()
    n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
    args.distributed = n_gpu > 1
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend='nccl', init_method='env://')
        synchronize()
    device = 'cuda'
    # source-domain splits
    train_set = COCODataset(args.path, args.domain, 'train', preset_transform(args, train=True))
    valid_set = COCODataset(args.path, args.domain, 'val', preset_transform(args, train=False))
    # target-domain splits (used for the adaptation objective)
    target_train_set = COCODataset(args.path, args.target_domain, 'train', preset_transform(args, train=True))
    target_valid_set = COCODataset(args.path, args.target_domain, 'val', preset_transform(args, train=False))
    # no classification head; pretrained weights deliberately not loaded
    backbone = resnet50(pretrained=False, if_include_top=False)
    model = FCOS(args, backbone)
    model = model.to(device)
    g_params = [
        p for n, p in model.named_parameters()
# img = self.transforms(img) # # return img, my_annotation # # def __len__(self): # return len(self.ids) # # # def collate_fn(batch): # return tuple(zip(*batch)) if __name__ == '__main__': root = 'C://DeepLearningData/COCOdataset2017/images/val' annotation = 'C://DeepLearningData/COCOdataset2017/annotations/instances_val2017.json' coco_dset = COCODataset(root, annotation, transforms.Compose([transforms.ToTensor()]), True) data_loader = torch.utils.data.DataLoader(coco_dset, batch_size=1, shuffle=False, num_workers=0, collate_fn=collate_fn) for i, [image, annotation] in enumerate(data_loader): image = image[0].permute(1, 2, 0) ann = annotation[0] print(ann['label']) print(ann['category']) print(ann['mask'][250]) plt.subplot(1, 2, 1) plt.imshow(image)
# NOTE(review): fragment of an ATSS evaluation script; it starts mid-file
# and ends on a dangling `if args.distributed:` whose body is past this chunk.
if args.distributed:
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend='gloo', init_method='env://')
    synchronize()
device = 'cuda'
# validation-time preprocessing: resize + normalize only (no augmentation)
valid_trans = transform.Compose([
    transform.Resize(args.test_min_size, args.test_max_size),
    transform.ToTensor(),
    transform.Normalize(args.pixel_mean, args.pixel_std)
])
valid_set = COCODataset("/data/COCO_17/", 'val', valid_trans)
# backbone = vovnet39(pretrained=False)
# backbone = resnet18(pretrained=False)
backbone = resnet50(pretrained=False)  # weights come from the checkpoint below
model = ATSS(args, backbone)
# load weight
model_file = "./training_dir/epoch-12.pt"
chkpt = torch.load(model_file, map_location='cpu')  # load checkpoint
model.load_state_dict(chkpt['model'])
print('load weights from ' + model_file)
model = model.to(device)
if args.distributed:
def train(config: dict = None):
    """Train a torchvision Mask R-CNN on a COCO-format dataset.

    Reads every hyper-parameter from ``config``, builds train/val datasets
    and loaders, an optimizer and LR scheduler, then runs ``NUM_EPOCH``
    epochs with per-epoch COCO evaluation. Saves the best model (by bbox
    0.50:0.95 AP) and the final model under the experiment directory.
    """
    # fix the seed for reproducible results
    SEED = config["SEED"]
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    random.seed(SEED)

    # parse config parameters
    DATA_ROOT = config["DATA_ROOT"]
    COCO_PATH = config["COCO_PATH"]
    DATA_ROOT_VAL = config["DATA_ROOT_VAL"]
    COCO_PATH_VAL = config["COCO_PATH_VAL"]
    EXPERIMENT_NAME = config["EXPERIMENT_NAME"]
    OPTIMIZER_NAME = config["OPTIMIZER_NAME"]
    OPTIMIZER_WEIGHT_DECAY = config["OPTIMIZER_WEIGHT_DECAY"]
    OPTIMIZER_MOMENTUM = config["OPTIMIZER_MOMENTUM"]
    OPTIMIZER_BETAS = config["OPTIMIZER_BETAS"]
    OPTIMIZER_EPS = config["OPTIMIZER_EPS"]
    OPTIMIZER_AMSGRAD = config["OPTIMIZER_AMSGRAD"]
    OPTIMIZER_ADABOUND_GAMMA = config["OPTIMIZER_ADABOUND_GAMMA"]
    OPTIMIZER_ADABOUND_FINAL_LR = config["OPTIMIZER_ADABOUND_FINAL_LR"]
    LEARNING_RATE = config["LEARNING_RATE"]
    LEARNING_RATE_STEP_SIZE = config["LEARNING_RATE_STEP_SIZE"]
    LEARNING_RATE_GAMMA = config["LEARNING_RATE_GAMMA"]
    TRAINABLE_BACKBONE_LAYERS = config["TRAINABLE_BACKBONE_LAYERS"]
    RPN_ANCHOR_SIZES = config["RPN_ANCHOR_SIZES"]
    RPN_ANCHOR_ASPECT_RATIOS = config["RPN_ANCHOR_ASPECT_RATIOS"]
    RPN_PRE_NMS_TOP_N_TRAIN = config["RPN_PRE_NMS_TOP_N_TRAIN"]
    RPN_PRE_NMS_TOP_N_TEST = config["RPN_PRE_NMS_TOP_N_TEST"]
    RPN_POST_NMS_TOP_N_TRAIN = config["RPN_POST_NMS_TOP_N_TRAIN"]
    RPN_POST_NMS_TOP_N_TEST = config["RPN_POST_NMS_TOP_N_TEST"]
    RPN_NMS_THRESH = config["RPN_NMS_THRESH"]
    RPN_FG_IOU_THRESH = config["RPN_FG_IOU_THRESH"]
    RPN_BG_IOU_THRESH = config["RPN_BG_IOU_THRESH"]
    BOX_DETECTIONS_PER_IMAGE = config["BOX_DETECTIONS_PER_IMAGE"]
    LOG_FREQ = config["LOG_FREQ"]
    COCO_AP_TYPE = config["COCO_AP_TYPE"]
    TRAIN_SPLIT_RATE = config["TRAIN_SPLIT_RATE"]
    BATCH_SIZE = config["BATCH_SIZE"]
    NUM_EPOCH = config["NUM_EPOCH"]
    DEVICE = config["DEVICE"]
    NUM_WORKERS = config["NUM_WORKERS"]

    # init directories
    directories = Directories(experiment_name=EXPERIMENT_NAME)

    # copy config file to experiment dir
    yaml_path = os.path.join(directories.experiment_dir, "config.yml")
    save_yaml(config, yaml_path)

    # init tensorboard summary writer
    writer = SummaryWriter(directories.tensorboard_dir)

    # set pytorch device, falling back to CPU if CUDA was requested but absent
    device = torch.device(DEVICE)
    if "cuda" in DEVICE and not torch.cuda.is_available():
        print("CUDA not available, switching to CPU")
        device = torch.device("cpu")

    # use our dataset and defined transformations
    dataset = COCODataset(
        DATA_ROOT, COCO_PATH, get_transforms(config=config, mode="train")
    )
    if COCO_PATH_VAL:
        dataset_val = COCODataset(
            DATA_ROOT_VAL, COCO_PATH_VAL, get_transforms(config=config, mode="val")
        )
    else:
        # no explicit val set: reuse the train data with val transforms,
        # split into subsets further below
        dataset_val = COCODataset(
            DATA_ROOT, COCO_PATH, get_transforms(config=config, mode="val")
        )

    # +1 for background class
    num_classes = dataset.num_classes + 1
    config["NUM_CLASSES"] = num_classes

    # add category mappings to config, will be used at prediction
    category_mapping = get_category_mapping_from_coco_file(COCO_PATH)
    config["CATEGORY_MAPPING"] = category_mapping

    # split the dataset in train and val set if val path is not defined
    if not COCO_PATH_VAL:
        indices = torch.randperm(len(dataset)).tolist()
        num_train = int(len(indices) * TRAIN_SPLIT_RATE)
        train_indices = indices[:num_train]
        val_indices = indices[num_train:]
        dataset = torch.utils.data.Subset(dataset, train_indices)
        dataset_val = torch.utils.data.Subset(dataset_val, val_indices)

    # define training and val data loaders
    data_loader_train = torch.utils.data.DataLoader(
        dataset,
        batch_size=BATCH_SIZE,
        shuffle=True,
        num_workers=NUM_WORKERS,
        collate_fn=core.utils.collate_fn,
    )
    data_loader_val = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=1,
        shuffle=False,
        num_workers=NUM_WORKERS,
        collate_fn=core.utils.collate_fn,
    )

    # get the model using our helper function
    model = get_torchvision_maskrcnn(
        num_classes=num_classes,
        trainable_backbone_layers=TRAINABLE_BACKBONE_LAYERS,
        anchor_sizes=RPN_ANCHOR_SIZES,
        anchor_aspect_ratios=RPN_ANCHOR_ASPECT_RATIOS,
        rpn_pre_nms_top_n_train=RPN_PRE_NMS_TOP_N_TRAIN,
        rpn_pre_nms_top_n_test=RPN_PRE_NMS_TOP_N_TEST,
        rpn_post_nms_top_n_train=RPN_POST_NMS_TOP_N_TRAIN,
        rpn_post_nms_top_n_test=RPN_POST_NMS_TOP_N_TEST,
        rpn_nms_thresh=RPN_NMS_THRESH,
        rpn_fg_iou_thresh=RPN_FG_IOU_THRESH,
        rpn_bg_iou_thresh=RPN_BG_IOU_THRESH,
        box_detections_per_img=BOX_DETECTIONS_PER_IMAGE,
        pretrained=True,
    )

    # move model to the right device
    model.to(device)

    # construct an optimizer over trainable params only
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer_factory = OptimizerFactory(
        learning_rate=LEARNING_RATE,
        momentum=OPTIMIZER_MOMENTUM,
        weight_decay=OPTIMIZER_WEIGHT_DECAY,
        betas=OPTIMIZER_BETAS,
        eps=OPTIMIZER_EPS,
        amsgrad=OPTIMIZER_AMSGRAD,
        adabound_gamma=OPTIMIZER_ADABOUND_GAMMA,
        adabound_final_lr=OPTIMIZER_ADABOUND_FINAL_LR,
    )
    optimizer = optimizer_factory.get(params, OPTIMIZER_NAME)

    # and a learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer, step_size=LEARNING_RATE_STEP_SIZE, gamma=LEARNING_RATE_GAMMA
    )

    # create coco index
    print("Creating COCO index...")
    coco_api_train = get_coco_api_from_dataset(data_loader_train.dataset)
    coco_api_val = get_coco_api_from_dataset(data_loader_val.dataset)

    # BUGFIX: initialize the best-AP tracker ONCE, before the epoch loop.
    # The original reset it to -1 inside the loop, so every single epoch
    # counted as "best" and overwrote the best-weights checkpoint.
    best_bbox_05095_ap = -1

    # train it for NUM_EPOCH epochs
    for epoch in range(NUM_EPOCH):
        # train for one epoch, logging every LOG_FREQ iterations
        train_one_epoch(
            model=model,
            optimizer=optimizer,
            data_loader=data_loader_train,
            coco_api=coco_api_train,
            device=device,
            epoch=epoch,
            log_freq=LOG_FREQ,
            coco_ap_type=COCO_AP_TYPE,
            writer=writer,
        )
        # update the learning rate
        lr_scheduler.step()
        # get iteration number
        num_images = len(data_loader_train.dataset)
        iter_num = epoch * num_images
        # evaluate on the val dataset
        loss_lists, coco_evaluator = evaluate(
            model=model,
            data_loader=data_loader_val,
            coco_api=coco_api_val,
            device=device,
            iter_num=iter_num,
            coco_ap_type=COCO_AP_TYPE,
            writer=writer,
        )
        # update best model if it has the best bbox 0.50:0.95 AP
        # (stats[0] is AP@[.50:.95] in pycocotools' stats layout)
        bbox_05095_ap = coco_evaluator.coco_eval["bbox"].stats[0]
        if bbox_05095_ap > best_bbox_05095_ap:
            model_dict = {"state_dict": model.state_dict(), "config": config}
            torch.save(model_dict, directories.best_weight_path)
            best_bbox_05095_ap = bbox_05095_ap

    # save final model
    model_dict = {"state_dict": model.state_dict(), "config": config}
    torch.save(model_dict, directories.last_weight_path)
# NOTE(review): fragment of an FCOS evaluation script; it ends mid-call
# (nn.parallel.DistributedDataParallel is cut off past this chunk).
args = get_args()
args.threshold = 0.2  # detection score threshold override
n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
args.distributed = n_gpu > 1
if args.distributed:
    torch.cuda.set_device(args.local_rank)
    torch.distributed.init_process_group(backend='gloo', init_method='env://')
    synchronize()
device = 'cuda'
valid_set = COCODataset("/data/COCO_17/", 'val', preset_transform(args, train=False))
# backbone = vovnet39(pretrained=False)
backbone = resnet18(pretrained=False)  # weights come from the checkpoint below
model = FCOS(args, backbone)
# load weight
model_file = "./training_dir/epoch-1.pt"
chkpt = torch.load(model_file, map_location='cpu')  # load checkpoint
model.load_state_dict(chkpt['model'])
print('load weights from ' + model_file)
model = model.to(device)
if args.distributed:
    model = nn.parallel.DistributedDataParallel(
def main():
    """Train a COCO keypoint pose model with periodic validation.

    Reads paths and hyper-parameters from ``config.yaml``, trains for
    ``num_epochs`` epochs, saves per-epoch and final weights, and appends
    the printed loss lines to ``logs.txt``.
    """
    config_path = "config.yaml"
    with open(config_path, 'r') as fp:
        config = yaml.safe_load(fp)

    # COCO 2017 keypoint image/annotation locations
    coco_train_imgs = os.path.join(config['path'], "images/train2017")
    coco_train_annos_path = os.path.join(
        config['path'], "annotations/person_keypoints_train2017.json")
    coco_val_imgs = os.path.join(config['path'], "images/val2017")
    coco_val_annos_path = os.path.join(
        config['path'], "annotations/person_keypoints_val2017.json")
    epochs = config['num_epochs']
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    train_dataset = COCODataset(coco_train_imgs, coco_train_annos_path,
                                num_keypoints=17, transform=get_transform(train=True))
    val_dataset = COCODataset(coco_val_imgs, coco_val_annos_path,
                              num_keypoints=17, transform=get_transform(train=False))
    train_dataloader = DataLoader(train_dataset, batch_size=8, shuffle=True, num_workers=4)
    # NOTE(review): shuffle=True on the validation loader looks unintended,
    # but is preserved — it does not change the validation metric, only order.
    val_dataloader = DataLoader(val_dataset, batch_size=4, shuffle=True, num_workers=4)

    model = PoseModel(num_kpts=17)
    model.to(device)
    model.train()
    criterion = JointMSELoss()
    optimizer = optim.SGD(model.parameters(), lr=config['learning_rate'],
                          momentum=config['momentum'])

    # window size for the running-loss report (print every LOG_EVERY iters)
    LOG_EVERY = 1000
    logs = []
    for e in range(epochs):
        running_loss = 0.0
        for i, data in enumerate(train_dataloader):
            imgs, _, heatmaps = data
            imgs = imgs.to(device)
            heatmaps = heatmaps.to(device)
            optimizer.zero_grad()
            outs = model(imgs)
            loss = criterion(outs.float(), heatmaps.float())
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if i % LOG_EVERY == LOG_EVERY - 1:
                # BUGFIX: average over the actual window size. The original
                # accumulated over 1000 iterations but divided by 2000,
                # under-reporting the loss by a factor of two.
                msg = "Epoch {} | Iteration {} | Loss {:.4f}".format(
                    e + 1, i + 1, running_loss / LOG_EVERY)
                print(msg)
                logs.append(msg)
                running_loss = 0.0
        # validate puts the model in eval mode; restore training mode after
        validate(model, val_dataloader, criterion, device)
        model.train()
        print("Save model")
        torch.save(model.state_dict(), f'model_epochs_{e}.pth')

    print("Test training finished.")
    torch.save(model.state_dict(), f'model_epochs_{epochs}.pth')
    with open("logs.txt", 'a') as fp:
        for el in logs:
            fp.write(el + "\n")
    print("Logs saved")
# NOTE(review): top of a single-file YOLO training script; the script
# continues past this chunk (training loop not visible here).
import torch.nn as nn
import torch
from loss import YOLO_loss
from dataset import COCODataset, print_result
from saver import Saver
from network import YOLO
from utils import join

# training hyper-parameters and local (Windows) data locations
num_epochs = 10
learning_rate = 1e-6
data_root = r'E:/LocalData/coco/train2017'
save_root = r'C:\LocalData'

data_loader = torch.utils.data.DataLoader(dataset=COCODataset(root=data_root),
                                          batch_size=20, shuffle=True)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = YOLO()
if torch.cuda.device_count() > 1:
    # multi-GPU via DataParallel (single-process)
    print("Let's use", torch.cuda.device_count(), "GPUs!")
    model = nn.DataParallel(model)
model.to(device)
criterion = YOLO_loss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# checkpoint manager; keeps at most 20 saves and resumes from the latest
yolo_saver = Saver(model, join(save_root, r'yolo', r'save'), "yolo", max_to_keep=20)
loaded_index = yolo_saver.load()
# NOTE(review): fragment of an ATSS training script; the optim.SGD(...)
# call at the end is cut off past this chunk.
device = 'cuda'
# training transforms: random resize + horizontal flip, then normalize
train_trans = transform.Compose([
    transform.RandomResize(args.train_min_size_range, args.train_max_size),
    transform.RandomHorizontalFlip(0.5),
    transform.ToTensor(),
    transform.Normalize(args.pixel_mean, args.pixel_std)
])
# validation transforms: deterministic resize only
valid_trans = transform.Compose([
    transform.Resize(args.test_min_size, args.test_max_size),
    transform.ToTensor(),
    transform.Normalize(args.pixel_mean, args.pixel_std)
])
train_set = COCODataset(args.path, 'train', train_trans)
valid_set = COCODataset(args.path, 'val', valid_trans)
# alternative backbones kept for experimentation:
# backbone = vovnet39(pretrained=True)
# backbone = vovnet57(pretrained=True)
# backbone = resnet18(pretrained=True)
backbone = resnet50(pretrained=True)
#backbone = resnet101(pretrained=True)
model = ATSS(args, backbone)
model = model.to(device)
optimizer = optim.SGD(
    model.parameters(),
    lr=args.lr,
    momentum=0.9,
    weight_decay=0.0001,
def main(args):
    """Train the ED surface-normal model with checkpointing and TensorBoard.

    Loads a YAML config, optionally resumes from the latest checkpoint,
    then iterates samples one at a time (no batching) for ``args.epochs``
    epochs, saving a per-epoch and a rolling 'latest' checkpoint.
    """
    # setup
    writer = SummaryWriter(log_dir='./logs/{}'.format(args.run_name))
    dev = 'cpu' if args.gpu is None else 'cuda:{}'.format(args.gpu)
    device = torch.device(dev)
    with open(args.config_file, 'r') as f:
        try:
            config = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            logging.error("Invalid YAML")
            logging.error(exc)
            # BUGFIX: fail fast — the original continued and crashed later
            # with a confusing NameError because 'config' was never bound.
            raise

    # setup models
    model = ED()
    model = model.to(device)

    # setup training optimizers
    learning_rate = args.learning_rate
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min')

    # setup datasets
    dataset_dir = args.dataset_dir
    if args.dataset_type == 0:
        coco_dataset_dir = path.join(dataset_dir, 'coco')
        train_dataset = COCODataset(coco_dataset_dir, config, mode='train')
        val_dataset = COCODataset(coco_dataset_dir, config, mode='validation')
    else:
        raise NotImplementedError

    epoch_start = 0
    epoch_end = args.epochs
    total_step = 0

    # load from checkpoint iff it exists and resume was requested
    latest_checkpoint_name = '{}-latest.ckpt'.format(args.run_name)
    latest_checkpoint_path = path.join(args.checkpoint_dir, latest_checkpoint_name)
    if path.exists(latest_checkpoint_path) and args.resume:
        checkpoint = CheckPoint.load(latest_checkpoint_path, device)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        epoch_start = checkpoint['epoch']
        total_step = checkpoint['total_step']

    # setup dataset and actually run training
    for epoch in range(epoch_start, epoch_end):
        # run training over a fresh random permutation of sample indices
        model.train()
        sample_indices = list(range(len(train_dataset)))
        random.shuffle(sample_indices)
        sample_it = tqdm(sample_indices, leave=False)
        for data_index in sample_it:
            images, corrs, angles = train_dataset[data_index]
            images = images.to(device)
            corrs = corrs.to(device)
            angles = angles.to(device)
            # compute loss and update
            normals = model(images)
            loss = surface_normal_loss(normals, corrs, angles)
            # update weights
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            writer.add_scalar('data/loss', loss, total_step)
            sample_it.set_description('loss: {:.02f}'.format(loss))
            if total_step % args.image_step == 0:
                # BUGFIX: map normals from [-1, 1] to [0, 1] for display.
                # The original wrote (normals + 1 / 2), which by operator
                # precedence is normals + 0.5 and overflows the image range.
                writer.add_images('images/normals', (normals + 1) / 2, total_step)
            total_step += 1
        # save every epoch: a named checkpoint plus the rolling 'latest'
        checkpoint_name = '{}-{}.ckpt'.format(args.run_name, epoch)
        checkpoint_path = path.join(args.checkpoint_dir, checkpoint_name)
        CheckPoint.save(checkpoint_path, model, optimizer,
                        scheduler, total_step, epoch)
        CheckPoint.save(latest_checkpoint_path, model, optimizer,
                        scheduler, total_step, epoch)
    writer.close()
# NOTE(review): body of a @contextmanager timing helper whose `def` line
# (presumably `def timeblock(label):`) lies above this chunk, followed by
# module-level scratch/test code. Documented in place; not rewritten.
r'''Context manager that times a code block; requires:
import time
from contextlib import contextmanager
(translated from the original Chinese docstring)
'''
start = time.perf_counter()
try:
    yield
finally:
    end = time.perf_counter()
    # print "<label> : <elapsed seconds>" in bright blue (ANSI escape)
    print('\033[1;34m{} : {}\033[0m'.format(label, end - start))

# --- module-level scratch code below ---
xml_dir = "/home/chiebotgpuhq/MyCode/dataset/Siam_detection/aqmzc"
json_path = "/home/chiebotgpuhq/Share/gpu-server/disk/disk1/coco_dataset/annotations/instances_val2017.json"
# a=XMLLikeDataset(xml_dir)
a = COCODataset(json_path)
# siam_g=GenerateSiamsesSample(a,"/home/chiebotgpuhq/MyCode/dataset/test")
# siam_g.test()
# with timeblock("too slow"):
# fetch one sample; b/f/m are presumably background, foreground and mask — confirm
b, f, m = a[1263]
# for b,f,m in a:
#     print(type(b))
#     cv2.imshow("b",b)
#     cv2.imshow("f",f)
#     cv2.imshow("m",m)
#     cv2.waitKey(100)
# b,roi=seamlessclone(b,f,m)
# plt.imshow(b)
plt.imshow(m)
# plt.imshow(m)
#存图片 cv2.imwrite(os.path.join(save_dir, img_name), img) cv2.imwrite(os.path.join(save_dir, img_o_name), img_o) save_xml(save_dir, idx, img, box) def test(self): bg, fg, mask = self.dataset[100] self.deal_one_sample(bg, fg, mask, 100) def do_task(self, max_time: int = None, work_num: int = 8): if max_time is None: max_time = len(self.dataset) ''' XXX:测试代码 ''' for idx in tqdm(range(4589, max_time)): self.deal_one_sample(*self.dataset[idx], idx) # with futures.ThreadPoolExecutor() as exec: # task_list=(exec.submit(self.deal_one_sample,*self.dataset[idx],idx) \ # for idx in range(max_time)) # for task in tqdm(futures.as_completed(task_list),total=max_time): # pass if __name__ == "__main__": json_path = "/home/chiebotgpuhq/Share/gpu-server/disk/disk1/coco_dataset/annotations/instances_val2017.json" save_dir = "/home/chiebotgpuhq/Share/gpu-server/disk/disk2/cocosiam_dataset/val" dataset = COCODataset(json_path, sampler='normal') test_cls = GenerateSiamsesSample(dataset, save_dir) test_cls.do_task()
def main(args):
    # NOTE(review): training driver for a YOLOv3 + captioning pipeline.
    # The chunk is truncated — it ends inside an unterminated triple-quoted
    # comment — so it is documented in place, not rewritten.
    print(args)
    traintrans = transforms.Compose([
        transforms.Resize((args.image_size, args.image_size)),
        transforms.ToTensor()
    ])
    traindset = COCODataset(args.root_dir, args.ann_dir, mode="train",
                            transform=traintrans)
    trainloader = DataLoader(traindset, batch_size=args.batch_size, shuffle=True,
                             num_workers=args.ncpu, collate_fn=utils.collate_fn,
                             drop_last=True)
    """ create dictionary from args.captionfile """
    vocab = Dictionary()
    utils.create_vocfile(traindset, args.captionfile)
    before = time.time()
    vocab.create_vocab(args.captionfile)
    print('took {}s for vocab'.format(time.time() - before), flush=True)
    """ models for training """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    yolo = YOLOv3()
    capmodel = Captioning(vocab_size=len(vocab))
    if args.multi_gpu:
        print('Using {} GPUs...'.format(torch.cuda.device_count()))
        if torch.cuda.device_count() > 1:
            capmodel = nn.DataParallel(capmodel)
            yolo = nn.DataParallel(yolo)
    if args.use_gpu:
        capmodel = capmodel.to(device)
        yolo = yolo.to(device)
    mseloss = nn.MSELoss()
    bceloss = nn.BCELoss()
    celoss = nn.CrossEntropyLoss()
    # the 9 standard COCO anchor (w, h) pairs for YOLOv3's three scales
    anchors = torch.tensor([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                            [59, 119], [116, 90], [156, 198], [373, 326]],
                           dtype=torch.float).to(device)
    scaled_anchors = utils.scale_anchors(args.image_size, args.grid_size, anchors)
    print('begin training')
    for ep in range(args.epochs):
        for it, sample in enumerate(trainloader):
            # sample contains 'image', 'captions', 'bboxinfo'
            im_ten = sample['image'].to(device)
            info = sample['bboxinfo']
            """
            for objects in info:
                bounding_boxes = []
                classes = []
                for bbinfo in objects:
                    bounding_boxes.append(bbinfo['bbox'])
                    classes.append(bbinfo['obj_id'])
                bbs = torch.tensor(bounding_boxes)
                cls = torch.tensor(classes)
                print(bbs.size())
                print(cls.size())
                im_ten = im_ten.to(device)
            """
            x, y, w, h, pred_conf, pred_cls = yolo(im_ten)
            pred_x, pred_y, pred_w, pred_h = utils.offset_boxes(
                x, y, w, h, device, args.image_size, args.grid_size,
                scaled_anchors)
            # debug early-exit: process only one batch of one epoch
            break
        break
    print('done training')
    """ # size check of captioning model