Example #1
    def save(self, which_epoch):
        save_filename = f'{which_epoch}_{opt.model}.pt'
        save_path = os.path.join(self.save_dir, save_filename)
        save_dict = OrderedDict()
        save_dict['classifier'] = self.classifier.state_dict()

        save_dict['optimizer'] = self.optimizer.state_dict()
        save_dict['scheduler'] = self.scheduler.state_dict()
        save_dict['epoch'] = which_epoch
        torch.save(save_dict, save_path)
        utils.color_print(f'Save checkpoint "{save_path}".', 3)
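Every snippet on this page calls a utils.color_print(msg, color) helper. A minimal sketch of what such a helper could look like, assuming the integer argument selects an ANSI foreground color (30 + color, so 1 = red, 3 = yellow, 4 = blue, 5 = magenta, which matches how the call sites use it):

def color_print(msg, color=0):
    # Hypothetical sketch: wrap msg in an ANSI escape sequence
    # (30 + color selects the foreground color), then reset with \033[0m.
    print(f'\033[{30 + color}m{msg}\033[0m')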
Example #2
    def save(self, which_epoch):
        save_filename = f'{which_epoch}_{opt.model}.pt'
        save_path = os.path.join(self.save_dir, save_filename)
        save_dict = {
            'cleaner': self.cleaner,
            'optimizer': self.g_optimizer,
            'scheduler': self.scheduler,
            'epoch': which_epoch
        }

        save_checkpoint(save_dict, save_path)
        utils.color_print(f'Save checkpoint "{save_path}".', 3)
Example #3
    def save(self, which_epoch):
        save_filename = f'{which_epoch}_{opt.model}.pt'
        save_path = os.path.join(self.save_dir, save_filename)
        save_dict = OrderedDict()
        save_dict['direct_feature'] = self.direct_feature.state_dict()
        save_dict['meta_embedding'] = self.meta_embedding.state_dict()
        save_dict['centroids'] = self.mem

        save_dict['optimizer'] = self.optimizer.state_dict()
        save_dict['scheduler'] = self.scheduler.state_dict()
        save_dict['epoch'] = which_epoch
        torch.save(save_dict, save_path)
        utils.color_print(f'Save checkpoint "{save_path}".', 3)
Example #4
    def save(self, which_epoch, published=False):
        save_filename = f'{which_epoch}_{opt.model}.pt'
        save_path = os.path.join(self.save_dir, save_filename)
        save_dict = {'detector': self.detector, 'epoch': which_epoch}

        if published:
            save_dict['epoch'] = 0
        else:
            save_dict['optimizer'] = self.optimizer
            save_dict['scheduler'] = self.scheduler

        save_checkpoint(save_dict, save_path)
        utils.color_print(f'Save checkpoint "{save_path}".', 3)
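A short usage sketch for the published flag above (the model instance and epoch number are hypothetical): a full checkpoint keeps optimizer and scheduler state for resuming, while a published one drops them and resets the stored epoch.

model.save(which_epoch=30)                  # resumable checkpoint
model.save(which_epoch=30, published=True)  # stripped release checkpoint, epoch saved as 0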
Example #5
    def load(self, ckpt_path):
        load_dict = torch.load(ckpt_path, map_location=opt.device)
        self.classifier.load_state_dict(load_dict['classifier'])
        if opt.resume:
            self.optimizer.load_state_dict(load_dict['optimizer'])
            self.scheduler.load_state_dict(load_dict['scheduler'])
            epoch = load_dict['epoch']
            utils.color_print('Load checkpoint from %s, resume training.' % ckpt_path, 3)
        else:
            epoch = load_dict['epoch']
            utils.color_print('Load checkpoint from %s.' % ckpt_path, 3)

        return epoch
Example #6
    def load_network(self, network, network_label, epoch_label, save_dir=''):
        save_filename = '%s_net_%s.pt' % (epoch_label, network_label)
        if not save_dir:
            save_dir = self.save_dir
        save_path = os.path.join(save_dir, save_filename)
        if not os.path.isfile(save_path):
            color_print("Exception: Checkpoint '%s' not found" % save_path, 1)
            if network_label == 'G':
                raise Exception("Generator must exist!,file '%s' not found" %
                                save_path)
        else:
            # network.load_state_dict(torch.load(save_path))
            try:
                network.load_state_dict(
                    torch.load(save_path, map_location=opt.device))
                color_print('Load checkpoint from %s.' % save_path, 3)

            except Exception:
                pretrained_dict = torch.load(save_path,
                                             map_location=opt.device)
                model_dict = network.state_dict()
                try:
                    pretrained_dict = {
                        k: v
                        for k, v in pretrained_dict.items() if k in model_dict
                    }
                    network.load_state_dict(pretrained_dict)
                    if self.opt.verbose:
                        print(
                            'Pretrained network %s has extra layers; only loading layers that are used'
                            % network_label)
                except Exception:
                    print(
                        'Pretrained network %s has fewer layers; the following are not initialized:'
                        % network_label)
                    for k, v in pretrained_dict.items():
                        if v.size() == model_dict[k].size():
                            model_dict[k] = v

                    not_initialized = set()

                    for k, v in model_dict.items():
                        if k not in pretrained_dict or v.size(
                        ) != pretrained_dict[k].size():
                            not_initialized.add(k.split('.')[0])

                    print(sorted(not_initialized))
                    network.load_state_dict(model_dict)
Example #7
    def load(self, ckpt_path):
        load_dict = {
            'cleaner': self.cleaner,
        }

        if opt.resume:
            load_dict.update({
                'optimizer': self.g_optimizer,
                'scheduler': self.scheduler,
            })
            utils.color_print('Load checkpoint from %s, resume training.' % ckpt_path, 3)
        else:
            utils.color_print('Load checkpoint from %s.' % ckpt_path, 3)

        ckpt_info = load_checkpoint(load_dict, ckpt_path, map_location=opt.device)
        epoch = ckpt_info.get('epoch', 0)

        return epoch
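Examples #2, #4, and #7 pass whole objects to save_checkpoint/load_checkpoint helpers that are not shown on this page. A minimal sketch of what such helpers could look like, assuming any value exposing state_dict()/load_state_dict() is (de)serialized through it, plain values such as 'epoch' are stored as-is, and leftover entries come back as an info dict (consistent with ckpt_info.get('epoch', 0) in Example #7):

import torch

def save_checkpoint(save_dict, save_path):
    # Hypothetical helper: serialize via state_dict() where available,
    # keep plain values (e.g. 'epoch') unchanged.
    out = {}
    for key, value in save_dict.items():
        out[key] = value.state_dict() if hasattr(value, 'state_dict') else value
    torch.save(out, save_path)

def load_checkpoint(load_dict, ckpt_path, map_location=None):
    # Hypothetical helper: restore state into the objects passed in and
    # return the remaining plain entries (e.g. 'epoch') as an info dict.
    ckpt = torch.load(ckpt_path, map_location=map_location)
    for key, obj in load_dict.items():
        if key in ckpt:
            obj.load_state_dict(ckpt[key])
    return {k: v for k, v in ckpt.items() if k not in load_dict}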
Example #8
    def __init__(self, config, **kwargs):
        super(Model, self).__init__(config, **kwargs)
        self.config = config

        # Use different config files for YoloV2 and YoloV3
        if config.MODEL.NAME == 'Yolo2':
            cfgfile = 'configs/networks/yolo2-voc.cfg'
        elif config.MODEL.NAME == 'Yolo3':
            cfgfile = 'configs/networks/yolo3-coco.cfg'
        else:
            raise ValueError('Unknown model name: %s' % config.MODEL.NAME)

        # Initialize the detector
        self.detector = Darknet(cfgfile, device=opt.device).to(opt.device)
        if opt.debug:
            print_network(self.detector)

        # Load a weights file before --load (optional)
        if opt.load and opt.load[-2:] != 'pt':
            if is_first_gpu():
                utils.color_print('Load Yolo weights from %s.' % opt.load, 3)
            self.detector.load_weights(opt.load)
        elif 'LOAD' in config.MODEL and config.MODEL.LOAD[-2:] != 'pt':
            if is_first_gpu():
                utils.color_print(
                    'Load Yolo weights from %s.' % config.MODEL.LOAD, 3)
            self.detector.load_weights(config.MODEL.LOAD)

        self.to(opt.device)
        # Multi-GPU support
        if is_distributed():
            self.detector = torch.nn.SyncBatchNorm.convert_sync_batchnorm(
                self.detector)
            self.detector = torch.nn.parallel.DistributedDataParallel(
                self.detector,
                find_unused_parameters=False,
                device_ids=[opt.local_rank],
                output_device=opt.local_rank)
            # self.detector = torch.nn.parallel.DistributedDataParallel(self.detector, device_ids=[opt.local_rank], output_device=opt.local_rank)

        self.optimizer = get_optimizer(config, self.detector)
        self.scheduler = get_scheduler(config, self.optimizer)

        self.avg_meters = ExponentialMovingAverage(0.95)
        self.save_dir = os.path.join('checkpoints', opt.tag)
Example #9
    def __init__(self, opt):
        super(Model, self).__init__()
        self.opt = opt
        
        # Use different config files for YoloV2 and YoloV3
        if opt.model == 'Yolo2':
            cfgfile = 'configs/yolo2-voc.cfg'
        elif opt.model == 'Yolo3':
            cfgfile = 'configs/yolo3-coco.cfg'
        else:
            raise ValueError('Unknown model: %s' % opt.model)

        # Initialize the detector
        self.detector = Darknet(cfgfile, device=opt.device).to(opt.device)
        print_network(self.detector)

        # Load a weights file before --load (optional)
        if opt.weights:
            utils.color_print('Load Yolo weights from %s.' % opt.weights, 3)
            self.detector.load_weights(opt.weights)

        self.optimizer = get_optimizer(opt, self.detector)
        self.scheduler = get_scheduler(opt, self.optimizer)

        self.avg_meters = ExponentialMovingAverage(0.95)
        self.save_dir = os.path.join(opt.checkpoint_dir, opt.tag)
Example #10
def raise_exception(msg, error_code=1):
    utils.color_print('Exception: ' + msg, 1)
    exit(error_code)
Example #11
def deprecation_info(*args, **kwargs):
    warnings.warn(info, DeprecationWarning)
    utils.color_print(f'DeprecationWarning: {info}', 1)
    result = fn(*args, **kwargs)
    return result
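The wrapper above closes over fn and info from an enclosing scope, so it is presumably the inner function of a deprecation decorator. A sketch of what that enclosing factory could look like (the name deprecated is an assumption; utils.color_print is the page's helper):

import functools
import warnings

def deprecated(info):
    # Hypothetical decorator factory enclosing the wrapper above.
    def decorator(fn):
        @functools.wraps(fn)
        def deprecation_info(*args, **kwargs):
            warnings.warn(info, DeprecationWarning)
            utils.color_print(f'DeprecationWarning: {info}', 1)
            return fn(*args, **kwargs)
        return deprecation_info
    return decorator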
Example #12
                        default='cache',
                        help='folder name to clear')

    parser.add_argument('--rm', action='store_true', help='remove directly instead of moving to trash')

    return parser.parse_args()


opt = parse_args()

paths = ['checkpoints', 'logs', 'results']

if opt.rm:
    for path in paths:
        p = os.path.join(path, opt.tag)
        if os.path.isdir(p):
            command = 'rm -r ' + p
            print(command)
            os.system(command)
else:
    for path in paths:
        tmp = os.path.join('_.trash', utils.get_time_stamp(), path)
        utils.try_make_dir(tmp)
        p = os.path.join(path, opt.tag)
        if os.path.isdir(p):
            command = 'mv %s %s' % (p, tmp)
            print(command)
            os.system(command)

utils.color_print("Directory '%s' cleared." % opt.tag, 1)
Example #13
    hparams = cfg['hparams'].items()
    hparams = list(hparams)

    n = len(hparams)

    temp = [''] * n

    ans = []

    def dfs(i):
        if i >= n:
            ans.append(temp.copy())
            # print(temp)
            return
        hparam, choices = hparams[i]

        for choice in choices:
            temp[i] = choice
            dfs(i + 1)

    dfs(0)
    # for hparam in hparams:
    for i, one_run in enumerate(ans):
        command = cmd + ' --tag %s' % hash(8)
        for (hparam, _), choice in zip(hparams, one_run):
            command += ' --%s %s' % (hparam, choice)

        utils.color_print(('%d/%d: ' % ((i + 1), len(ans)) + command), 4)
        if opt.run:
            os.system(command)
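The recursive dfs above enumerates every combination of hyper-parameter choices, i.e. a Cartesian product. An equivalent sketch using itertools.product, assuming the same cfg['hparams'] mapping of name to choice list, and treating hash(8) as the repo's own 8-character tag generator (not the built-in hash):

import itertools

hparams = list(cfg['hparams'].items())  # [(name, [choice, ...]), ...]
runs = list(itertools.product(*(choices for _, choices in hparams)))
for i, one_run in enumerate(runs):
    command = cmd + ' --tag %s' % hash(8)  # hash(8): assumed tag helper
    for (name, _), choice in zip(hparams, one_run):
        command += ' --%s %s' % (name, choice)
    utils.color_print('%d/%d: %s' % (i + 1, len(runs), command), 4)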
Example #14
    # scale
    parser.add_argument('--scale', type=int, default=256, help='scale images to this size')
    parser.add_argument('--crop', type=int, default=None, help='then crop to this size')

    parser.add_argument('--load', type=str, default=None, help='load checkpoint')
    parser.add_argument('--which-epoch', type=int, default=None, help='which epoch to resume')

    return parser.parse_args()


opt = parse_args()

if not opt.load:
    print('Usage: submit.py --model your_model --load LOAD --gpu 0')
    utils.color_print('Exception: submit.py: the following arguments are required: --load', 1)
    exit(1)

opt.device = 'cuda:' + opt.gpu_ids if torch.cuda.is_available() and opt.gpu_ids != '-1' else 'cpu'

Model = get_model(opt.model)
model = Model(opt)
model = model.to(device=opt.device)

opt.which_epoch = model.load(opt.load)

model.eval()

with open('submission.csv', 'w') as f:  # pass newline='' to open() if blank rows appear in the CSV on Windows
    csv_writer = csv.writer(f)
    csv_writer.writerow(['id', 'predicted'])  # write the header row
Example #15
    def __init__(self,
                 voc_root,
                 class_names,
                 split='train.txt',
                 format='jpg',
                 transforms=None,
                 max_size=None,
                 use_cache=False,
                 use_difficult=False):
        utils.color_print(f'Use dataset: {voc_root}, split: {split[:-4]}', 3)

        im_list = os.path.join(voc_root, f'ImageSets/Main/{split}')
        image_root = os.path.join(voc_root, 'JPEGImages')

        self.image_paths = []
        self.bboxes = []
        self.labels = []
        self.difficults = []

        counter = defaultdict(int)
        tot_bbox = 0
        difficult_bbox = 0
        """
        如果有缓存的pickle文件,就直接从pickle文件读取bboxes
        """
        os.makedirs('.cache', exist_ok=True)

        cache_file = os.path.join(
            '.cache', f'{os.path.basename(voc_root)}_{split[:-4]}.pkl')
        if use_cache and os.path.isfile(cache_file):
            with open(cache_file, 'rb') as f:
                data = pickle.load(f, encoding='bytes')

            utils.color_print('Use cached annotations.', 3)

            self.image_paths, self.bboxes, self.labels, self.difficults, \
            counter, tot_bbox, difficult_bbox = data

        else:  # no cache file
            with open(im_list, 'r') as f:
                lines = f.readlines()

                for i, line in enumerate(lines):
                    utils.progress_bar(i, len(lines), 'Load Anno...')
                    image_id = line.rstrip('\n')
                    abspath = os.path.abspath(
                        os.path.join(image_root, f'{image_id}.{format}'))
                    self.image_paths.append(abspath)
                    with open(
                            os.path.join(voc_root,
                                         f'Annotations/{image_id}.xml'),
                            'r') as anno:
                        tree = ET.parse(anno)

                    # Parse the XML annotation
                    root = tree.getroot()
                    bboxes = []
                    labels = []

                    size = root.find('size')
                    width = int(size.find('width').text)
                    height = int(size.find('height').text)

                    for obj in root.iter('object'):  # may contain multiple objects
                        class_name = obj.find('name').text

                        difficult = obj.find('difficult').text
                        if difficult != '0' and not use_difficult:
                            difficult_bbox += 1
                            continue  # skip difficult samples

                        if class_name not in class_names:
                            continue  # skip classes not in class_names (remove this line to raise instead)
                            raise Exception(
                                f'"{class_name}" not in class names({class_names}).'
                            )

                        class_id = class_names.index(class_name)
                        bbox = obj.find('bndbox')
                        x1 = limit(int(bbox.find('xmin').text), 0, width)
                        y1 = limit(int(bbox.find('ymin').text), 0, height)
                        x2 = limit(int(bbox.find('xmax').text), 0, width)
                        y2 = limit(int(bbox.find('ymax').text), 0, height)

                        if x2 - x1 <= 2 or y2 - y1 <= 2:  # skip very small boxes
                            continue

                        counter[class_name] += 1
                        tot_bbox += 1
                        bboxes.append([x1, y1, x2, y2])
                        labels.append(class_id)

                    self.bboxes.append(bboxes)
                    self.labels.append(labels)
            """
            存放到缓存文件
            """
            data = [self.image_paths, self.bboxes, self.labels, self.difficults, \
                counter, tot_bbox, difficult_bbox]

            with open(cache_file, 'wb') as f:
                pickle.dump(data, f)

        for name in class_names:
            utils.color_print(
                f'{name}: {counter[name]} ({counter[name]/tot_bbox*100:.2f}%)',
                5)

        utils.color_print(f'Total bboxes: {tot_bbox}', 4)
        if difficult_bbox:
            utils.color_print(f'{difficult_bbox} difficult bboxes ignored.', 1)

        self.format = format

        assert transforms is not None, '"transforms" is required'

        self.transforms = transforms
        self.max_size = max_size
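The coordinate clamping above relies on a limit helper that is not shown on this page; a one-line sketch of the assumed behavior:

def limit(x, low, high):
    # Assumed clamp helper: keep x within [low, high].
    return max(low, min(x, high))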
Example #16
def is_image(file_name):
    return not file_name.startswith('.') and ('jpg' in file_name
                                              or 'png' in file_name)


files = list(filter(is_image, files))

if opt.label is None:  # test
    files.sort()
    test_count = len(files)
    with open(os.path.join(opt.out, 'test.txt'), 'w') as f:
        for line in files:
            line = os.path.join(os.path.abspath(opt.input), line)
            print(line)
            f.write(line + '\n')
        utils.color_print(f'test count: {test_count}', 3)

else:
    random.shuffle(files)
    count = len(files)
    val_count = int(count * opt.val_ratio)
    train_count = count - val_count
    val = random.sample(files, val_count)
    train = []
    for file in files:
        if file not in val:
            train.append(file)

    with open(os.path.join(opt.out, 'train.txt'), 'w') as f:
        for line in train:
            line = os.path.join(os.path.abspath(opt.input),