def infer(root_path, top_k=1):
    """Top-level inference dispatcher.

    Outside of test mode, plain classification inference is run.  In test
    mode the retrieval variant (``_infer_ret``) may be selected via the
    ``infer_mode`` config key.  ``top_k`` is accepted for interface
    compatibility but is not used here.
    """
    conf = C.get()
    # Non-test runs always take the plain classification path.
    if conf['mode'] != 'test':
        return _infer(model, root_path)
    # Test mode: retrieval variant is opt-in through the config.
    if conf['infer_mode'] == 'ret':
        return _infer_ret(model, root_path)
    return _infer(model, root_path)
def test_git_info():
    """If git metadata was collected, the remote must point at the theconf repo."""
    cfg = Config()
    if '_git' in cfg:
        remote = cfg['_git']['remote']
        assert 'wbaek/theconf.git' in remote
    Config.clear()
def new_forward(self, x, extract=False):
    """Replacement ResNet forward that can emit intermediate features.

    Depending on ``C.get()['feat']`` this returns the pre-pool flattened
    layer4 output (``'feat2'``), the pooled feature (``'pool'``), or the
    softmax class probabilities (any other value).  ``extract`` is accepted
    for interface compatibility but is not used.
    """
    # Unwrap DataParallel so attribute access reaches the real module.
    if isinstance(self, DataParallel):
        self = self.module

    # Standard torchvision ResNet stem and four stages.
    out = self.conv1(x)
    out = self.bn1(out)
    out = self.relu(out)
    out = self.maxpool(out)
    out = self.layer1(out)
    out = self.layer2(out)
    out = self.layer3(out)
    out = self.layer4(out)

    pre_pool = torch.flatten(out, 1)   # feature before global pooling
    out = self.avgpool(out)
    pooled = torch.flatten(out, 1)     # feature after global pooling
    probs = softmax(self.fc(pooled), dim=1)

    feat_mode = C.get()['feat']
    if feat_mode == 'feat2':
        return pre_pool
    if feat_mode == 'pool':
        return pooled
    return probs
def test_contains():
    """``in`` reports membership for stored keys and rejects missing ones."""
    config = Config()
    config['key'] = 1
    # Idiomatic membership asserts instead of comparing a bool to True/False.
    assert 'key' in config
    assert 'not_exists' not in config
    Config.clear()
def _infer(model, root_path, test_loader=None):
    """Run ``model`` over the test split and return predicted class indices.

    Parameters
    ----------
    model : torch.nn.Module
        Trained classifier; switched to eval mode here.
    root_path : str
        Dataset root; ``<root_path>/test_data`` is used when no loader is given.
    test_loader : iterable, optional
        Yields ``(data_id, image, _)`` batches.  Built from ``root_path``
        when ``None``.

    Returns
    -------
    list
        ``[res_id, res_cls]`` — accumulated sample ids and per-sample argmax
        class indices over the softmaxed model outputs.
    """
    if test_loader is None:
        test_loader = data_loader(root=os.path.join(root_path, 'test_data'), phase='test')

    model.eval()

    # Hoisted out of the loop: the config value cannot change mid-run.
    mask_face_classes = C.get()['infer_mode'] == 'face'

    fc_chunks = []  # per-batch softmax outputs; concatenated once at the end
    res_id = None   # `+`-concatenation preserves whatever sequence type the loader yields

    for data_id, image, _ in tqdm(test_loader):
        image = image.cuda()
        with torch.no_grad():
            fc = model(image)
        fc = np_softmax(fc.detach().cpu().numpy())

        if mask_face_classes:
            # Suppress the first 60 (non-face) classes so argmax picks from 60..99.
            fc[:, :60] = -1

        fc_chunks.append(fc)
        res_id = data_id if res_id is None else res_id + data_id

    # Single concatenate instead of O(n^2) repeated np.concatenate inside the loop.
    res_fc = np.concatenate(fc_chunks, axis=0)
    res_cls = np.argmax(res_fc, axis=1)
    return [res_id, res_cls]
def test_get_default():
    """get() falls back to the supplied default only for missing keys."""
    cfg = Config()
    cfg['key'] = 1
    assert cfg.get('key', 0) == 1
    assert cfg.get('not_exists', 0) == 0
    Config.clear()
def get_transform(random_crop=True):
    """Build the torchvision transform pipeline.

    With ``random_crop=True`` the augmented training pipeline is produced
    (config-driven augmentation policy, resize strategy, flip, rotation);
    otherwise a deterministic resize + center-crop evaluation pipeline.
    """
    # Channel statistics expressed in [0, 1].
    normalize = transforms.Normalize(
        mean=[v / 255.0 for v in (125.3, 123.0, 113.9)],
        std=[v / 255.0 for v in (63.0, 62.1, 66.7)],
    )

    steps = []
    if random_crop:
        aug = C.get()['aug']
        if aug == 'faa_imagenet':
            steps.append(Augmentation(fa_reduced_imagenet()))
        elif aug == 'faa_cifar10':
            steps.append(Augmentation(fa_reduced_cifar10()))
        elif aug == 'rot360':
            steps.append(transforms.RandomRotation(360))
        elif aug == 'default':
            pass  # no policy-based augmentation

        resize_method = C.get().conf.get('resize', 'ResizedCrop')
        if resize_method == 'ResizedCrop':
            steps.append(transforms.RandomResizedCrop(224, interpolation=Image.LANCZOS))
        elif resize_method == 'Resize':
            steps.append(transforms.Resize(256, interpolation=Image.LANCZOS))
            steps.append(transforms.CenterCrop(224))
        else:
            raise ValueError(resize_method)

        steps.append(transforms.RandomHorizontalFlip())
        steps.append(transforms.RandomRotation(20))
    else:
        # Evaluation path: deterministic resize and crop only.
        steps.append(transforms.Resize(256, interpolation=Image.LANCZOS))
        steps.append(transforms.CenterCrop(224))

    steps.append(transforms.ToTensor())
    steps.append(normalize)
    return transforms.Compose(steps)
def test_flatten(datafiles):
    """_flatten/flatten produce dash-joined key paths in insertion order."""
    filenames = [str(f) for f in datafiles.listdir()]
    config = Config(filenames[0], skip_timestamp=True, skip_git_info=True)
    config['tar'] = 'test'
    config['var'] = 'variation'

    pairs = config._flatten([], config.conf)
    expected = [('foo-bar', 1), ('foo-baz', 2), ('tar', 'test'), ('var', 'variation')]
    assert len(pairs) == 4
    for idx, kv in enumerate(expected):
        assert pairs[idx] == kv

    flat = config.flatten()
    assert len(flat) == 4
    assert flat['foo-bar'] == 1
    assert flat['foo-baz'] == 2
    assert flat['tar'] == 'test'
    assert flat['var'] == 'variation'
    Config.clear()
def test_update_and_dump(datafiles):
    """dump() bumps _version on each call and round-trips through a file."""
    filenames = [str(f) for f in datafiles.listdir()]
    config = Config(filenames[0], skip_timestamp=True, skip_git_info=True)

    # Initial state loaded from the fixture file.
    assert config['_version'] == 1
    assert config['foo']['bar'] == 1
    assert config['foo']['baz'] == 2

    # Nested update: replace a scalar with a mapping, bump another scalar.
    config['foo']['bar'] = {'test': 10}
    config['foo']['baz'] = 3
    assert config['foo']['bar']['test'] == 10
    assert config['foo']['baz'] == 3

    # Every dump() call increments _version.
    assert config.dump() == '_version: 2\nfoo:\n bar:\n test: 10\n baz: 3\n'
    assert config.dump() == '_version: 3\nfoo:\n bar:\n test: 10\n baz: 3\n'

    # Persist, reload, and verify the round trip.
    config.dump(filenames[0])
    Config.clear()
    config = Config(filenames[0])
    assert config['foo']['bar']['test'] == 10
    assert config['foo']['baz'] == 3
    Config.clear()
def test_singletone2(datafiles):
    """Constructing a second Config while one exists must raise."""
    filenames = [str(f) for f in datafiles.listdir()]
    _ = Config(filenames[0])
    with pytest.raises(Exception):
        Config(filenames[0])
    Config.clear()
def test_singletone():
    """get_instance() and get() must all return the same singleton."""
    conf1 = Config.get_instance()
    conf2 = Config.get_instance()
    conf3 = Config.get()
    assert conf1 == conf2
    # Previously conf3 was fetched but never checked — the test did not
    # actually verify that Config.get() returns the same instance.
    assert conf1 == conf3
    Config.clear()
def test_timestamp():
    """A freshly built Config records today's date in _timestamp."""
    cfg = Config()
    today = datetime.datetime.now().strftime('%Y/%m/%d')
    assert today in cfg['_timestamp']
    Config.clear()
# mode argument
args = ConfigArgumentParser(conflict_handler='resolve')
args.add_argument("--cv", type=int, default=0)
args.add_argument("--ratio", type=float, default=0.1)
# reserved for nsml
args.add_argument("--cuda", type=bool, default=True)
args.add_argument("--mode", type=str, default="train")
args.add_argument("--iteration", type=str, default='0')
args.add_argument("--pause", type=int, default=0)
args.add_argument("--transfer", type=bool, default=False)
config = args.parse_args()

logger.info(str(C.get().conf))

num_classes = C.get()['num_class']
# NOTE(review): no --lr argument is registered above; presumably
# ConfigArgumentParser injects it from the config file — confirm.
base_lr = config.lr
cuda = config.cuda
eval_split = 'val'
mode = config.mode

# Build the configured backbone; the classifier head is resized to num_classes.
if C.get()['model'] == 'resnet18':
    model = models.resnet18(pretrained=None)
    model.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    model.fc = nn.Linear(512 * 1, num_classes)
elif C.get()['model'] == 'resnet50':
    model = models.resnet50(pretrained=None)
    model.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    model.fc = nn.Linear(512 * 4, num_classes)
else:
    # Previously an unknown model name fell through with `model` never bound,
    # producing a confusing NameError later on; fail fast instead.
    raise ValueError('unsupported model: %s' % C.get()['model'])
if __name__ == '__main__':
    # mode argument
    args = ConfigArgumentParser(conflict_handler='resolve')
    args.add_argument("--cv", type=int, default=0)
    args.add_argument("--ratio", type=float, default=0.1)
    # reserved for nsml
    args.add_argument("--cuda", type=bool, default=True)
    args.add_argument("--mode", type=str, default="train")
    args.add_argument("--iteration", type=str, default='0')
    args.add_argument("--pause", type=int, default=0)
    config = args.parse_args()

    # Log the fully-resolved configuration for reproducibility.
    logger.info(str(C.get().conf))

    num_classes = C.get()['num_class']
    # NOTE(review): no --lr argument is registered above; presumably
    # ConfigArgumentParser injects it from the config file — confirm.
    base_lr = config.lr
    cuda = config.cuda
    # NOTE(review): eval_split/mode/base_lr are unused in this span —
    # the script likely continues beyond the visible chunk.
    eval_split = 'val'
    mode = config.mode

    # Fixed resnet50 backbone; head resized to the configured class count.
    model = models.resnet50(pretrained=None)
    model.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    model.fc = nn.Linear(512 * 4, num_classes)

    loss_fn = nn.CrossEntropyLoss()
    if cuda:
        # Move both the model and the loss to the GPU when requested.
        model = model.cuda()
        loss_fn = loss_fn.cuda()