# Exemplo n.º 1 — fragment-boundary marker from the snippet scrape (not code)
from network.wideresnet import WideResNet as WRN
import utils
import warnings

from cutmix.cutmix import CutMix
from cutmix.utils import CutMixCrossEntropyLoss
from autoaug.archive import fa_reduced_cifar10, fa_reduced_imagenet, autoaug_paper_cifar10, autoaug_policy
from autoaug.augmentations import Augmentation

warnings.filterwarnings("ignore")

model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

parser = ConfigArgumentParser(conflict_handler='resolve')
parser.add_argument('-j',
                    '--workers',
                    default=16,
                    type=int,
                    metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--expname',
                    default='TEST',
                    type=str,
                    help='name of experiment')
parser.add_argument('--cifarpath',
                    default='/data/private/pretrainedmodels/',
                    type=str)
parser.add_argument('--imagenetpath',
                    default='/data/private/pretrainedmodels/imagenet/',
# Exemplo n.º 2 — fragment-boundary marker from the snippet scrape (not code)
        print('loaded')

    def infer(root_path, top_k=1):
        """Inference dispatcher (bound to the serving platform by the caller).

        Uses the retrieval-style path only when the config says
        mode == 'test' AND infer_mode == 'ret'; every other combination
        falls back to the default ``_infer`` path. ``top_k`` is accepted
        for interface compatibility but not used here.
        """
        # Outside test mode there is nothing to choose: always the default path.
        if C.get()['mode'] != 'test':
            return _infer(model, root_path)
        # In test mode, pick the retrieval variant only when explicitly requested.
        # NOTE: 'infer_mode' is only read in test mode, matching the original
        # evaluation order (the key may be absent otherwise).
        wants_retrieval = C.get()['infer_mode'] == 'ret'
        chosen = _infer_ret if wants_retrieval else _infer
        return chosen(model, root_path)

    nsml.bind(save=save, load=load, infer=infer)


if __name__ == '__main__':
    # mode argument
    args = ConfigArgumentParser(conflict_handler='resolve')
    args.add_argument("--cv", type=int, default=0)
    args.add_argument("--ratio", type=float, default=0.1)

    # reserved for nsml
    args.add_argument("--cuda", type=bool, default=True)

    args.add_argument("--mode", type=str, default="train")
    args.add_argument("--iteration", type=str, default='0')
    args.add_argument("--pause", type=int, default=0)
    args.add_argument("--transfer", type=bool, default=False)

    config = args.parse_args()

    logger.info(str(C.get().conf))
from theconf import Config as C

from commons import tta_num, get_logger, decode, \
    print_log, encoded_tta_default, mirror_expansion
from conf import dataroot
from imagenet import ImageNet
from metrics import accuracy, Accumulator
from networks import get_model
from profile import Profiler

logger = get_logger('learn2test')
logger.setLevel(logging.DEBUG)


if __name__ == '__main__':
    parser = ConfigArgumentParser(conflict_handler='resolve')
    parser.add_argument('--test-batch', type=int, default=32)
    parser.add_argument('--tta', type=str, default='center')
    parser.add_argument('--deform', type=str, default='')
    parser.add_argument('--corrupt', type=str, default='')
    args = parser.parse_args()

    assert args.dataset == 'imagenet'

    model_target = get_model(args.target_network, gpus=[0], num_classes=args.num_classes, train_aug=args.target_aug).eval()
    profiler = Profiler(model_target)
    print('target network, FLOPs=', profiler.flops(torch.zeros((1, 3, C.get()['target_size'], C.get()['target_size'])).cuda(), ))

    scaled_size = int(math.floor(args.target_size / 0.875))

    if args.deform != '':
# Exemplo n.º 4 — fragment-boundary marker from the snippet scrape (not code)
        testset1 = ImageNet(root=imagenet_path,
                            split='val',
                            transform=transform_train)
        testset2 = ImageNet(root=imagenet_path,
                            split='val',
                            transform=transform_test)

        trainset.num_class = testset1.num_class = testset2.num_class = 1000
        trainset.targets = [lb for _, lb in trainset.samples]
    else:
        raise ValueError(dataset)
    return trainset, testset1, testset2


if __name__ == '__main__':
    parser = ConfigArgumentParser(conflict_handler='resolve')
    parser.add_argument('--aug-p', default=0.2, type=float)
    parser.add_argument('--port', default=1958, type=int)
    args = parser.parse_args()

    total_trainset, testset_t, testset = get_dataset(args.dataset)
    num_class = total_trainset.num_class
    sss = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
    sss = sss.split(list(range(len(total_trainset))), total_trainset.targets)
    train_idx, valid_idx = next(sss)

    valid_size = 25600
    small_train_size = 2048
    test_size = len(testset)
    validset = Subset(
        testset_t, valid_idx[:valid_size])  # TODO : max images for validation
# Exemplo n.º 5 — fragment-boundary marker from the snippet scrape (not code)
from architectures.efficientnet_pytorch.rmsproptf import RMSpropTF
from metrics import Accumulator, prediction_correlation
from networks import get_model
from profile import Profiler
from remote_dataloader.loader import RemoteDataLoader
from theconf import Config as C
from imagenet_c import corruption_dict
from conf import sodeep_model
from sodeep.sodeep import SpearmanLoss, load_sorter


if __name__ == '__main__':
    logger = get_logger('learn2test-train')
    logger.setLevel(logging.DEBUG)

    parser = ConfigArgumentParser(conflict_handler='resolve')
    parser.add_argument('--tag', default='dev', type=str)
    parser.add_argument('--port', default=1958, type=int)
    parser.add_argument('--cv', default=0, type=int)

    parser.add_argument('--regularization', default=2, type=int)
    parser.add_argument('--ema-momentum', default=0.999, type=float)

    parser.add_argument('--data-cache', default=0, type=int)

    parser.add_argument('--cutout', default=-1, type=int)
    parser.add_argument('--aug-corrupt', default=1, type=int)
    parser.add_argument('--aug-p', default=0.4, type=float)
    args = parser.parse_args()

    tv = 'v8'  # 12 tta, bugfix
    y = np.exp(y)

    # take the sum along the specified axis
    ax_sum = np.expand_dims(np.sum(y, axis=axis), axis)

    # finally: divide elementwise
    p = y / ax_sum

    # flatten if X was 1D
    if len(X.shape) == 1: p = p.flatten()

    return p


if __name__ == '__main__':
    parser = ConfigArgumentParser(conflict_handler='resolve')
    args = parser.parse_args()

    # L2T Dataset
    trainset, testset = get_dataset(args.dataset)

    # Legacy Dataset
    transform_test = transforms.Compose([
        transforms.Resize(args.target_size + 32, interpolation=Image.BICUBIC),
        transforms.CenterCrop(args.target_size),
    ])

    testset_legacy = ImageNet(root='/data/public/rw/datasets/imagenet-pytorch',
                              split='val',
                              transform=transform_test)