    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.ToTensor(),
    normalize,
])

valTransform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

trainLoader = TrainLoader(args.batchSize, args.trainDir, trainTransform)
valLoader = ValLoader(args.batchSize, args.valDir, valTransform)

# Fix random seeds for reproducibility
randomSeed = 123
np.random.seed(randomSeed)
torch.backends.cudnn.deterministic = True
torch.manual_seed(randomSeed)

net = models.resnet18()

if args.resumePth:
    # The resumed checkpoint was trained with a 100-class head
    net.fc = FC(args.dropoutRatio, 512, 100)
    net.load_state_dict(torch.load(args.resumePth))
    msg = 'Loading weight from {}'.format(args.resumePth)
    print(msg)

# Replace the classification head to match the target number of classes
net.fc = FC(args.dropoutRatio, 512, args.nbCls)
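# Note: FC is defined elsewhere in the original code, not in this snippet. Given the
# calls above (FC(dropoutRatio, inFeat, nbCls) replacing the 512-d ResNet-18 head), it is
# assumed to be dropout followed by a linear classifier; the sketch below is illustrative
# only and the actual implementation may differ.
import torch.nn as nn

class FC(nn.Module):
    def __init__(self, dropoutRatio, inFeat, nbCls):
        super(FC, self).__init__()
        self.drop = nn.Dropout(dropoutRatio)  # regularise the pooled features
        self.cls = nn.Linear(inFeat, nbCls)   # 512-d features -> nbCls logits

    def forward(self, x):
        return self.cls(self.drop(x))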
trainDir, valDir, testDir, episodeJson, nbCls = \
    dataset_setting(args.dataset, args.nSupport)

trainLoader = BatchSampler(imgDir=trainDir,
                           nClsEpisode=args.nClsEpisode,
                           nSupport=args.nSupport,
                           nQuery=args.nQuery,
                           transform=trainTransform,
                           useGPU=args.cuda,
                           inputW=inputW,
                           inputH=inputH,
                           batchSize=args.batchSize)

valLoader = ValLoader(episodeJson, valDir, inputW, inputH, valTransform, args.cuda)

testLoader = EpisodeSampler(imgDir=testDir,
                            nClsEpisode=args.nClsEpisode,
                            nSupport=args.nSupport,
                            nQuery=args.nQuery,
                            transform=valTransform,
                            useGPU=args.cuda,
                            inputW=inputW,
                            inputH=inputH)

#############################################################################################
## Networks
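# Aside on the episode samplers above: BatchSampler / EpisodeSampler are defined elsewhere
# in the original code and are assumed to draw N-way K-shot episodes (nClsEpisode classes
# with nSupport support and nQuery query images each). The helper below is an illustrative
# sketch of that sampling logic only; the real samplers also load images from imgDir,
# resize them to (inputW, inputH) and move tensors to GPU when useGPU is set.
import random
import torch

def sample_episode(imgsByCls, nClsEpisode, nSupport, nQuery):
    """imgsByCls: dict mapping a class label to a list of image tensors (C, H, W)."""
    classes = random.sample(list(imgsByCls.keys()), nClsEpisode)
    support, query, supportLabel, queryLabel = [], [], [], []
    for lbl, cls in enumerate(classes):
        imgs = random.sample(imgsByCls[cls], nSupport + nQuery)
        support += imgs[:nSupport]
        query += imgs[nSupport:]
        supportLabel += [lbl] * nSupport
        queryLabel += [lbl] * nQuery
    return (torch.stack(support), torch.tensor(supportLabel),
            torch.stack(query), torch.tensor(queryLabel))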
])

valTransform = transforms.Compose([
    transforms.Resize(args.imgSize),
    transforms.CenterCrop(args.imgSize),
    transforms.ToTensor(),
    normalize,
])

trainLoader = TrainLoader(batchSize=args.batchSize,
                          pairCSV=args.trainCSV,
                          imgDir=args.imgDir,
                          trainTransform=trainTransform)

valLoader = ValLoader(batchSize=args.batchSize,
                      pairCSV=args.valCSV,
                      imgDir=args.imgDir,
                      valTransform=valTransform)

if not os.path.exists(args.outDir):
    os.mkdir(args.outDir)

# Train
bestValLoss = np.inf
history = {'TrainLoss': [], 'ValLoss': []}
outHistory = os.path.join(args.outDir, 'history.json')
outModel = os.path.join(args.outDir, 'netBest.pth')

for epoch in range(1, args.nbEpoch + 1):
    trainLoss = 0.
    valLoss = 0.

    for i, batch in enumerate(trainLoader):
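# The inner loop body is truncated in this snippet. For context only, the variables set up
# above (bestValLoss, history, outHistory, outModel) are typically used at the end of each
# epoch as sketched below: append the epoch losses to history.json and keep the checkpoint
# with the lowest validation loss. This is an illustrative sketch, not the original
# training loop.
import json
import torch

def end_of_epoch(net, trainLoss, valLoss, bestValLoss, history, outHistory, outModel):
    history['TrainLoss'].append(trainLoss)
    history['ValLoss'].append(valLoss)
    with open(outHistory, 'w') as f:
        json.dump(history, f)                    # dump the loss curves so far
    if valLoss < bestValLoss:
        torch.save(net.state_dict(), outModel)   # keep the best model so far
        bestValLoss = valLoss
    return bestValLoss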